SDMTools/0000755000176000001440000000000013570411137011750 5ustar ripleyusersSDMTools/NAMESPACE0000644000176000001440000000305713456113301013167 0ustar ripleyusersS3method(image,asc) S3method(print,asc) export(COGravity) export(ClassStat) export(ConnCompLabel) export(ImageDiff) export(Istat) export(Kappa) export(PatchStat) export(Scalebar) export(SigDiff) export(ZonalStat) export(accuracy) export(as.asc) export(asc.from.raster) export(asc.from.sp) export(asc2dataframe) export(aspect) export(auc) export(circular.averaging) export(compare.matrix) export(confusion.matrix) export(dataframe2asc) export(destination) export(distance) export(extract.data) export(getXYcoords) export(grid.area) export(grid.info) export(grid.perimeter) export(lcmw) export(legend.gradient) export(omission) export(optim.thresh) export(pnt.in.poly) export(prop.correct) export(put.data) export(quick.map) export(raster.from.asc) export(read.asc) export(read.asc.gz) export(sensitivity) export(slope) export(sp.from.asc) export(specificity) export(vector.averaging) export(write.asc) export(write.asc.gz) export(write.asc2) export(write.asc2.gz) export(wt.mean) export(wt.sd) export(wt.var) import(R.utils) useDynLib(SDMTools,Aspect) useDynLib(SDMTools,Dest) useDynLib(SDMTools,Dist) useDynLib(SDMTools,Slope) useDynLib(SDMTools,ccl) useDynLib(SDMTools,geographicPS) useDynLib(SDMTools,getmin) useDynLib(SDMTools,movewindow) useDynLib(SDMTools,pip) useDynLib(SDMTools,projectedPS) useDynLib(SDMTools,writeascdata) importFrom("grDevices", "gray", "heat.colors", "rainbow", "terrain.colors") importFrom("graphics", "contour", "image", "polygon", "rect", "segments", "text") importFrom("stats", "aggregate", "na.omit", "pnorm", "quantile", "sd") SDMTools/ChangeLog0000755000176000001440000000152712370121703013524 0ustar ripleyusersThis is the changelog where bug fixes & improvements are described from 16 May 2012 onward. 2 April 2014 - corrected distance for when locations are identical 10 February 2014 - version xxx - improved write.asc which is much faster - altered read.asc to accept names that are upper and lowercase asc - changed to use roxygen2 format for helpfiles 18 September 2013 - version 1.1-14 - improved the speed at which ascii's files are written for both write.asc and the new write.asc2 function. 8 November 2012 - version 1.1-13 - bugfix in aspect whereby aspect was not correctly calculated for directions between 90 & 180 degrees. thanks for the catch to RobertMiller7@u.boisestate.edu 16 May 2012 - version 1.1-12 - bugfix in ClassStats - splitting index equation corrected as Hossam Moniem noted the numerator was to be squared. SDMTools/man/0000755000176000001440000000000012370121703012515 5ustar ripleyusersSDMTools/man/destination.Rd0000644000176000001440000001002512370121703015323 0ustar ripleyusers\name{destination} \alias{destination} \title{Vincenty Direct Calculation of a Destination} \source{ The source code here was modified from \url{http://www.movable-type.co.uk/scripts/latlong-vincenty-direct.html}.\cr \cr Destinations were validated against Geoscience Australia calculations (\url{http://www.ga.gov.au/geodesy/datums/vincenty_direct.jsp}). } \usage{ destination(lat, lon, bearing, distance) } \arguments{ \item{lat}{a single value or vector of values representing latitude in decimal degrees from -90 to 90 degrees.} \item{lon}{a single value or vector of values representing longitude in decimal degrees from -180 to 180 degrees.} \item{bearing}{a single value or vector of values representing the bearings (directions) of interest ranging from 0 to 360 degrees.} \item{distance}{a single value or vector of values representing the distances in metres to the destination.} } \value{ Returns a data.frame with: \item{lon1}{the original longitude} \item{lat1}{the original latitude} \item{bearing}{the bearing used} \item{distance}{the distance used} \item{lon2}{the destination longitude} \item{lat2}{the destination latitude} } \description{ \code{destination} estimates the destination latitude and longitude given a starting latitude and longitude, a bearing and distance. \cr \cr For general information on Vincenty's formula, see e.g., \url{http://en.wikipedia.org/wiki/Vincenty's_formulae}. It states: \cr \emph{Vincenty's formulae are two related iterative methods used in geodesy to calculate the distance between two points on the surface of an spheroid, developed by Thaddeus Vincenty in 1975. They are based on the assumption that the figure of the Earth is an oblate spheroid, and hence are more accurate than methods such as great-circle distance which assume a spherical Earth.} \cr \cr \bold{Note:} this method assumes a locations are lat & lon given in WGS 84. } \details{ Typical useages are:\cr \enumerate{ \item a single start location, bearing and distance to give a single output location\cr --output would be a single destination location \item a single start location with one or more bearings or distances to give multiple output locations\cr --output would be a destination locations for each combination of bearings and distances \item multiple start locations with a single bearing or distance\cr --output would be a destination locations representing the bearing and distance from each of the start locations \item multiple start locations with multiple bearings or distances\cr --output would be a destination locations representing the combinations of bearings and distances from each of the start locations\cr -- NOTE that the bearing and distance vectors must be of the same length of the input lat and long. } See examples for all possible usages. } \examples{ ###single lat lons lats = -85; lons = 165 #single bearing & single distance destination(lats,lons,bearing=180,distance=500000) #multiple bearings destination(lats,lons,bearing=seq(0,360,length.out=9),distance=500000) #multiple bearings destination(lats,lons,bearing=45,distance=seq(0,5000000,length.out=11)) #multiple bearings, multiple distances destination(lats,lons,bearing=seq(0,360,length.out=9), distance=seq(0,5000000,length.out=11)) ###multiple lat lons lats = seq(-90,90,length.out=9); lons = seq(-180,180,length.out=9) #multiple lat lons but single bearings / distances destination(lats,lons,bearing=45,distance=500000) #different bearings for each lat lon destination(lats,lons,bearing=seq(0,360,length.out=9),distance=500000) #different distances for each lat lon destination(lats,lons,bearing=45,distance=seq(0,5000000,length.out=9)) #different bearings & distances for each lat lon destination(lats,lons,bearing=seq(0,360,length.out=9), distance=seq(0,5000000,length.out=9)) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } \references{ Vincenty, T. 1975. Direct and Inverse Solutions of Geodesics on the Ellipsoid with application of Nested Equations. Survey Review, vol XXII no 176. \url{http://www.ngs.noaa.gov/PUBS_LIB/inverse.pdf} } SDMTools/man/getXYcoords.Rd0000644000176000001440000000131712370121703015260 0ustar ripleyusers\name{getXYcoords} \alias{getXYcoords} \title{Computes the X and Y Coordinates of the Pixels of a Raster Map} \usage{ getXYcoords(w) } \arguments{ \item{w}{an object of class \code{asc}.} } \value{ Returns a list with two components: \item{x}{the x coordinates of the columns of pixels of the map} \item{y}{the y coordinates of the rows of pixels of the map} } \description{ \code{getXYcoords} computes the geographical coordinates of the rows and columns of pixels of a raster map of class \code{asc}. Code & helpfile were modified from adehabitat package. } \examples{ tasc = as.asc(matrix(rep(x=1:10, times=1000),nr=100)); print(tasc) getXYcoords(tasc) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } SDMTools/man/legend.gradient.Rd0000644000176000001440000000333512370121703016042 0ustar ripleyusers\name{legend.gradient} \alias{legend.gradient} \title{Legend Gradient} \usage{ legend.gradient(pnts, cols = heat.colors(100), limits = c(0, 1), title = "Legend", ...) } \arguments{ \item{pnts}{x and y coordinates of the gradient location in the plot} \item{cols}{a set of 2 or more colors used in the image, to create the gradient} \item{limits}{to label the min and max values of the gradient in the legend} \item{title}{to specify the title of the legend} \item{...}{other graphical parameters defined by image() or plot()} } \value{ nothing is returned, a gradient legend is added to a plot or a image. } \description{ \code{legend.gradient} creates and displays a gradient legend on a plot or image file. The place and size of the legend is defined by coordinates, previously identified. } \examples{ #define a simple binary matrix tmat = { matrix(c( 0,0,0,1,0,0,1,1,0,1, 0,0,1,0,1,0,0,0,0,0, 0,1,NA,1,0,1,0,0,0,1, 1,0,1,1,1,0,1,0,0,1, 0,1,0,1,0,1,0,0,0,1, 0,0,1,0,1,0,0,1,1,0, 1,0,0,1,0,0,1,0,0,0, 0,1,0,0,0,1,0,NA,NA,NA, 0,0,1,1,1,0,0,NA,NA,NA, 1,1,1,0,0,0,0,NA,NA,NA),nr=10,byrow=TRUE) } #do the connected component labeling tasc = ConnCompLabel(tmat) # Create a color ramp colormap=c("grey","yellow","yellowgreen","olivedrab1","lightblue4") #create an image image(tasc,col=colormap, axes=FALSE, xlab="", ylab="", ann=FALSE) #points for the gradient legend pnts = cbind(x =c(0.8,0.9,0.9,0.8), y =c(1.0,1.0,0.8,0.8)) #create the gradient legend legend.gradient(pnts,colormap,c("Low","High")) } \author{ Lorena Falconi \email{lorefalconi@gmail.com} } SDMTools/man/ClassStat.Rd0000644000176000001440000001417512370121703014715 0ustar ripleyusers\name{ClassStat} \alias{ClassStat} \title{Landscape Class Statistics} \usage{ ClassStat(mat, cellsize = 1, bkgd = NA, latlon = FALSE) } \arguments{ \item{mat}{a matrix of data with patches identified as classes (unique integer values) as e.g., a binary lanscape of a species distribution or a vegetation map. Matrix can be a raster of class 'asc' (adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package)} \item{cellsize}{cell size (in meters) is a single value representing the width/height of cell edges (assuming square cells)} \item{bkgd}{the background value for which statistics will not be calculated} \item{latlon}{boolean value representing if the data is geographic. If latlon == TRUE, matrix must be of class 'asc', 'RasterLayer' or 'SpatialGridDataFrame'} } \value{ a data.frame listing \item{class}{a particular patch type from the original input matrix (\code{mat}).} \item{n.patches}{the number of patches of a particular patch type or in a class.} \item{total.area}{the sum of the areas (m2) of all patches of the corresponding patch type.} \item{prop.landscape}{the proportion of the total lanscape represented by this class} \item{patch.density}{the numbers of patches of the corresponding patch type divided by total landscape area (m2).} \item{total.edge}{the total edge length of a particular patch type.} \item{edge.density}{edge length on a per unit area basis that facilitates comparison among landscapes of varying size.} \item{landscape.shape.index}{a standardized measure of total edge or edge density that adjusts for the size of the landscape.} \item{largest.patch.index}{largest patch index quantifies the percentage of total landscape area comprised by the largest patch.} \item{mean.patch.area}{average area of patches.} \item{sd.patch.area}{standard deviation of patch areas.} \item{min.patch.area}{the minimum patch area of the total patch areas. } \item{max.patch.area}{the maximum patch area of the total patch areas.} \item{perimeter.area.frac.dim}{perimeter-area fractal dimension equals 2 divided by the slope of regression line obtained by regressing the logarithm of patch area (m2) against the logarithm of patch perimeter (m).} \item{mean.perim.area.ratio}{the mean of the ratio patch perimeter. The perimeter-area ratio is equal to the ratio of the patch perimeter (m) to area (m2).} \item{sd.perim.area.ratio}{standard deviation of the ratio patch perimeter.} \item{min.perim.area.ratio}{minimum perimeter area ratio} \item{max.perim.area.ratio}{maximum perimeter area ratio.} \item{mean.shape.index}{mean of shape index} \item{sd.shape.index}{standard deviation of shape index.} \item{min.shape.index}{the minimum shape index.} \item{max.shape.index}{the maximum shape index.} \item{mean.frac.dim.index}{mean of fractal dimension index.} \item{sd.frac.dim.index}{standard deviation of fractal dimension index.} \item{min.frac.dim.index}{the minimum fractal dimension index.} \item{max.frac.dim.index}{the maximum fractal dimension index.} \item{total.core.area}{the sum of the core areas of the patches (m2).} \item{prop.landscape.core}{proportional landscape core} \item{mean.patch.core.area}{mean patch core area.} \item{sd.patch.core.area}{standard deviation of patch core area.} \item{min.patch.core.area}{the minimum patch core area.} \item{max.patch.core.area}{the maximum patch core area.} \item{prop.like.adjacencies}{calculated from the adjacency matrix, which shows the frequency with which different pairs of patch types (including like adjacencies between the same patch type) appear side-by-side on the map (measures the degree of aggregation of patch types).} \item{aggregation.index}{computed simply as an area-weighted mean class aggregation index, where each class is weighted by its proportional area in the landscape.} \item{lanscape.division.index}{based on the cumulative patch area distribution and is interpreted as the probability that two randomly chosen pixels in the landscape are not situated in the same patch} \item{splitting.index}{based on the cumulative patch area distribution and is interpreted as the effective mesh number, or number of patches with a constant patch size when the landscape is subdivided into S patches, where S is the value of the splitting index.} \item{effective.mesh.size}{equals 1 divided by the total landscape area (m2) multiplied by the sum of patch area (m2) squared, summed across all patches in the landscape.} \item{patch.cohesion.index}{measures the physical connectedness of the corresponding patch type.} } \description{ \code{ClassStat} calculates the class statistics for patch types identified in a matrix of data or in a raster of class 'asc' (SDMTools & adehabitat packages), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package). } \details{ The class statistics are based on statistics calculated by fragstats \url{http://www.umass.edu/landeco/research/fragstats/fragstats.html}. } \examples{ #define a simple binary matrix tmat = { matrix(c( 0,0,0,1,0,0,1,1,0,1, 0,0,1,0,1,0,0,0,0,0, 0,1,NA,1,0,1,0,0,0,1, 1,0,1,1,1,0,1,0,0,1, 0,1,0,1,0,1,0,0,0,1, 0,0,1,0,1,0,0,1,1,0, 1,0,0,1,0,0,1,0,0,1, 0,1,0,0,0,1,0,0,0,1, 0,0,1,1,1,0,0,0,0,1, 1,1,1,0,0,0,0,0,0,1),nr=10,byrow=TRUE) } #do the connected component labelling ccl.mat = ConnCompLabel(tmat) ccl.mat image(t(ccl.mat[10:1,]),col=c('grey',rainbow(length(unique(ccl.mat))-1))) #calculate the patch statistics ps.data = PatchStat(ccl.mat) ps.data #calculate the class statistics cl.data = ClassStat(tmat) cl.data #identify background data is 0 cl.data = ClassStat(tmat,bkgd=0) cl.data } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } \references{ McGarigal, K., S. A. Cushman, M. C. Neel, and E. Ene. 2002. FRAGSTATS: Spatial Pattern Analysis Program for Categorical Maps. Computer software program produced by the authors at the University of Massachusetts, Amherst. Available at the following web site: \url{www.umass.edu/landeco/research/fragstats/fragstats.html} } \seealso{ \code{\link{PatchStat}}, \code{\link{ConnCompLabel}} } SDMTools/man/grid.info.Rd0000644000176000001440000000324712370121703014671 0ustar ripleyusers\name{grid.info} \alias{grid.info} \title{Grid Information from Geographic (lat lon) Projections} \usage{ grid.info(lats, cellsize, r = 6378137) } \arguments{ \item{lats}{is a vector of latitudes representing the midpoint of grid cells} \item{cellsize}{is a single value (assuming square cells) or a two value vector (rectangular cells) representing the height (latitude) and width (longitude) of the cells} \item{r}{is a single value representing the radius of the globe in m. Default is for the WGS84 elipsoid} } \value{ a data.frame listing: \item{lat}{the latitude representing the midpoint of the cell} \item{top}{length of the top of the cell (m)} \item{bottom}{length of the bottom of the cell (m)} \item{side}{length of the side of the cell (m)} \item{diagnal}{length of the diagnals of the cell (m)} \item{area}{area of the cell (m2)} } \description{ Since spatial grids in geographic projections do not have equal area or perimeters, \code{grid.info} extracts perimeter & area related information for latitudinal bands with differing longitudinal widths. \cr\cr Outputs lengths are in m using Vincenty's equation (\code{distance})and areas in m2. Surface areas are calculated summing surface areas of spherical polygons as estimated using l'Huiller's formula. } \examples{ #show output for latitudes from -87.5 to 87.5 at 5 degree intervals grid.info(lats=seq(-87.5,87.5,5), 5) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } \references{ information on l'Huiller's formula \url{http://williams.best.vwh.net/avform.htm for more info)} code for estimating area of polygon on sphere was modified from \url{http://forum.worldwindcentral.com/showthread.php?t=20724} } SDMTools/man/Scalebar.Rd0000644000176000001440000000231412370121703014520 0ustar ripleyusers\name{Scalebar} \alias{Scalebar} \title{Scalebar for Projected Maps} \usage{ Scalebar(x, y, distance, unit = "km", scale = 1, t.cex = 0.8) } \arguments{ \item{x}{the x-axis position for the lower left corner of the bar} \item{y}{the x-axis position for the lower left corner of the bar} \item{distance}{the distance for which the scale bar should represent} \item{unit}{the units to report as the scaling} \item{scale}{the scaling factor to rescale the distance to a different unit. e.g., if your map is in m and want the scalebar to be in km, use a scale of 0.01} \item{t.cex}{the scaling of the font size to be used for the scalebar} } \value{ nothing is returned, simply a scalebar is added to a plot. } \description{ \code{Scalebar} adds a distance scalebar onto a projected map. It is not appropriate for geographic projections. } \examples{ #create a simple object of class 'asc' tasc = as.asc(matrix(1:50,nr=50,nc=50)); print(tasc) #plot the image image(tasc,axes=FALSE,ann=FALSE) #add a distance scalebar Scalebar(x=5,y=5,distance=20) #show values in km Scalebar(x=5,y=10,distance=20,unit='m',scale=1000) #show values in meters } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } SDMTools/man/SigDiff.Rd0000644000176000001440000000703512370121703014324 0ustar ripleyusers\name{SigDiff} \alias{ImageDiff} \alias{SigDiff} \title{Identify Regions of Significant Differences} \usage{ SigDiff(x, y, pattern = TRUE) ImageDiff(tasc, sig.levels = c(0.025, 0.975), tcol = terrain.colors(3), ...) } \arguments{ \item{x}{a vector or matrix of data; the matrix can be of can be a raster of class 'asc' (adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package)} \item{y}{a vector or matrix of data with the same dimensions and class of 'x'} \item{pattern}{logical value defining if differences are respective to relative patterning (TRUE) or absolute values (FALSE)} \item{tasc}{a matrix of probability values (0 to 1) likely created by \code{SigDiff}; The matrix can be a raster of class 'asc' (adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package)} \item{sig.levels}{the significance levels to define significantly above and below. Default settings represent significance at the 0.05 level} \item{tcol}{a set of 3 colors for use in the image to represent significantly lower or greater, and not significant} \item{...}{other graphical parameters defined by image() or plot()} } \value{ \code{SigDiff} returns a vector or matrix of the same dimensions and class of the input representing the significance of the pairwise difference relative to the mean and variance of all differences between the two inputs. \cr \cr \code{ImageDiff} returns nothing but creates an image of the areas of significant differences } \description{ \code{SigDiff} computes the significance of the pairwise differences relative to the mean and variance of all differences between the two input datasets. This is useful for identifying regions of significant difference between two datasets (e.g., different DEMs (Januchowski et al. 2010) or different species distribution model predictions (Bateman et al 2010)). \cr \cr \code{ImageDiff} is a wrapper to the image.asc command in adehabitat package that uses the result from \code{SigDiff} to create an image mapping the regions of significant differences (positive and negative). \cr \cr \bold{NOTE:} it is assumed the input data are of the same extent and cellsize. } \examples{ #create some simple objects of class 'asc' tasc = as.asc(matrix(1:50,nr=50,nc=50)); print(tasc) #modify the asc objects so that they are slightly different tasc1 = tasc + runif(n = 2500, min = -1, max = 1) tasc2 = tasc + rnorm(n = 2500, mean = 1, sd = 1) #create graphical representation par(mfrow=c(2,2),mar=c(1,1,4,1)) image(tasc1,main='first grid',axes=FALSE) image(tasc2,main='second grid',axes=FALSE) #get significant difference by spatial patterning out = SigDiff(tasc1,tasc2) ImageDiff(out,main="Pattern Differences",axes=FALSE) #get significant difference out = SigDiff(tasc1,tasc2,pattern=FALSE) ImageDiff(out,main="Absolute Differences",axes=FALSE) legend('topleft',legend=c('-ve','ns','+ve'),title='significance', fill=terrain.colors(3),bg='white') } \author{ Stephanie Januchowski \email{stephierenee@gmail.com} } \references{ Januchowski, S., Pressey, B., Vanderwal, J. & Edwards, A. (2010) Characterizing errors in topographic models and estimating the financial costs of accuracy. International Journal of Geographical Information Science, In Press. \cr \cr Bateman, B.L., VanDerWal, J., Williams, S.E. & Johnson, C.N. (2010) Inclusion of biotic interactions in species distribution models improves predictions under climate change: the northern bettong Bettongia tropica, its food resources and a competitor. Journal of Biogeography, In Review. } SDMTools/man/extract.data.Rd0000644000176000001440000000277712370121703015403 0ustar ripleyusers\name{extract.data} \alias{extract.data} \title{Spatial Join of Points with Raster Grids} \usage{ \method{extract}{data}(pts, x) } \arguments{ \item{pts}{a two-column data frame or matrix with the x and y coordinates of the locations of interest.} \item{x}{a raster matrix of class 'asc' (this and the adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package)} } \value{ Returns a vector equal in length to the number of locations in pnts. } \description{ \code{extract.data} extracts data from raster object of class 'asc' (this and the adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package) at specified locations. This represents a faster version of 'join.asc' of the adehabitat package that assumes all locations are within the map extents. \cr \cr \bold{Note:} there is no interpolation done here. The values reported are simply the values of the raster cell the point falls into. } \details{ Implements a faster version of 'join.asc' from the adehabitat package. \cr \cr \bold{NOTE:} this assumes all locations are within the extent of the raster map. Values outside the extent will be given a value of NA. } \examples{ #create a simple object of class 'asc' tasc = as.asc(matrix(1:50,nr=50,nc=50)); print(tasc) #define some point locations points = data.frame(x=runif(25,1,50),y=runif(25,1,50)) #extract the data points$values = extract.data(points,tasc) #show the data print(points) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } SDMTools/man/asc.from.raster.Rd0000644000176000001440000000442012370121703016013 0ustar ripleyusers\name{asc.from.raster} \alias{as.asc} \alias{asc.from.raster} \alias{asc.from.sp} \alias{raster.from.asc} \alias{sp.from.asc} \title{Raster conversion functions for adehabitat, raster and sp packages} \usage{ asc.from.raster(x) raster.from.asc(x, projs = NA) asc.from.sp(x) sp.from.asc(x, projs = CRS(as.character(NA))) as.asc(x, xll = 1, yll = 1, cellsize = 1, type = c("numeric", "factor"), lev = levels(factor(x))) } \arguments{ \item{x}{is an object of class 'asc', 'RasterLayer' or 'SpatialGridDataFrame'. For the function \code{as.asc}, a matrix} \item{projs}{is a CRS projection string of the Proj4 package} \item{xll}{the x coordinate of the center of the lower left pixel of the map} \item{yll}{the y coordinate of the center of the lower left pixel of the map} \item{cellsize}{the size of a pixel on the studied map} \item{type}{a character string. Either \code{"numeric"} or \code{"factor"}} \item{lev}{if \code{type = "factor"}, either a vector giving the labels of the factor levels, or the name of a file giving the correspondence table of the map (see adehabitat as.asc helpfile details)} } \value{ Returns an object of class requested. } \description{ \code{asc.from.raster} and \code{asc.from.sp} extracts data from objects of class 'RasterLayer' (raster package) and class 'SpatialGridDataFrame' (sp package) into an object of class 'asc' (SDMTools & adehabitat packages). \cr \cr \code{raster.from.asc} and \code{sp.from.asc} does the reverse.\cr\cr \code{as.asc} creates an object of class 'asc' (SDMTools & adehabitat packages) from a matrix of data. Code & helpfile associated with \code{as.asc} were modified from adehabitat package. } \details{ These functions provide capabilities of using scripts / functions from many packages including adehabitat (plus e.g, SDMTools), sp (plus e.g., maptools, rgdal) and raster. } \examples{ #create a simple object of class 'asc' tasc = as.asc(matrix(rep(x=1:10, times=1000),nr=100)); print(tasc) str(tasc) #convert to RasterLayer traster = raster.from.asc(tasc) str(traster) #convert to SpatialGridDataFrame tgrid = sp.from.asc(tasc) str(tgrid) #create a basic object of class asc tasc = as.asc(matrix(rep(x=1:10, times=1000),nr=100)); print(tasc) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } SDMTools/man/PatchStat.Rd0000644000176000001440000000651612370121703014707 0ustar ripleyusers\name{PatchStat} \alias{PatchStat} \title{Landscape Patch Statistics} \usage{ PatchStat(mat, cellsize = 1, latlon = FALSE) } \arguments{ \item{mat}{a matrix of data with individual patches identified as with \code{ConnCompLabel}; The matrix can be a raster of class 'asc' (this & adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package)} \item{cellsize}{cell size (in meters) is a single value representing the width/height of cell edges (assuming square cells)} \item{latlon}{boolean value representing if the data is geographic. If latlon == TRUE, matrix must be of class 'asc', 'RasterLayer' or 'SpatialGridDataFrame'} } \value{ a data.frame listing \item{patchID}{the unique ID for each patch.} \item{n.cell}{the number of cells for each patch, specified in square meters.} \item{n.core.cell}{the number of cells in the core area, without the edge area.} \item{n.edges.perimeter}{the number of outer perimeter cell edges of the patch.} \item{n.edges.internal}{the number of internal cell edges of the patch.} \item{area}{the area of each patch comprising a landscape mosaic.} \item{core.area}{represents the interior area of the patch, greater than the specified depth-of-edge distance from the perimeter.} \item{perimeter}{the perimeter of the patch, including any internal holes in the patch, specified in meters.} \item{perim.area.ratio}{the ratio of the patch perimeter (m) to area (m2).} \item{shape.index}{the shape complexity, sum of each patches perimeter divided by the square root of patch area.} \item{frac.dim.index}{fractal dimension index reflects shape complexity across a range of spatial scales; approaches 2 times the logarithm of patch perimeter (m) divided by the logarithm of patch area (m2).} \item{core.area.index}{quantifies core area as a percentage of patch area.} } \description{ \code{PatchStat} calculates the patch statistics for individual patches identified in a matrix of data. The matrix can be a raster of class 'asc' (adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package). } \details{ The patch statistics are based on statistics calculated by fragstats \url{http://www.umass.edu/landeco/research/fragstats/fragstats.html}. } \examples{ #define a simple binary matrix tmat = { matrix(c( 0,0,0,1,0,0,1,1,0,1, 0,0,1,0,1,0,0,0,0,0, 0,1,NA,1,0,1,0,0,0,1, 1,0,1,1,1,0,1,0,0,1, 0,1,0,1,0,1,0,0,0,1, 0,0,1,0,1,0,0,1,1,0, 1,0,0,1,0,0,1,0,0,1, 0,1,0,0,0,1,0,0,0,1, 0,0,1,1,1,0,0,0,0,1, 1,1,1,0,0,0,0,0,0,1),nr=10,byrow=TRUE) } #do the connected component labelling ccl.mat = ConnCompLabel(tmat) ccl.mat image(t(ccl.mat[10:1,]),col=c('grey',rainbow(length(unique(ccl.mat))-1))) #calculate the patch statistics ps.data = PatchStat(ccl.mat) ps.data } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } \references{ McGarigal, K., S. A. Cushman, M. C. Neel, and E. Ene. 2002. FRAGSTATS: Spatial Pattern Analysis Program for Categorical Maps. Computer software program produced by the authors at the University of Massachusetts, Amherst. Available at the following web site: \url{www.umass.edu/landeco/research/fragstats/fragstats.html} } \seealso{ \code{\link{ClassStat}}, \code{\link{ConnCompLabel}} } SDMTools/man/wt.mean.Rd0000644000176000001440000000214312370121703014355 0ustar ripleyusers\name{wt.mean} \alias{wt.mean} \alias{wt.sd} \alias{wt.var} \title{Weighted mean, variance and standard deviation calculations} \usage{ wt.mean(x, wt) wt.var(x, wt) wt.sd(x, wt) } \arguments{ \item{x}{is a vector of numerical data.} \item{wt}{is a vector of equal length to \code{x} representing the weights.)} } \value{ returns a single value from analysis requested. } \description{ \code{wt.mean} calculates the mean given a weighting of the values. \cr \cr \code{wt.var} is the unbiased variance of the weighted mean calculation using equations of GNU Scentific Library (\url{http://www.gnu.org/software/gsl/manual/html_node/Weighted-Samples.htmland}.\cr\cr \code{wt.sd} is the standard deviation of the weighted mean calculated as the sqrt of \code{wt.var}. \cr \cr \bold{Note:} NA data is automatically ommitted from analysis. } \examples{ #define simple data x = 1:25 # set of numbers wt = runif(25) #some arbitrary weights #display means & variances (unweighted and then weighted) mean(x); wt.mean(x,wt) var(x); wt.var(x,wt) sd(x); wt.sd(x,wt) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } SDMTools/man/read.asc.Rd0000644000176000001440000000612412370121703014467 0ustar ripleyusers\name{read.asc} \alias{image.asc} \alias{print.asc} \alias{read.asc} \alias{read.asc.gz} \alias{write.asc} \alias{write.asc.gz} \alias{write.asc2} \alias{write.asc2.gz} \title{ESRI ASCII Raster File Import And Export} \usage{ read.asc(file, gz = FALSE) read.asc.gz(file) write.asc(x, file, gz = FALSE) write.asc.gz(x, file) write.asc2(x, file, sigdig = 0, gz = FALSE) write.asc2.gz(x, file, sigdig = 0) \method{image}{asc}(x, col = gray((240:1)/256), clfac = NULL, ...) \method{print}{asc}(x, ...) } \arguments{ \item{file}{a character string representing the filename of the input/output file. The file extension should always be '.asc'.} \item{gz}{defines if the object is or should be compressed using gzip} \item{x}{an object of class 'asc' as defined in the adehabitat package} \item{sigdig}{is the number of significant digits to write when creating the ascii grid file} \item{col}{for maps of type \code{"numeric"}, the colors to be used (see \code{help(par)})} \item{clfac}{for maps of type \code{"factor"}, a character vector giving the names of colors for each level of the factor (see \code{help(colasc)})} \item{\dots}{additional arguments to be passed to the generic function \code{image} or \code{print}} } \value{ Returns a raster matrix of the class 'asc' defined in the adehabitat package with the following attributes: \item{xll}{the x coordinate of the center of the lower left pixel of the map} \item{yll}{the y coordinate of the center of the lower left pixel of the map} \item{cellsize}{the size of a pixel on the studied map} \item{type}{either 'numeric' or 'factor'} \item{levels}{if type = 'factor', the levels of the factor.} } \description{ \code{read.asc} and \code{read.asc.gz} reads ESRI ArcInfo ASCII raster file either uncompressed or compressed using gzip. \cr \cr \code{write.asc} and \code{write.asc.gz} writes an asc object to a ESRI ArcInfo ASCII raster file. The output can be either compressed or uncompressed. \cr \cr These functions are faster methods based on the adehabitat import.asc and export.asc.\cr\cr \code{write.asc2} and \code{write.asc2.gz} are even faster implementations but have less error checking. \cr \cr \code{image.asc} and \code{print.asc} are generic methods associated with plotting & summarizing data of class 'asc'; they were modified from adehabitat package. } \details{ Implements a faster version of import.asc or export.asc from the adehabitat package. In addition, files can be read in and written to in gzip compressed format.\cr\cr Generic methods of print and image were modified from adehabitat. Further details of them are found there. } \examples{ #create a simple object of class 'asc' tasc = as.asc(matrix(rep(x=1:10, times=1000),nr=100)); print(tasc) #write out the raster grid file write.asc(tasc,'t.raster.asc') write.asc.gz(tasc,'t.raster.asc') #actually save file name as t.raster.asc.gz #read in the raster grid files tasc2 = read.asc('t.raster.asc') tasc3 = read.asc.gz('t.raster.asc.gz') #remove the temporary raster unlink(c('t.raster.asc','t.raster.asc.gz')) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } SDMTools/man/grid.area.Rd0000644000176000001440000000176112370121703014645 0ustar ripleyusers\name{grid.area} \alias{grid.area} \alias{grid.perimeter} \title{Create a Grid of Cell Areas or Perimeters} \usage{ grid.area(mat) grid.perimeter(mat) } \arguments{ \item{mat}{a matrix representing a raster of class 'asc' (this & adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package)} } \value{ \item{grid.area}{Returns an ascii grid file which contains the values of the area in each cell.} \item{grid.perimter}{Returns an ascii grid file which contains the values of the perimeter in each cell. } } \description{ Creates a grid of cell areas or perimeters for spatial grids in geographic (lat-lon) projections. } \examples{ #Create an ascii file y=seq(10,50,0.5) x=seq(140,180,0.5) cellsize=0.5 data1=sample(160,140) out1.asc=as.asc(matrix(data1,nc=y, nr=x), xll=min(x), yll=min(y), cellsize=cellsize) grid.area(out1.asc)[,] grid.perimeter(out1.asc)[,] } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} & Lorena Falconi \email{lorefalconi@gmail.com} } SDMTools/man/confusion.matrix.Rd0000644000176000001440000000224212370121703016312 0ustar ripleyusers\name{confusion.matrix} \alias{confusion.matrix} \title{Confusion Matrix} \usage{ confusion.matrix(obs, pred, threshold = 0.5) } \arguments{ \item{obs}{a vector of observed values which must be 0 for absences and 1 for occurrences} \item{pred}{a vector of the same length as \code{obs} representing the predicted values. Values must be between 0 & 1 prepresenting a likelihood.} \item{threshold}{a single threshold value between 0 & 1} } \value{ Returns a confusion matrix (table) of class 'confusion.matrix' representing counts of true & false presences and absences. } \description{ \code{confusion.matrix} calculates a confusion matrix. \cr \cr \bold{Note:} this method will exclude any missing data } \examples{ #create some data obs = c(sample(c(0,1),20,replace=TRUE),NA); obs = obs[order(obs)] pred = runif(length(obs),0,1); pred = pred[order(pred)] #calculate the confusion matrix confusion.matrix(obs,pred,threshold=0.5) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } \seealso{ \code{\link{auc}}, \code{\link{Kappa}}, \code{\link{omission}}, \code{\link{sensitivity}}, \code{\link{specificity}}, \code{\link{prop.correct}}, \code{\link{accuracy}} } SDMTools/man/pnt.in.poly.Rd0000644000176000001440000000364412370121703015203 0ustar ripleyusers\name{pnt.in.poly} \alias{pnt.in.poly} \title{Point in Polygon} \usage{ pnt.in.poly(pnts, poly.pnts) } \arguments{ \item{pnts}{a 2-column matrix or dataframe defining locations of the points of interest} \item{poly.pnts}{a 2-column matrix or dataframe defining the locations of vertices of the polygon of interest} } \value{ A 3-column dataframe where the first 2 columns are the original locations of the points. The third column (names pip) is a vector of binary values where 0 represents points not with the polygon and 1 within the polygon. } \description{ \code{pnt.in.poly} works out if 2D points lie within the boundaries of a defined polygon. \cr \cr \bold{Note:} Points that lie on the boundaries of the polygon or vertices are assumed to be within the polygon. } \details{ The algorithm implements a sum of the angles made between the test point and each pair of points making up the polygon. The point is interior if the sum is 2pi, otherwise, the point is exterior if the sum is 0. This works for simple and complex polygons (with holes) given that the hole is defined with a path made up of edges into and out of the hole. \cr \cr This sum of angles is not able to consistently assign points that fall on vertices or on the boundary of the polygon. The algorithm defined here assumes that points falling on a boundary or polygon vertex are part of the polygon. } \examples{ #define the points and polygon pnts = expand.grid(x=seq(1,6,0.1),y=seq(1,6,0.1)) polypnts = cbind(x=c(2,3,3.5,3.5,3,4,5,4,5,5,4,3,3,3,2,2,1,1,1,1,2), y=c(1,2,2.5,2,2,1,2,3,4,5,4,5,4,3,3,4,5,4,3,2,2)) #plot the polygon and all points to be checked plot(rbind(polypnts, pnts)) polygon(polypnts,col='#99999990') #create check which points fall within the polygon out = pnt.in.poly(pnts,polypnts) head(out) #identify points not in the polygon with an X points(out[which(out$pip==0),1:2],pch='X') } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } SDMTools/man/quick.map.Rd0000644000176000001440000000557112370121703014704 0ustar ripleyusers\name{quick.map} \alias{quick.map} \title{Quick Map} \usage{ quick.map(sdm.asc, threshold, bkgd.col = "grey", cols = heat.colors(100), zlim = NULL, pnts = NULL, ...) } \arguments{ \item{sdm.asc}{an object of class 'asc' (adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package)} \item{threshold}{to indicate the threshold limit of \code{sdm.asc}} \item{bkgd.col}{to specify the background color} \item{cols}{a set of 2 or more colors to be used in the image and the gradient legend} \item{zlim}{to specify the upper an lower limits, which are going to be the labels of the gradient legend} \item{pnts}{location information for adding the \code{legend.gradient}} \item{...}{other graphical parameters defined by image() or plot()} } \value{ Nothing is returned, an image is created. } \description{ \code{quick.map} creates and displays an image, identifying the threshold as the background color, and create the gradient legend in the map. } \details{ An image is created of the map requested. A gradient legend (\code{\link{legend.gradient}}) will be added if \code{pnts} (the position of the legend) is specified. } \examples{ #create a matrix tmat = { matrix(c( 0,0,0,1,0,0,1,1,0,1, 0,0,1,0,1,0,0,0,0,0, 0,1,NA,1,0,1,0,0,0,1, 1,0,1,1,1,0,1,0,0,1, 0,1,0,1,0,1,0,0,0,1, 0,0,1,0,1,0,0,1,1,0, 1,0,0,1,0,0,1,0,0,0, 0,1,0,0,0,1,0,0,0,1, 0,0,1,1,1,0,0,1,1,1, 1,1,1,0,0,0,0,1,1,1),nr=10,byrow=TRUE) } #do the connected component labeling tasc = ConnCompLabel(tmat) #put in the gradient scale pnts = cbind(x =c(1.1,1.2,1.2,1.1), y =c(0.9,0.9,0.7,0.7)) # Set the map and gradient leyend colors tasc.col=colorRampPalette(c("yellow","orange", "red"))(5) #Create an image with the gradient legend quick.map(tasc,0.09,bkgd.col = 'darkgrey', cols=tasc.col, axes=FALSE, xlim=c(0.0,1.35)) ######################### # Create an image with two colors: below the threshold and # above the threshold # The next version of SDM Tools will let you create the legend.gradient # at a specific side of your image, and the user would not need to set # the coordinates. # To create the legend.gradient at the bottom left of your image without # setting up the coordinates at the image you can do this: xlim = c(-0.5,1) ylim = c(0,1) wid = diff(xlim)*0.05 ht = diff(ylim)*0.1 xvals = c(xlim[1]+wid,xlim[1]+2*wid,xlim[1]+2*wid,xlim[1]+wid) yvals = c(ylim[1]+ht,ylim[1]+ht,ylim[1]+2*ht,ylim[1]+2*ht) #Create the points for the legend.gradient pnts=(cbind(xvals,yvals)) # Set the images colors: above the threshold is black and # below the threshold is darkgrey. quick.map(tasc,0.09,bkgd.col = 'darkgrey', cols="black", axes=FALSE, xlim=c(-0.8, 1)) } \author{ Lorena Falconi \email{lorefalconi@gmail.com} } SDMTools/man/auc.Rd0000644000176000001440000000210212370121703013547 0ustar ripleyusers\name{auc} \alias{auc} \title{Area Under the Curve of the Reciever Operating Curve} \usage{ auc(obs, pred) } \arguments{ \item{obs}{a vector of observed values which must be 0 for absences and 1 for occurrences} \item{pred}{a vector of the same length as \code{obs} representing the predicted values. Values must be between 0 & 1 representing a likelihood.} } \value{ Returns a single value represting the AUC value. } \description{ \code{auc} estimates the AUC of the ROC using a Mann-Whitney U statistic. \cr \cr \bold{Note:} this method will exclude any missing data. } \examples{ #create some data obs = c(sample(c(0,1),20,replace=TRUE),NA) pred = runif(length(obs),0,1) #calculate AUC from the random data auc(obs,pred) #calculate an example 'perfect' AUC obs = obs[order(obs)] pred = pred[order(pred)] auc(obs,pred) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } \seealso{ \code{\link{Kappa}}, \code{\link{omission}}, \code{\link{sensitivity}}, \code{\link{specificity}}, \code{\link{prop.correct}}, \code{\link{confusion.matrix}}, \code{\link{accuracy}} } SDMTools/man/put.data.Rd0000644000176000001440000000246712370121703014535 0ustar ripleyusers\name{put.data} \alias{put.data} \title{Spatial Join of Points with Raster Grids - replace data} \usage{ put.data(pts, x) } \arguments{ \item{pts}{a three-column data frame or matrix with the x and y coordinates of the locations of interest and the third column being the z values to put in the ascii grid file.} \item{x}{a raster matrix of class 'asc' (this and the adehabitat package)} } \value{ Returns a raster matrix of class 'asc' equal in size to input 'x'. } \description{ \code{put.data} replaces data in raster object of class 'asc' (this and adehabitat package) at specified locations.\cr \cr \bold{Note:} there is no interpolation done here. The values given replace the values of the raster cell the point falls into. } \details{ Implements a faster version of 'join.asc' from the adehabitat package. \cr \cr \bold{NOTE:} this assumes all locations are within the extent of the raster map. Values outside the extent will be given a value of NA. } \examples{ #create a simple object of class 'asc' tasc = as.asc(matrix(1:50,nr=50,nc=50)); print(tasc) \dontrun{image(tasc)} #define some point locations points = data.frame(x=runif(25,1,50),y=runif(25,1,50),z=50) #put the new data tasc = put.data(points,tasc) #show the data \dontrun{image(tasc)} } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } SDMTools/man/slope.Rd0000644000176000001440000000352112370121703014127 0ustar ripleyusers\name{slope} \alias{aspect} \alias{slope} \title{Slope and aspect calculations} \usage{ slope(mat, latlon = FALSE) aspect(mat, latlon = FALSE) } \arguments{ \item{mat}{a matrix of data representing z heights. Matrix can be a raster of class 'asc' (adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package)} \item{latlon}{boolean value representing if the data is geographic.} } \value{ an object of the same class as \code{mat}. } \description{ \code{slope} and \code{aspect} calculates the slope and aspect of raster surfaces of class 'asc' (SDMTools & adehabitat packages), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package).\cr\cr Methods are based on Burrough and McDonell (1998). } \details{ Slope returns values representing the 'rise over run' with "run" units representing cellsize if \code{latlon}=FALSE or km if \code{latlon}=TRUE. This can be changed to percentage (multiply by 100) or to degrees by ATAN ( \code{output} ) * 57.29578.\cr\cr Aspect returns the direction (0 to 360) with North being 0. Values of -1 are flat areas with no slope or aspect.\cr\cr As this method requires information from the surrounding cells, missing data (NAs or edges) are populated with the value from the 'cell-of-interest'). } \examples{ #define a simple asc with some slope and direction tasc = as.asc(matrix(1:50,nr=10,nc=5),yll=75); tasc[,] slope(tasc)[,] #show the output of slope aspect(tasc)[,] #show the output of the aspect #define a FLAT simple asc tasc = as.asc(matrix(10,nr=10,nc=5),yll=75); tasc[,] slope(tasc)[,] #show the output of slope aspect(tasc)[,] #show the output of the aspect } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } \references{ Burrough, P. A. and McDonell, R.A., 1998. Principles of Geographical Information Systems (Oxford University Press, New York), p. 190. } SDMTools/man/accuracy.Rd0000644000176000001440000000433112370121703014577 0ustar ripleyusers\name{accuracy} \alias{accuracy} \title{Measures of Model Accuracy} \usage{ accuracy(obs, pred, threshold = 0.5) } \arguments{ \item{obs}{a vector of observed values which must be 0 for absences and 1 for occurrences} \item{pred}{a vector of the same length as \code{obs} representing the predicted values. Values must be between 0 & 1 prepresenting a likelihood.} \item{threshold}{this can be: \cr a) a single value representing a single threshold between 0 & 1; \cr b) a vector of threshold values between 0 & 1; OR \cr c) an integer value representing the number of equal interval threshold values between 0 & 1} } \value{ a data.frame with seven columns: \item{threshold}{the threshold values representing each row of data} \item{AUC}{the AUC given the defined threshold value} \item{ommission.rate}{the ommission rate as a proportion of true occurrences misidentified given the defined threshold value} \item{sensitivity}{the sensitivity given the defined threshold value} \item{specificity}{the specificity given the defined threshold value} \item{prop.correct}{the proportion of the presence and absence records correctly identified given the defined threshold value} \item{Kappa}{the Kappa statistic of the model given the defined threshold value} } \description{ \code{accuracy} estimates six measures of accuracy for presence-absence or presence-psuedoabsence data. These include AUC, ommission rates, sensitivity, specificity, proportion correctly identified and Kappa. \cr \cr \bold{Note:} this method will exclude any missing data. } \examples{ #create some data obs = c(sample(c(0,1),20,replace=TRUE),NA); obs = obs[order(obs)] pred = runif(length(obs),0,1); pred = pred[order(pred)] #calculate accuracy of the model with a single threshold value accuracy(obs,pred,threshold=0.5) #calculate accuracy given several defined thresholds accuracy(obs,pred,threshold=c(0.33,0.5,0.66)) #calculate accuracy given a number of equal interval thresholds accuracy(obs,pred,threshold=20) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } \seealso{ \code{\link{auc}}, \code{\link{Kappa}}, \code{\link{omission}}, \code{\link{sensitivity}}, \code{\link{specificity}}, \code{\link{prop.correct}}, \code{\link{confusion.matrix}} } SDMTools/man/lcmw.Rd0000644000176000001440000000363012370121703013750 0ustar ripleyusers\name{lcmw} \alias{lcmw} \title{Least Cost Moving Windows Calculation} \usage{ lcmw(mat, mw, mnc) } \arguments{ \item{mat}{a matrix of values that can be based on a raster dataset. Lower values should represent lower cost. The matrix can be a raster of class 'asc' (adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package)} \item{mw}{a distance-cost matrix to be applied to each cell of 'mat'. This matrix can be dispersal costs. Lower values should represent lower cost.} \item{mnc}{an integer value representing the radius for 'mw' in number of cells.} } \value{ A matrix of values of the same dimensions and class as input \code{mat} } \description{ This is a moving window that for each cell returns the minimum 'cost' based on surrounding data cells and some dispersal distance cost. } \details{ This method moves over the matrix of values, summing the moving window cost \code{mw} and the matrix \code{mat}, returning the minimum cost value. This was created to estimate the least cost path through time for all cells in a matrix (see example). } \examples{ #create a simple object of class 'asc' tasc = as.asc(matrix(1:100,nr=10,nc=10)); print(tasc) #show the input matrix print(tasc[1:10,1:10]) #vary the moving windows ###no cost window of 2 cell radius tcost = matrix(0,nr=5,nc=5); print(tcost) out = lcmw(tasc, tcost, 2); print(out[1:10,1:10]) ###no cost with a circular radius of 2 tcost = matrix(NA,nr=5,nc=5) #populate the distances for (y in 1:5){ for (x in 1:5){ tcost[y,x] = sqrt((3-y)^2 + (3-x)^2) } } #remove distance values > max.num.cells tcost[which(tcost>2)]=NA #no cost matrix tcost1 = tcost; tcost1[is.finite(tcost1)]=1; print(tcost1) out = lcmw(tasc, tcost1, 2); print(out[1:10,1:10]) #linear cost tcost = tcost/2; print(tcost) out = lcmw(tasc, tcost, 2); print(out[1:10,1:10]) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } SDMTools/man/compare.matrix.Rd0000644000176000001440000000350612370121703015741 0ustar ripleyusers\name{compare.matrix} \alias{compare.matrix} \title{Biplot Comparison of Matrices} \usage{ compare.matrix(x, y, nbins, ...) } \arguments{ \item{x}{a matrix of data; the matrix can be a raster of class 'asc' (adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package)} \item{y}{a matrix of data of the same extent, cell size and class as 'x'} \item{nbins}{number of equally spaced bins used to partition range of values in 'x' & 'y'} \item{...}{other graphical parameters defined by image(), contour(), or plot()} } \value{ Nothing is returned but images are created. } \description{ \code{compare.matrix} compares the values within two matrices (e.g., ESRI ArcInfo ASCII raster files) and produces a biplot that shows the frequency of each data combination shared between the matrices. The plot is overlayed with contour lines that demarcate parts of the the plot that share the same frequency of data combinations. \cr \cr \bold{NOTE:} it is assumed the matrices are of the same extent, cell size and scaled to be the same units. } \examples{ #create some simple objects of class 'asc' tasc = as.asc(matrix(rep(x=1:10, times=1000),nr=100)); print(tasc) #modify the asc objects so that they are slightly different tasc1 = tasc + runif(n = 10000, min = -1, max = 1) tasc2 = tasc + rnorm(n = 10000, mean = 1, sd = 1) #create some images #basic plot showing the density of data combinations shared #by the two matrices compare.matrix(tasc1,tasc2,20) #same as previous but with data partioned amoung more bins compare.matrix(tasc1,tasc2,50) #same as previous but altering the number of contour levels #and adding more graphical functions compare.matrix(tasc1,tasc2,50,nlevels=5, xlab='asc1',ylab='asc2', main='Comparison between asc and asc2', bg="grey") } \author{ Luke Shoo \email{luke.shoo@jcu.edu.au} } SDMTools/man/COGravity.Rd0000644000176000001440000000305312370121703014654 0ustar ripleyusers\name{COGravity} \alias{COGravity} \title{Centre of Gravity or Mass calculations for spatial data} \usage{ COGravity(x, y = NULL, z = NULL, wt = NULL) } \arguments{ \item{x}{a vector of e.g., longitudes or eastings, or a raster of class 'asc', 'RasterLayer' or 'SpatialGridDataFrame'.} \item{y}{a vector of e.g., latitudes or northings.} \item{z}{a vector of e.g., elevations.} \item{wt}{a vector or raster of class 'asc', 'RasterLayer' or 'SpatialGridDataFrame' representing weights for data.} } \value{ Returns a named vector of data representing the Centre of Gravity in x, y & z dimensions (depending on data supplied). } \description{ \code{COGravity} calculates the Centre of Gravity (or also known as Centre of Mass) for point or raster spatial data.\cr \cr \bold{Note:} NA data is automatically ommitted from analysis. } \details{ For raster-based data, if \code{wt} is missing, the values of the ascii are assumed to be the weights; otherwise, the values are assumed to be the \code{z} values. } \examples{ #create some points x = seq(154,110,length=25) y = seq(-10,-54,length=25) z = seq(100,200,length=25) wt = runif(25) #random weights #calculate the Centre of Gravity for these points COGravity(x,y,z,wt) #create a simple objects of class 'asc' x = as.asc(matrix(1:50,nr=50,nc=50)) wt = as.asc(matrix(runif(50),nr=50,nc=50)) #calculate COG with weighting defined in x COGravity(x) #calculate COG with weighting defined in wt (values in x are assumed elevation (z)) COGravity(x,wt=wt) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } SDMTools/man/optim.thresh.Rd0000644000176000001440000000467112370121703015440 0ustar ripleyusers\name{optim.thresh} \alias{optim.thresh} \title{Estimation of Optimal Threshold Values} \usage{ optim.thresh(obs, pred, threshold = 101) } \arguments{ \item{obs}{a vector of observed values which must be 0 for absences and 1 for occurrences} \item{pred}{a vector of the same length as \code{obs} representing the predicted values. Values must be between 0 & 1 representing a likelihood.} \item{threshold}{a single integer value representing the number of equal interval threshold values between 0 & 1} } \value{ Returns a list of the optimal thresholds for the different methods. If the list item is a single value, that is the optimal threshold but if two values are reported for the method, this represents the range in thresholds that are equal for that threshold selection method. \cr \cr The returned list includes the single or range in thresholds selected using the following methods: \item{min.occurence.prediction}{is the minimum prediction for the occurrence (presence) records} \item{mean.occurence.prediction}{is the mean prediction for the occurrence (presence) records} \item{'10.percent.omission'}{is the threshold value or range in values that excludes approx. 10 percent of the occurrence records} \item{'sensitivity=specificity'}{is the threshold value or range in values where sensitivity is equal to sensitivity} \item{'max.sensitivity+specificity'}{is the threshold value or range in values that maximizes sensitivity plus specificity} \item{maxKappa}{is the threshold value or range in values with the maximum Kappa statistic} \item{max.prop.correct}{is the threshold value or range in values with the maximum proportion of presence and absence records correctly identified} \item{min.ROC.plot.distance}{is the threshold value or range in values where the ROC curve is closest to point (0,1) (or perfect fit)} } \description{ \code{optim.thresh} estimates optimal threshold values given eight methods. \cr \cr \bold{Note:} this method will exclude any missing data. } \examples{ #create some data obs = c(sample(c(0,1),20,replace=TRUE),NA); obs = obs[order(obs)] pred = runif(length(obs),0,1); pred = pred[order(pred)] #calculate the optimal thresholds optim.thresh(obs,pred) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } \seealso{ \code{\link{accuracy}}, \code{\link{auc}}, \code{\link{Kappa}}, \code{\link{omission}}, \code{\link{sensitivity}}, \code{\link{specificity}}, \code{\link{prop.correct}}, \code{\link{confusion.matrix}} } SDMTools/man/Istat.Rd0000644000176000001440000000421112370121703014066 0ustar ripleyusers\name{Istat} \alias{Istat} \title{I Similarity Statistic for Quantifying Niche Overlap} \usage{ Istat(x, y, old = FALSE) } \arguments{ \item{x}{a vector or matrix of data; the matrix can be a raster of class 'asc' (adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package)} \item{y}{a vector or matrix of data with the same dimensions and class of 'x'} \item{old}{a boolean identifying if "old" equation is to be used (see description). This was kept for legacy issues.} } \value{ A single value that is the I similarity statistic } \description{ \code{Istat} computes the I similarity statistic of Warren et al. 2008. It is a method for defining niche overlap from predictions of species' distributions. \cr \cr \bold{NOTE:} it is assumed the input data are of the same extent and cellsize, and all values are positive. } \details{ The I similarity statistic sums the pair-wise differences between two predictions to create a single value representing the similarity of the two distributions. The I similarity statistic ranges from a value of 0, where two distributions have no overlap, to 1 where two distributions are identical (Warren et al., 2008). NOTE: updated to correct equation but not to worry about old... see explanation at \url{http://enmtools.blogspot.com.au/2010_09_01_archive.html}. } \examples{ #create some simple objects of class 'asc' tasc = as.asc(matrix(1:50,nr=50,nc=50)); print(tasc) #modify the asc objects so that they are slightly different tasc1 = tasc + runif(n = 2500, min = -1, max = 1) tasc2 = tasc + rnorm(n = 2500, mean = 1, sd = 1) #ensure all data is positive tasc1 = abs(tasc1) tasc2 = abs(tasc2) #calculate the I similarity statistic I = Istat(tasc1,tasc2) print(I) #high niche overlap #using a more variable map tasc2 = tasc + rnorm(n = 2500, mean = 25, sd = 15);tasc2 = abs(tasc2) I = Istat(tasc1,tasc2) print(I) #lower niche overlap } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } \references{ Warren, D. L., R. E. Glor, M. Turelli, and D. Funk. 2008. Environmental Niche Equivalency versus Conservatism: Quantitative Approaches to Niche Evolution. Evolution 62:2868-2883. } SDMTools/man/distance.Rd0000644000176000001440000000620112370121703014575 0ustar ripleyusers\name{distance} \alias{distance} \title{Vincenty Direct Calculation of Distance and Direction} \source{ The source code for the distance algorithm here was modified from \url{http://www.movable-type.co.uk/scripts/latlong-vincenty.html}.\cr \cr Distances were validated against Geoscience Australia calculations (\url{http://www.ga.gov.au/geodesy/datums/vincenty_inverse.jsp}).\cr \cr Bearings were from multiple sources including \url{http://williams.best.vwh.net/avform.htm#Crs}. } \usage{ distance(lat1, lon1 = NULL, lat2 = NULL, lon2 = NULL, bearing = FALSE) } \arguments{ \item{lat1}{a single value or vector of values representing latitude in decimal degrees from -90 to 90 degrees. Alternatively, a data.frame or matrix can be used here with each column representing lat1, lon1, lat2, lon2 (in that order).} \item{lon1}{a single value or vector of values representing longitude in decimal degrees from -180 to 180 degrees. If NULL, lat1 is assumed to be a matrix or data.frame.} \item{lat2}{a single value or vector of values representing latitude in decimal degrees from -90 to 90 degrees. If NULL, lat1 is assumed to be a matrix or data.frame.} \item{lon2}{a single value or vector of values representing longitude in decimal degrees from -180 to 180 degrees. If NULL, lat1 is assumed to be a matrix or data.frame.} \item{bearing}{boolean value as to calculate the direction as well as the distance.} } \value{ Returns a data.frame with: \item{lon1}{the original longitude} \item{lat1}{the original latitude} \item{lon2}{the destination longitude} \item{lat2}{the destination latitude} \item{distance}{the distance used} \item{bearing}{if requested, the bearing between the two points} } \description{ \code{distance} estimates the distance given a starting & ending latitude and longitude. \cr \cr For general information on Vincenty's formula, see e.g., \url{http://en.wikipedia.org/wiki/Vincenty's_formulae}. It states: \cr \emph{Vincenty's formulae are two related iterative methods used in geodesy to calculate the distance between two points on the surface of an spheroid, developed by Thaddeus Vincenty in 1975. They are based on the assumption that the figure of the Earth is an oblate spheroid, and hence are more accurate than methods such as great-circle distance which assume a spherical Earth.} \cr \cr \bold{Note:} this method assumes a locations are lat & lon given in WGS 84.\cr\cr Direction, if requested, is the the initial bearing (sometimes referred to as forward azimuth) for which one would follow as a straight line along a great-circle arc from start to finish.\cr \cr \bold{Note:} this will fail if there are NA's in the data. } \examples{ #get the distance of 1 degree longitude at each 5 degrees latitude from -90 to 90 distance(lat1=seq(-90,90,5),lon1=rep(0,37),lat2=seq(-90,90,5),lon2=rep(1,37),bearing=TRUE) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } \references{ Vincenty, T. 1975. Direct and Inverse Solutions of Geodesics on the Ellipsoid with application of Nested Equations. Survey Review, vol XXII no 176. \url{http://www.ngs.noaa.gov/PUBS_LIB/inverse.pdf} } \seealso{ \code{\link{destination}} } SDMTools/man/omission.Rd0000644000176000001440000000246112370121703014647 0ustar ripleyusers\name{omission} \alias{omission} \alias{prop.correct} \alias{sensitivity} \alias{specificity} \title{Measures of Accuracy} \usage{ omission(mat) sensitivity(mat) specificity(mat) prop.correct(mat) } \arguments{ \item{mat}{a confusion matrix of class 'confusion.matrix' from \code{confusion.matrix}} } \value{ returns single values representing the: \item{ommission}{the ommission rate as a proportion of true occurrences misidentified given the defined threshold value} \item{sensitivity}{the sensitivity given the defined threshold value} \item{specificity}{the specificity given the defined threshold value} \item{prop.correct}{the proportion of the presence and absence records correctly identified given the defined threshold value} } \description{ Estimates different measures of accurracy given a confusion matrix. } \examples{ #create some data obs = c(sample(c(0,1),20,replace=TRUE),NA); obs = obs[order(obs)] pred = runif(length(obs),0,1); pred = pred[order(pred)] #calculate the confusion matrix mat = confusion.matrix(obs,pred,threshold=0.5) #calculate the accuracy measures omission(mat) sensitivity(mat) specificity(mat) prop.correct(mat) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } \seealso{ \code{\link{auc}}, \code{\link{Kappa}}, \code{\link{confusion.matrix}}, \code{\link{accuracy}} } SDMTools/man/circular.averaging.Rd0000644000176000001440000000300012370121703016543 0ustar ripleyusers\name{circular.averaging} \alias{circular.averaging} \alias{vector.averaging} \title{Circular Averaging based on Vector Averaging} \usage{ circular.averaging(direction, deg = TRUE) vector.averaging(direction, distance, deg = TRUE) } \arguments{ \item{direction}{a vector of directions given in degrees (0 - 360) if \code{deg}==TRUE or in radians if \code{deg}==FALSE} \item{distance}{a vector of distances associated with each direction} \item{deg}{a boolean object defining if \code{direction} is in degrees (TRUE) or radians (FALSE)} } \value{ \code{circular.averaging} returns the average direction while \code{vector.averaging} returns a list with 2 elements distance & direction } \description{ \code{circular.averaging} calculates the average direction (0 - 360) given a vector of directions.\cr\cr \code{vector.averaging} calculates the average distance and direction given a vector of directions and a vector of distances. } \details{ functions return NA if the average distance or direction is not valid... e.g., when averaging directions of 0 & 180 degrees, the result could theoretically be 90 or 270 but is practically neither. } \examples{ #EXAMPLE circular.averaging circular.averaging(c(0,90,180,270)) #result is NA circular.averaging(c(70,82,96,110,119,259)) #EXAMPLE vector.averaging vector.averaging(c(10,20,70,78,108), distance=10) vector.averaging(c(159,220,258,273,310),distance=runif(5)) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} & Lorena Falconi \email{lorefalconi@gmail.com} } SDMTools/man/ConnCompLabel.Rd0000644000176000001440000000344612370121703015467 0ustar ripleyusers\name{ConnCompLabel} \alias{ConnCompLabel} \title{Connected Components Labelling -- Unique Patch Labelling} \usage{ ConnCompLabel(mat) } \arguments{ \item{mat}{is a binary matrix of data with 0 representing background and 1 representing environment of interest. NA values are acceptable. The matrix can be a raster of class 'asc' (this & adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package)} } \value{ A matrix of the same dim and class of \code{mat} in which unique components (individual patches) are numbered 1:n with 0 remaining background value. } \description{ \code{ConnCompLabel} is a 1 pass implementation of connected components labelling. Here it is applied to identify disjunt patches within a distribution. \cr \cr The raster matrix can be a raster of class 'asc' (adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package). } \examples{ #define a simple binary matrix tmat = { matrix(c( 0,0,0,1,0,0,1,1,0,1, 0,0,1,0,1,0,0,0,0,0, 0,1,NA,1,0,1,0,0,0,1, 1,0,1,1,1,0,1,0,0,1, 0,1,0,1,0,1,0,0,0,1, 0,0,1,0,1,0,0,1,1,0, 1,0,0,1,0,0,1,0,0,1, 0,1,0,0,0,1,0,0,0,1, 0,0,1,1,1,0,0,0,0,1, 1,1,1,0,0,0,0,0,0,1),nr=10,byrow=TRUE) } #do the connected component labelling ccl.mat = ConnCompLabel(tmat) ccl.mat image(t(ccl.mat[10:1,]),col=c('grey',rainbow(length(unique(ccl.mat))-1))) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } \references{ Chang, F., C.-J. Chen, and C.-J. Lu. 2004. A linear-time component-labeling algorithm using contour tracing technique. Comput. Vis. Image Underst. 93:206-220. } \seealso{ \code{\link{PatchStat}}, \code{\link{ClassStat}} } SDMTools/man/ZonalStat.Rd0000644000176000001440000000506612370121703014732 0ustar ripleyusers\name{ZonalStat} \alias{ZonalStat} \title{Landscape Zonal Statistics} \usage{ ZonalStat(mat, zones, FUN = "all") } \arguments{ \item{mat}{a matrix of data to be summarized; The matrix can be a raster of class 'asc' (adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package)} \item{zones}{a matrix of data with individual patches identified as with \code{ConnCompLabel}; The matrix must be of the same size & extent as \code{mat}} \item{FUN}{a single or vector of functions to be applied to each 'zone'; the default of 'all' will calculate min, 1st quarter, median, 3rd quarter, max, mean, standard deviation and n} } \value{ a data.frame listing \item{zone}{the unique ID for each zone.} \item{functions...}{a column for each of the functions identified} The data.frame will have an atribute defining the number of NA values that were excluded from the analysis. } \description{ \code{ZonalStat} calculates the statistics of data for specified zones of two matrices of data. The matrix can be a raster of class 'asc' (adehabitat package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package). } \details{ The code summarizes the data for defined zones. Nearly any function can be used for summarizing the data. \cr \cr The FUN defined with 'all' as one of or the only function will append the functions of min, 1st quarter, median, 3rd quarter, max, mean, standard deviation and n to what is being calculated. } \examples{ #define a simple binary matrix tmat = { matrix(c( 0,0,0,1,0,0,1,1,0,1, 0,0,1,0,1,0,0,0,0,0, 0,1,NA,1,0,1,0,0,0,1, 1,0,1,1,1,0,1,0,0,1, 0,1,0,1,0,1,0,0,0,1, 0,0,1,0,1,0,0,1,1,0, 1,0,0,1,0,0,1,0,0,1, 0,1,0,0,0,1,0,0,0,1, 0,0,1,1,1,0,0,0,0,1, 1,1,1,0,0,0,0,0,0,1),nr=10,byrow=TRUE) } #do the connected component labelling ccl.mat = ConnCompLabel(tmat) ccl.mat #this is the zone matrix to be used #create a random data matrix data.mat = matrix(runif(100),nr=10,nc=10) data.mat #calculate the zonal statistics zs.data = ZonalStat(data.mat,ccl.mat,FUN='all') zs.data #just calculate the sum zs.data = ZonalStat(data.mat,ccl.mat,FUN='sum') zs.data #calculate sum & n & 'all' and show when a function is not defined zs.data = ZonalStat(data.mat,ccl.mat, FUN=c('sum','length','not.a.function','all')) zs.data attr(zs.data,'excluded NAs') #show how many NAs were omitted from analysis } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } SDMTools/man/asc2dataframe.Rd0000644000176000001440000000456512370121703015513 0ustar ripleyusers\name{asc2dataframe} \alias{asc2dataframe} \alias{dataframe2asc} \title{Ascii Grid Files to Dataframe and Dataframe to Ascii Grid Files} \usage{ asc2dataframe(filenames, varnames = NULL, gz = FALSE) dataframe2asc(tdata, filenames = NULL, outdir = getwd(), gz = FALSE) } \arguments{ \item{filenames}{is a vector of file names} \item{varnames}{is a vector of names for the output columns, and must be the same length as files} \item{tdata}{is the data.frame which has y, x coordinates (OR lat,lon) and columns for the data to be output (MUST be in that order)} \item{outdir}{is the output directory, the default is the current working directory} \item{gz}{boolean defining if the ascii grid files are gzip compressed} } \value{ \item{asc2dataframe }{Returns a dataframe with XY coordinates and the data of each ascii grid files, as columns.} \item{dataframe2asc }{ Returns an asc grid file for each data column within the data.frame.} } \description{ \code{asc2dataframe} converts a list of Esri ascii grid formatted files to a data.frame consisting of only locations with data.\cr\cr \code{dataframe2asc} converts a data.frame or matrix with spatial data to Esri ascii grid formatted files. } \details{ asc2dataframe: The ascii grid files can be read in gzip compress format. The dataframe returned contains the X and Y coordinate columns followed by columns of data. dataframe2asc: If filenames is null, column names will be used. The data.frame has to contain the Y and X coordinates and the data as columns. The ascii grid files can be created as gzip compress format and would be saved in the outdir. } \examples{ #Create 2 ascii files y=seq(10,50,0.5) x=seq(140,180,0.5) cellsize=0.5 data1=sample(160,140) data2=sample(158,140) out1.asc=as.asc(matrix(data1,nc=y, nr=x), xll=min(x), yll=min(y), cellsize=cellsize) out2.asc=as.asc(matrix(data2,nc=y, nr=x), xll=min(x), yll=min(y), cellsize=cellsize) #write the ascii files to the work directory write.asc(out1.asc, 'out1.asc') write.asc(out2.asc, 'out2.asc') #list the ascii files ascfiles=c('out1.asc', 'out2.asc') #generate a dataframe from the ascii files tdata=asc2dataframe(ascfiles) tdata #remove the files unlink('out1.asc'); unlink('out2.asc') #convert the dataframe tdata to ascii grid files dataframe2asc(tdata) #remove the files unlink('var.1.asc'); unlink('var.2.asc') } \author{ Lorena Falconi \email{lorefalconi@gmail.com} } SDMTools/man/Kappa.Rd0000644000176000001440000000152112370121703014037 0ustar ripleyusers\name{Kappa} \alias{Kappa} \title{Kappa Statistic} \usage{ Kappa(mat) } \arguments{ \item{mat}{a confusion matrix of class 'confusion.matrix' from \code{confusion.matrix}} } \value{ Returns a single value represting the Kappa statistic. } \description{ \code{Kappa} estimates the Kappa statistic for model accuracy. } \examples{ #create some data obs = c(sample(c(0,1),20,replace=TRUE),NA); obs = obs[order(obs)] pred = runif(length(obs),0,1); pred = pred[order(pred)] #calculate the confusion matrix mat = confusion.matrix(obs,pred,threshold=0.5) #calculate the Kappa statistic Kappa(mat) } \author{ Jeremy VanDerWal \email{jjvanderwal@gmail.com} } \seealso{ \code{\link{auc}}, \code{\link{omission}}, \code{\link{sensitivity}}, \code{\link{specificity}}, \code{\link{prop.correct}}, \code{\link{confusion.matrix}}, \code{\link{accuracy}} } SDMTools/DESCRIPTION0000644000176000001440000000242313570411137013457 0ustar ripleyusersPackage: SDMTools Type: Package Title: Species Distribution Modelling Tools: Tools for processing data associated with species distribution modelling exercises Version: 1.1-221.2 Date: 2014-08-05 Author: Jeremy VanDerWal, Lorena Falconi, Stephanie Januchowski, Luke Shoo and Collin Storlie Maintainer: ORPHANED Imports: R.utils Suggests: adehabitat, raster, sp Description: This packages provides a set of tools for post processing the outcomes of species distribution modeling exercises. It includes novel methods for comparing models and tracking changes in distributions through time. It further includes methods for visualizing outcomes, selecting thresholds, calculating measures of accuracy and landscape fragmentation statistics, etc.. This package was made possible in part by financial support from the Australian Research Council & ARC Research Network for Earth System Science. License: GPL (>= 3) URL: http://www.rforge.net/SDMTools/ LazyLoad: yes Packaged: 2019-11-30 06:23:42 UTC; ripley NeedsCompilation: yes Repository: CRAN Date/Publication: 2019-11-30 06:55:27 UTC X-CRAN-Original-Maintainer: Jeremy VanDerWal X-CRAN-Comment: Orphaned and corrected on 2019-04-18 as check problems were not corrected in time. SDMTools/src/0000755000176000001440000000000013570405342012540 5ustar ripleyusersSDMTools/src/vincenty.geodesics.c0000644000176000001440000001330513456113636016516 0ustar ripleyusers/* File: vincenty.geodesics.c */ #include #include #include #include //define some global constants double a = 6378137, b = 6356752.3142, f = 1/298.257223563; // WGS-84 ellipsiod /* * Calculate destination point given start point lat/long (numeric degrees), * bearing (numeric degrees) & distance (in m). * * from: Vincenty direct formula - T Vincenty, "Direct and Inverse Solutions of Geodesics on the * Ellipsoid with application of nested equations", Survey Review, vol XXII no 176, 1975 * http://www.ngs.noaa.gov/PUBS_LIB/inverse.pdf */ SEXP Dest(SEXP latitude1, SEXP longitude1, SEXP bearing, SEXP distance) { //bring in the key data latitude1 = coerceVector(latitude1, REALSXP); double lat1 = REAL(latitude1)[0] * (PI/180); //first lat in radians longitude1 = coerceVector(longitude1, REALSXP); double lon1 = REAL(longitude1)[0] * (PI/180); //first lon in radians bearing = coerceVector(bearing, REALSXP); double alpha1 = REAL(bearing)[0] * (PI/180); //bearing in radians distance = coerceVector(distance, REALSXP); double s = REAL(distance)[0]; //distance in m //define all the variables double sinAlpha1, cosAlpha1, tanU1, cosU1, sinU1, sigma1, sinAlpha, cosSqAlpha; double uSq, A, B, sigma, sigmaP, cos2SigmaM, sinSigma, cosSigma, deltaSigma; double tmp, lat2, lambda, C, L, revAz; //start doing some of the calculations sinAlpha1 = sin(alpha1); cosAlpha1 = cos(alpha1); tanU1 = (1-f) * tan(lat1); cosU1 = 1 / sqrt((1 + tanU1*tanU1)); sinU1 = tanU1*cosU1; sigma1 = atan2(tanU1, cosAlpha1); sinAlpha = cosU1 * sinAlpha1; cosSqAlpha = 1 - sinAlpha*sinAlpha; uSq = cosSqAlpha * (a*a - b*b) / (b*b); A = 1 + uSq/16384*(4096+uSq*(-768+uSq*(320-175*uSq))); B = uSq/1024 * (256+uSq*(-128+uSq*(74-47*uSq))); sigma = s / (b*A), sigmaP = 2*PI; cos2SigmaM = cos(2*sigma1 + sigma); sinSigma = sin(sigma); cosSigma = cos(sigma); while (fabs(sigma-sigmaP) > 1e-12) { cos2SigmaM = cos(2*sigma1 + sigma); sinSigma = sin(sigma); cosSigma = cos(sigma); deltaSigma = B*sinSigma*(cos2SigmaM+B/4*(cosSigma*(-1+2*cos2SigmaM*cos2SigmaM)-B/6*cos2SigmaM*(-3+4*sinSigma*sinSigma)*(-3+4*cos2SigmaM*cos2SigmaM))); sigmaP = sigma; sigma = s / (b*A) + deltaSigma; } tmp = sinU1*sinSigma - cosU1*cosSigma*cosAlpha1; lat2 = atan2(sinU1*cosSigma + cosU1*sinSigma*cosAlpha1, (1-f)*sqrt(sinAlpha*sinAlpha + tmp*tmp)); lambda = atan2(sinSigma*sinAlpha1, cosU1*cosSigma - sinU1*sinSigma*cosAlpha1); C = f/16*cosSqAlpha*(4+f*(4-3*cosSqAlpha)); L = lambda - (1-C) * f * sinAlpha * (sigma + C*sinSigma*(cos2SigmaM+C*cosSigma*(-1+2*cos2SigmaM*cos2SigmaM))); revAz = atan2(sinAlpha, -tmp); // final bearing SEXP ans; PROTECT(ans = allocVector(REALSXP, 3)); REAL(ans)[0] = lat2 * (180 / PI); REAL(ans)[1] = lon1 * (180 / PI) + L * (180 / PI); REAL(ans)[2] = revAz * (180 / PI); UNPROTECT(1); return(ans); } /* * Calculates geodetic distance between two points specified by latitude/longitude using * Vincenty inverse formula for ellipsoids * * @param {Number} lat1, lon1: first point in decimal degrees * @param {Number} lat2, lon2: second point in decimal degrees * @returns (Number} distance in metres between points */ SEXP Dist(SEXP latitude1, SEXP longitude1, SEXP latitude2, SEXP longitude2) { //bring in the key data PROTECT(latitude1 = coerceVector(latitude1, REALSXP)); double *lat1 = REAL(latitude1); PROTECT(longitude1 = coerceVector(longitude1, REALSXP)); double *lon1 = REAL(longitude1); PROTECT(latitude2 = coerceVector(latitude2, REALSXP)); double *lat2 = REAL(latitude2); PROTECT(longitude2 = coerceVector(longitude2, REALSXP)); double *lon2 = REAL(longitude2); int npnts = length(latitude1); //get the number of points //setup the output vector and allocate everything as NA to begin with double *out; SEXP ans; PROTECT(ans = allocVector(REALSXP, npnts)); out = REAL(ans); //pointer to output dataset //cycle through each of the pairings and return the lengths int ii; for (ii=0;ii 1e-12 && --iterLimit>0); double uSq = cosSqAlpha * (a*a - b*b) / (b*b); double A = 1 + uSq/16384*(4096+uSq*(-768+uSq*(320-175*uSq))); double B = uSq/1024 * (256+uSq*(-128+uSq*(74-47*uSq))); double deltaSigma = B*sinSigma*(cos2SigmaM+B/4*(cosSigma*(-1+2*cos2SigmaM*cos2SigmaM)-B/6*cos2SigmaM*(-3+4*sinSigma*sinSigma)*(-3+4*cos2SigmaM*cos2SigmaM))); double s = b*A*(sigma-deltaSigma); if (iterLimit==0) s=-9999; // formula failed to converge out[ii] = s; } } UNPROTECT(5); return(ans); } SDMTools/src/patchstats.c0000644000176000001440000002033613570405051015063 0ustar ripleyusers/* this is code to calculate patch-based landscape statistics */ #include #include //global variables extern int nrow, ncol; extern int *data; /* tdata is a matrix of data with patches uniquely numbered IDs are the unique patch id values AREAS is the area of the cell in geographic coordinate systems TOPS, BOTTOMS AND SIDES are the lengths of cell perimeters in geographic coordinate systems */ //this is specific to projected coordinate systmes SEXP projectedPS(SEXP tdata, SEXP IDs) { //define the pointers for the data PROTECT(tdata = coerceVector(tdata, INTSXP)); data = INTEGER(tdata); //this is a raster matrix of patches int *dims = INTEGER(coerceVector(getAttrib(tdata, R_DimSymbol), INTSXP)); //get the dimension of the input matrix nrow = dims[0]; ncol = dims[1]; //assign the number of rows and columns in the matrix //define patch ids PROTECT(IDs = coerceVector(IDs, INTSXP)); int *ID = INTEGER(IDs); //this is the unique IDs of the patches int npatch = length(IDs); //setup temporary outputs SEXP ncells, ncellscore, nperimeters, ninternals; PROTECT(ncells = allocVector(INTSXP, npatch)); int *ncell = INTEGER(ncells); //number of cells per patch PROTECT(ncellscore = allocVector(INTSXP, npatch)); int *ncellcore = INTEGER(ncellscore); //number of core cells (core in 8 directions) PROTECT(nperimeters = allocVector(INTSXP, npatch)); int *nperim = INTEGER(nperimeters); //number of edges on teh perimeter PROTECT(ninternals = allocVector(INTSXP, npatch)); int *nintern = INTEGER(ninternals); // number of same patch shared edges //int ncell[npatch], ncellcore[npatch], nperim[npatch], nintern[npatch]; //set everything to 0 int ii,row,col; for (ii=0;ii0) ? data[(row-1)+nrow*(col)]:-9999; rook[1] = (col0) ? data[(row)+nrow*(col-1)]:-9999; queen[0] = (row>0 && col0) ? data[(row+1)+nrow*(col-1)]:-9999; queen[3] = (row>0 && col>0) ? data[(row-1)+nrow*(col-1)]:-9999; //cycle through and get temp values of edges for (ii=0;ii<4;ii++){if (tval==rook[ii]){ni++;} else {np++;}}; //check if cell is acore cell core=1; if (np==0){core=0;for (ii=0;ii<4;ii++){if (tval!=queen[ii]) core++;}} //assign the values to the proper patch id info for (ii=0;ii0) ? data[(row-1)+nrow*(col)]:-9999; rook[1] = (col0) ? data[(row)+nrow*(col-1)]:-9999; queen[0] = (row>0 && col0) ? data[(row+1)+nrow*(col-1)]:-9999; queen[3] = (row>0 && col>0) ? data[(row-1)+nrow*(col-1)]:-9999; //cycle through and get temp values of edges if (tval==rook[0]) { ni++; } else { np++;perim+=sides[col]; } if (tval==rook[1]) { ni++; } else { np++;perim+=tops[col]; } if (tval==rook[2]) { ni++; } else { np++;perim+=sides[col]; } if (tval==rook[3]) { ni++; } else { np++;perim+=bottoms[col]; } //check if cell is acore cell core=1; if (np==0){core=0;for (ii=0;ii<4;ii++){if (tval!=queen[ii]) core++;}} //assign the values to the proper patch id info for (ii=0;ii #include #include #include //function to get row minimums from a matrix SEXP writeascdata(SEXP nrows, SEXP ncols, SEXP xllcorner, SEXP yllcorner, SEXP cellsize, SEXP tdata, SEXP filename, SEXP sigdigits) { //define the pointers for the data PROTECT(tdata = coerceVector(tdata, REALSXP)); double *data = REAL(tdata); //this is a binary matrix of data int *dims = INTEGER(coerceVector(getAttrib(tdata, R_DimSymbol), INTSXP)); //get the dimension of the input matrix int nrow = dims[0]; int ncol = dims[1]; //assign the number of rows and columns in the matrix int sigdig = (int)REAL(sigdigits)[0]; // get the number of significant decimal points //write out the file FILE * fp; fp = fopen(CHAR(STRING_ELT(filename,0)),"w+"); //open the file fprintf(fp,"ncols %d\n",INTEGER(ncols)[0]); fprintf(fp,"nrows %d\n",INTEGER(nrows)[0]); fprintf(fp,"xllcorner %s\n",CHAR(STRING_ELT(xllcorner,0))); fprintf(fp,"yllcorner %s\n",CHAR(STRING_ELT(yllcorner,0))); fprintf(fp,"cellsize %s\n",CHAR(STRING_ELT(cellsize,0))); fprintf(fp,"NODATA_value -9999\n"); //cycle through and write out the data int row, col; for (col=(ncol-1); col >=0; --col) { for (row=0; row #include //global variables static int SearchDirection[8][2] = {{0,1},{1,1},{1,0},{1,-1},{0,-1},{-1,-1},{-1,0},{-1,1}}; int nrow, ncol; int *out, *data; /* tdata is a matrix of binary data 0 for background and 1 for foreground */ void Tracer(int *cy, int *cx, int *tracingdirection) { int i, y, x, tval; for(i = 0; i < 7; i++) { y = *cy + SearchDirection[*tracingdirection][0]; x = *cx + SearchDirection[*tracingdirection][1]; if (y>=0 && y=0 && x=0 && y=0 && x #include #include #include const double small_num = 0.00000000000001; /* The Slope & Aspect algorithm The rate of change (delta) of the surface in the horizontal (dz/dx) and vertical (dz/dy) directions from the center cell determines the slope. The basic algorithm used to calculate the slope is: slope_radians = ATAN ( sqrt ( [dz/dx]2 + [dz/dy]2 ) ) Slope is commonly measured in degrees, which uses the algorithm: slope_degrees = ATAN ( sqrt ( [dz/dx]2 + [dz/dy]2 ) ) * 57.29578 The slope algorithm can also be interpreted as: slope_degrees = ATAN (rise_run) * 57.29578 where: rise_run = sqrt ( [dz/dx]2 + [dz/dy]2 ] ) The values of the center cell and its eight neighbors determine the horizontal and vertical deltas. The neighbors are identified as letters from 'a' to 'i', with 'e' representing the cell for which the aspect is being calculated. a b c d e f g h i The rate of change in the x direction for cell 'e' is calculated with the algorithm: [dz/dx] = ((c + 2f + i) - (a + 2d + g) / (8 * x_cell_size) The rate of change in the y direction for cell 'e' is calculated with the following algorithm: [dz/dy] = ((g + 2h + i) - (a + 2b + c)) / (8 * y_cell_size) Taking the rate of change in both the x and y direction for cell 'e', aspect is calculated using: aspect = 57.29578 * atan2 ([dz/dy], -[dz/dx]) The aspect value is then converted to compass direction values (0–360 degrees), according to the following rule: if aspect < 0 { cell = 90.0 - aspect } else if aspect > 90.0 { cell = 360.0 - aspect + 90.0 } else { cell = 90.0 - aspect } Burrough, P. A. and McDonell, R.A., 1998. Principles of Geographical Information Systems (Oxford University Press, New York), p. 190. */ /* x is the data matrix with top is north, and right is east widths are the cell widths heights are the cell heights */ SEXP Slope (SEXP x, SEXP widths, SEXP heights) { //define the pointers for the data PROTECT(x = coerceVector(x, REALSXP)); //data values in the matrix PROTECT(widths = coerceVector(widths, REALSXP)); //data values in the matrix PROTECT(heights = coerceVector(heights, REALSXP)); //data values in the matrix double *data = REAL(x), *width = REAL(widths), *height = REAL(heights); //create pointers to the data int *dims = INTEGER(coerceVector(getAttrib(x, R_DimSymbol), INTSXP)); //get the dimension of the input matrix int nrows = dims[0]; int ncols = dims[1]; //assign the number of rows and columns in the matrix SEXP ans; PROTECT(ans = allocMatrix(REALSXP, nrows, ncols)); ;//setup the output double *out = REAL(ans); //pointer to output dataset double a,b,c,d,f,e,g,h,i; //neighboring cell values //cycle through the data of input matrix & calculate the slope for (int row=0; row 90.0) { out[row+nrows*col] = 360.0 - aspect + 90.0; } else { out[row+nrows*col] = 90.0 - aspect; } } } } } //return the output data UNPROTECT(4); return(ans); } SDMTools/src/movewindow.c0000644000176000001440000000310413456113636015105 0ustar ripleyusers/* File: movewindow.c */ #include #include SEXP movewindow(SEXP x, SEXP mwx, SEXP mwy, SEXP mwcost) { //define the pointers for the data PROTECT(x = coerceVector(x, REALSXP)); double *data = REAL(x); //suitability cost matrix double *cost = REAL(coerceVector(mwcost,REALSXP)); //moving window costs associated with distance int *X = INTEGER(coerceVector(mwx,INTSXP)), *Y = INTEGER(coerceVector(mwy,INTSXP)); //the shifts in positions associated with the moving window int *dims = INTEGER(coerceVector(getAttrib(x, R_DimSymbol), INTSXP)); //get the dimension of the input matrix int nrows = dims[0]; int ncols = dims[1]; //assign the number of rows and columns in the matrix //define other variables int mwshifts = length(mwcost); //get the number of moves in the moving window int row, col, prow, pcol, ii; double v; //setup the output double *out; SEXP ans; PROTECT(ans = allocMatrix(REALSXP, nrows, ncols)); out = REAL(ans); //pointer to output dataset //cycle through the data of input matrix for (row=0; row=0 && prow=0 && pcol #include double TWOPI = 2 * PI; double epsilon = 0.000000000001; // threshold value SEXP pip(SEXP pntx, SEXP pnty, SEXP pntn, SEXP polyx, SEXP polyy, SEXP polyn) { //define the pointers to the variables PROTECT(pntx = coerceVector(pntx, REALSXP)); double *ptx = REAL(pntx); // pnts x values PROTECT(pnty = coerceVector(pnty, REALSXP)); double *pty = REAL(pnty); // pnts y values PROTECT(pntn = coerceVector(pntn, INTSXP)); int npt = INTEGER(pntn)[0]; // number of points PROTECT(polyx = coerceVector(polyx, REALSXP)); double *plx = REAL(polyx); // polygon x values PROTECT(polyy = coerceVector(polyy, REALSXP)); double *ply = REAL(polyy); // polygon y values PROTECT(polyn = coerceVector(polyn, INTSXP)); int npl = INTEGER(polyn)[0]; // number of polygon points //printf("n points ... %d \n", npt); //printf("n poly points ... %d \n", npl); //define the output variables SEXP ans; int *out; PROTECT(ans = allocVector(INTSXP, npt)); out = INTEGER(ans); //pointer to output dataset //define some other variables int ii, jj; double x, x1, x2, y, y1, y2, dy, dx, dd; //cycle through the points for (ii=0;ii= y && y >= y2)) { angle = PI+1; break; } } // check point between two horizontal points if (y == y1 && y == y2) { if ((x1 <= x && x <= x2) || (x1 >= x && x >= x2)) { angle = PI+1; break; } } // check point between two verticle points dy = (y1==y2) ? -9999:(y1-y)/(y1-y2); //check if the relative change in x == relative change in y dx = (x1==x2) ? -9999:(x1-x)/(x1-x2); //check if the relative change in x == relative change in y dd = dy-dx; dd = (dd<0) ? -dd:dd; if (dd < epsilon && dy>0 && dy<1) { angle = PI+1; break; } // if dx == dy and dy is between 0 & 1 ... point is on the border line // && dy > 0 && dy < 1 //if not a vertex or on border lines... sum the angles double dtheta = atan2(y2 - y, x2 - x) - atan2(y1 - y, x1 - x); while (dtheta > PI) dtheta -= TWOPI; while (dtheta < -PI) dtheta += TWOPI; angle += dtheta; } //write out if point is in polygon if (fabs(angle) < PI) { out[ii] = 0; } else { out[ii] = 1; } } //return the output data UNPROTECT(7); return(ans); } SDMTools/src/getmin.c0000644000176000001440000000224113456113636014173 0ustar ripleyusers/* File: getmin.c */ #include #include SEXP getmin(SEXP x, SEXP y) { //define the pointers for the data PROTECT(x = coerceVector(x, REALSXP)); double *xdata = REAL(x); //suitability cost matrix PROTECT(y = coerceVector(y, REALSXP)); double *ydata = REAL(y); //suitability cost matrix int *dims = INTEGER(coerceVector(getAttrib(x, R_DimSymbol), INTSXP)); //get the dimension of the input matrix int nrows = dims[0]; int ncols = dims[1]; //assign the number of rows and columns in the matrix //define other variables int row, col; double xval, yval; //setup the output double *out; SEXP ans; PROTECT(ans = allocMatrix(REALSXP, nrows, ncols)); out = REAL(ans); //pointer to output dataset //cycle through the data of input matrix for (row=0; row n[ii]*(1+n[ii])) minp[ii] = 4 * n[ii] + 4 } return(p/minp) } #check if raster from sp or raster package and convert if necessary if (any(class(mat) %in% 'RasterLayer')) mat = asc.from.raster(mat) if (any(class(mat) == 'SpatialGridDataFrame')) mat = asc.from.sp(mat) #if latlon data if (latlon){ if (!any(class(mat) == 'asc')) stop('matrix must be of class asc, RasterLayer or SpatialGridDataFrame... see helpfile') #get the cell size info cellinfo = grid.info(getXYcoords(mat)$y,attr(mat,'cellsize')) #check to ensure matrix mat = try(as.matrix(mat)) #get the unique patch ID's ID.vals = as.numeric(na.omit(unique(as.vector(mat))));ID.vals = ID.vals[order(ID.vals)] #extract the base patch info out = as.data.frame(.Call('geographicPS',mat,ID.vals,cellinfo$area,cellinfo$top,cellinfo$bottom,cellinfo$side,PACKAGE='SDMTools')) names(out) = c('patchID','n.cell','n.core.cell','n.edges.perimeter','n.edges.internal','area','core.area','perimeter') } else { #check to ensure matrix mat = try(as.matrix(mat)) if (!is.matrix(mat)) stop('objects must be a matrix') #get the unique patch ID's ID.vals = as.numeric(na.omit(unique(as.vector(mat))));ID.vals = ID.vals[order(ID.vals)] #extract the base patch info out = as.data.frame(.Call('projectedPS',mat,ID.vals,PACKAGE='SDMTools')) names(out) = c('patchID','n.cell','n.core.cell','n.edges.perimeter','n.edges.internal') #calculate other stats out$area = out$n.cell * cellsize^2 out$core.area = out$n.core.cell * cellsize^2 out$perimeter = out$n.edges.perimeter * cellsize } out$perim.area.ratio = out$perimeter / out$area out$shape.index = shape.index(out$n.cell,out$n.edges.perimeter) out$frac.dim.index = (2 * log(0.25 * out$perimeter)) / log(out$area) out$core.area.index = out$core.area / out$area return(out) } SDMTools/R/getXYcoords.R0000644000176000001440000000237012370121703014542 0ustar ripleyusers#' Computes the X and Y Coordinates of the Pixels of a Raster Map #' #' \code{getXYcoords} computes the geographical coordinates of the rows and #' columns of pixels of a raster map of class \code{asc}. Code & helpfile were #' modified from adehabitat package. #' #' #' @param w an object of class \code{asc}. #' @return Returns a list with two components: \item{x}{the x coordinates of #' the columns of pixels of the map} \item{y}{the y coordinates of the rows of #' pixels of the map} #' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com} #' @examples #' #' #' tasc = as.asc(matrix(rep(x=1:10, times=1000),nr=100)); print(tasc) #' getXYcoords(tasc) #' #' #' @export "getXYcoords" <- function(w) { #check if raster from sp or raster package and convert if necessary if (any(class(w) %in% 'RasterLayer')) w = asc.from.raster(w) if (any(class(w) == 'SpatialGridDataFrame')) w = asc.from.sp(w) if (!inherits(w, "asc")) stop("must be of class asc") # Gets the attributes cs<-attr(w, "cellsize") xll<-attr(w, "xll") yll<-attr(w, "yll") ## Computation of the number of rows and columns of the matrix nr<-nrow(w) nc<-ncol(w) ## The results x<-xll+c(0:(nr-1))*cs y<-yll+c(0:(nc-1))*cs return(list(x=x, y=y)) } SDMTools/R/SigDiff.R0000644000176000001440000001266712370121703013615 0ustar ripleyusers#' Identify Regions of Significant Differences #' #' \code{SigDiff} computes the significance of the pairwise differences #' relative to the mean and variance of all differences between the two input #' datasets. This is useful for identifying regions of significant difference #' between two datasets (e.g., different DEMs (Januchowski et al. 2010) or #' different species distribution model predictions (Bateman et al 2010)). \cr #' \cr \code{ImageDiff} is a wrapper to the image.asc command in adehabitat #' package that uses the result from \code{SigDiff} to create an image mapping #' the regions of significant differences (positive and negative). \cr \cr #' \bold{NOTE:} it is assumed the input data are of the same extent and #' cellsize. #' #' #' @param x a vector or matrix of data; the matrix can be of can be a raster of #' class 'asc' (adehabitat package), 'RasterLayer' (raster package) or #' 'SpatialGridDataFrame' (sp package) #' @param y a vector or matrix of data with the same dimensions and class of #' 'x' #' @param pattern logical value defining if differences are respective to #' relative patterning (TRUE) or absolute values (FALSE) #' @param tasc a matrix of probability values (0 to 1) likely created by #' \code{SigDiff}; The matrix can be a raster of class 'asc' (adehabitat #' package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp #' package) #' @param sig.levels the significance levels to define significantly above and #' below. Default settings represent significance at the 0.05 level #' @param tcol a set of 3 colors for use in the image to represent #' significantly lower or greater, and not significant #' @param ... other graphical parameters defined by image() or plot() #' @return \code{SigDiff} returns a vector or matrix of the same dimensions and #' class of the input representing the significance of the pairwise difference #' relative to the mean and variance of all differences between the two inputs. #' \cr \cr \code{ImageDiff} returns nothing but creates an image of the areas #' of significant differences #' @author Stephanie Januchowski \email{stephierenee@@gmail.com} #' @references Januchowski, S., Pressey, B., Vanderwal, J. & Edwards, A. (2010) #' Characterizing errors in topographic models and estimating the financial #' costs of accuracy. International Journal of Geographical Information #' Science, In Press. \cr \cr Bateman, B.L., VanDerWal, J., Williams, S.E. & #' Johnson, C.N. (2010) Inclusion of biotic interactions in species #' distribution models improves predictions under climate change: the northern #' bettong Bettongia tropica, its food resources and a competitor. Journal of #' Biogeography, In Review. #' @examples #' #create some simple objects of class 'asc' #' tasc = as.asc(matrix(1:50,nr=50,nc=50)); print(tasc) #' #modify the asc objects so that they are slightly different #' tasc1 = tasc + runif(n = 2500, min = -1, max = 1) #' tasc2 = tasc + rnorm(n = 2500, mean = 1, sd = 1) #' #' #create graphical representation #' par(mfrow=c(2,2),mar=c(1,1,4,1)) #' image(tasc1,main='first grid',axes=FALSE) #' image(tasc2,main='second grid',axes=FALSE) #' #' #get significant difference by spatial patterning #' out = SigDiff(tasc1,tasc2) #' ImageDiff(out,main="Pattern Differences",axes=FALSE) #' #' #get significant difference #' out = SigDiff(tasc1,tasc2,pattern=FALSE) #' ImageDiff(out,main="Absolute Differences",axes=FALSE) #' legend('topleft',legend=c('-ve','ns','+ve'),title='significance', #' fill=terrain.colors(3),bg='white') #' #' #' @export SigDiff = function(x,y,pattern=TRUE){ #check input for class for returning info if (class(x) == 'asc') { attrib = attributes(x) } else if (any(class(x) %in% 'RasterLayer')) { attrib = x; x = asc.from.raster(x); y = asc.from.raster(y) } else if (any(class(x) == 'SpatialGridDataFrame')) { attrib = x; x = asc.from.sp(x); y = asc.from.sp(y) } else { attrib = attributes(x) } if(length(which(dim(x)==dim(y)))!=2) stop('asc objects must be of the same extent')#confirm same extents pos = which(is.finite(x)) #positions in the data which have a value (are not NA) if(pattern) { px = x[pos]/sum(x[pos]) } else { px = x[pos] } #calculate the proportionate value relative to the sum of values across the raster if(pattern) { py = y[pos]/sum(y[pos]) } else { py = y[pos] } #calculate the proportionate value relative to the sum of values across the raster diff.xy = px-py diff.xy=scale(diff.xy) #create z-scores t.sig = pnorm(diff.xy) #get the significance values of the z-scores out = x; out[pos] = t.sig #create the output ascii grid and write significance values to it #reset the attributes of the input if (any(class(attrib) %in% 'RasterLayer')) { attrib = setValues(attrib, as.vector(t(t(unclass(out))[dim(out)[2]:1,]))); return(attrib) } else if (any(class(attrib) == 'SpatialGridDataFrame')) { attrib@data[1] = as.vector(unclass(out)[,dim(out)[2]:1]); return(attrib) } else { attributes(out) = attrib; return(out) } } #' @rdname SigDiff #' @export ImageDiff = function(tasc,sig.levels=c(0.025,0.975),tcol=terrain.colors(3),...){ #check if raster from sp or raster package and convert if necessary if (any(class(tasc) %in% 'RasterLayer')) tasc = asc.from.raster(tasc) if (any(class(tasc) == 'SpatialGridDataFrame')) tasc = asc.from.sp(tasc) tasc[which(is.finite(tasc) & tasc<=sig.levels[1])] = 9 tasc[which(is.finite(tasc) & tasc>sig.levels[1] & tascthreshold)) stop('Cannot be 0') #rework sdm.asc sdm.asc[which(!is.na(sdm.asc) & sdm.asc<=threshold)] = 0 tvals = seq(threshold,trange[2],(trange[2]-threshold)/(length(cols)+2)) for (i in 1:length(cols)) sdm.asc[which(!is.na(sdm.asc) & sdm.asc=tvals[i+1])] = 0 #create the image if (is.null(zlim)) { lim = range(sdm.asc,na.rm=T) } else { lim = zlim } image(sdm.asc, col=c(bkgd.col,cols), xlab="", ylab="",zlim=lim,...) #add the scale legend if (!is.null(pnts)) legend.gradient(pnts,cols=c(bkgd.col,cols),limits=lim) } SDMTools/R/circular.averaging.R0000644000176000001440000000550712370121703016043 0ustar ripleyusers#' Circular Averaging based on Vector Averaging #' #' \code{circular.averaging} calculates the average direction (0 - 360) given a #' vector of directions.\cr\cr \code{vector.averaging} calculates the average #' distance and direction given a vector of directions and a vector of #' distances. #' #' functions return NA if the average distance or direction is not valid... #' e.g., when averaging directions of 0 & 180 degrees, the result could #' theoretically be 90 or 270 but is practically neither. #' #' @param direction a vector of directions given in degrees (0 - 360) if #' \code{deg}==TRUE or in radians if \code{deg}==FALSE #' @param distance a vector of distances associated with each direction #' @param deg a boolean object defining if \code{direction} is in degrees #' (TRUE) or radians (FALSE) #' @return \code{circular.averaging} returns the average direction while #' \code{vector.averaging} returns a list with 2 elements distance & direction #' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com} & Lorena Falconi #' \email{lorefalconi@@gmail.com} #' @examples #' #' #EXAMPLE circular.averaging #' circular.averaging(c(0,90,180,270)) #result is NA #' circular.averaging(c(70,82,96,110,119,259)) #' #' #EXAMPLE vector.averaging #' vector.averaging(c(10,20,70,78,108), distance=10) #' vector.averaging(c(159,220,258,273,310),distance=runif(5)) #' #' @export circular.averaging = function(direction,deg=TRUE) { n=length(direction) #get the length of direction vector out = vector.averaging(direction=direction,distance=rep(1,n),deg=deg) return(out$direction) } #' @rdname circular.averaging #' @export vector.averaging = function(direction,distance,deg=TRUE) { if (deg) direction = direction*pi/180 #convert to radians n=length(direction) #get the length of direction vector if (any(is.na(direction))) { #ensure no NA data warning('NAs in data'); pos = which(is.na(direction)); direction = direction[-pos]; distance = distance[-pos] } else { sinr <- sum(sin(direction)) cosr <- sum(cos(direction)) if (sqrt((sinr^2 + cosr^2))/n > .Machine$double.eps) { Ve = sum(distance*sin(direction))/n Vn = sum(distance*cos(direction))/n UV = sqrt(Ve^2 + Vn^2) AV1 = atan(Ve/Vn) AV2 = atan2(sinr, cosr) #perform some checks and correct when output in wrong quadrant AV = NULL if (abs(AV1-AV2) <= .Machine$double.eps) { #if both methods of determining directions are reporting the same values if (AV1>0) AV = AV1 if (AV1<0) AV = 2 * pi + AV1 } else { #case when they are different values... add necessary values of pi if (AV1>0) AV = AV1 + pi if (AV1<0) AV = AV2 } if (is.null(AV)) { return(list(distance=NA,direction=NA)) } else { if (deg) AV = AV * 180 / pi #convert back to degrees return(list(distance=UV,direction=AV)) } } else { return(list(distance=NA,direction=NA)) } } } SDMTools/R/Kappa.R0000644000176000001440000000224112370121703013321 0ustar ripleyusers#' Kappa Statistic #' #' \code{Kappa} estimates the Kappa statistic for model accuracy. #' #' #' @param mat a confusion matrix of class 'confusion.matrix' from #' \code{confusion.matrix} #' @return Returns a single value represting the Kappa statistic. #' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com} #' @seealso \code{\link{auc}}, \code{\link{omission}}, #' \code{\link{sensitivity}}, \code{\link{specificity}}, #' \code{\link{prop.correct}}, \code{\link{confusion.matrix}}, #' \code{\link{accuracy}} #' @examples #' #' #' #create some data #' obs = c(sample(c(0,1),20,replace=TRUE),NA); obs = obs[order(obs)] #' pred = runif(length(obs),0,1); pred = pred[order(pred)] #' #' #calculate the confusion matrix #' mat = confusion.matrix(obs,pred,threshold=0.5) #' #' #calculate the Kappa statistic #' Kappa(mat) #' #' #' @export Kappa <- function(mat){ #input checks if (attr(mat,'class')!='confusion.matrix') stop('mat must be of class confusion.matrix') #calculate Kappa n<-sum(mat) colsums<-as.double(apply(mat,2,sum)) rowsums<-as.double(apply(mat,1,sum)) t1 = sum(diag(mat))/n; t2 = sum(rowsums*colsums)/(n^2) #return the value return((t1-t2)/(1-t2)) } SDMTools/R/asc2dataframe.R0000644000176000001440000001142012370121703014761 0ustar ripleyusers#' Ascii Grid Files to Dataframe and Dataframe to Ascii Grid Files #' #' \code{asc2dataframe} converts a list of Esri ascii grid formatted files to a #' data.frame consisting of only locations with data.\cr\cr #' \code{dataframe2asc} converts a data.frame or matrix with spatial data to #' Esri ascii grid formatted files. #' #' asc2dataframe: The ascii grid files can be read in gzip compress format. The #' dataframe returned contains the X and Y coordinate columns followed by #' columns of data. #' #' dataframe2asc: If filenames is null, column names will be used. The #' data.frame has to contain the Y and X coordinates and the data as columns. #' The ascii grid files can be created as gzip compress format and would be #' saved in the outdir. #' #' @param filenames is a vector of file names #' @param varnames is a vector of names for the output columns, and must be the #' same length as files #' @param tdata is the data.frame which has y, x coordinates (OR lat,lon) and #' columns for the data to be output (MUST be in that order) #' @param outdir is the output directory, the default is the current working #' directory #' @param gz boolean defining if the ascii grid files are gzip compressed #' @return \item{asc2dataframe }{Returns a dataframe with XY coordinates and #' the data of each ascii grid files, as columns.} \item{dataframe2asc }{ #' Returns an asc grid file for each data column within the data.frame.} #' @author Lorena Falconi \email{lorefalconi@@gmail.com} #' @examples #' #' #Create 2 ascii files #' y=seq(10,50,0.5) #' x=seq(140,180,0.5) #' cellsize=0.5 #' data1=sample(160,140) #' data2=sample(158,140) #' out1.asc=as.asc(matrix(data1,nc=y, nr=x), xll=min(x), yll=min(y), cellsize=cellsize) #' out2.asc=as.asc(matrix(data2,nc=y, nr=x), xll=min(x), yll=min(y), cellsize=cellsize) #' #write the ascii files to the work directory #' write.asc(out1.asc, 'out1.asc') #' write.asc(out2.asc, 'out2.asc') #' #list the ascii files #' ascfiles=c('out1.asc', 'out2.asc') #' #generate a dataframe from the ascii files #' tdata=asc2dataframe(ascfiles) #' tdata #' #' #remove the files #' unlink('out1.asc'); unlink('out2.asc') #' #' #convert the dataframe tdata to ascii grid files #' dataframe2asc(tdata) #' #' #remove the files #' unlink('var.1.asc'); unlink('var.2.asc') #' #' @export asc2dataframe = function(filenames,varnames=NULL,gz=FALSE) { #check values if (is.null(varnames)) { varnames = paste('var',1:length(filenames),sep='.') #if no variable names defined, create them } else { if (length(varnames)!=length(filenames)) stop('variable names must be the same length as the files vector') varnames = as.character(varnames) } out = NULL #define the output for (ii in 1:length(filenames)) { #cycle through each of the files tfile = filenames[ii]; varname = varnames[ii] #define the file and variable name cat('working with',tfile,'...\n') if (!file.exists(tfile)) { warning(paste(tfile,'does not exist and was not used')); next } #check if the file exists, if not move to the next file tasc = read.asc(tfile,gz=gz) #read in the ascii grid file if (is.null(out)) { #if out is still null, populate the row/col/x,y data out = as.data.frame(which(is.finite(tasc),arr.ind=T)) #get row column info for actual data out$y = getXYcoords(tasc)$y[out$col]#extract the longitudes out$x = getXYcoords(tasc)$x[out$row] #extract the latitudes } out[varname] = tasc[cbind(out$row,out$col)] #append the actual data } if (is.null(out)) { #if out is still null warning('no data was extracted...'); return(NA) } else { #if out has some data out = na.omit(out) #remove any missing data out$row = out$col = NULL #remove the row/col info attr(out,'filenames') = list(filenames,names=varnames) #set an atrtribute relating filenames with variable names #names(attr(out,'filenames')) = varnames #define the names of the attribute list return(out) } } #' @rdname asc2dataframe #' @export dataframe2asc = function(tdata,filenames=NULL,outdir=getwd(),gz=FALSE) { #check values if (is.null(filenames)) { filenames = colnames(tdata)[3:length(tdata)] #if no variable names defined, create them } else { if (length(filenames)!=length(3:length(tdata))) stop('variable names must be the same length as the files vector') filenames = as.character(filenames) } for (ii in 3:(length(tdata))) { #cycle through each of the files lats=unique(tdata[,1]);lats=sort(lats); longs=unique(tdata[,2]);longs=sort(longs) cellsize = min(c(diff(lats),diff(longs))) # set cell size nc=ceiling((max(lats)-min(lats))/cellsize)+1; nr=ceiling((max(longs)-min(longs))/cellsize)+1 out.asc=as.asc(matrix(NA,nrow=nr,ncol=nc),xll=min(longs),yll=min(lats),cellsize=cellsize) out.asc = put.data(tdata[,c(2:1,ii)],out.asc) write.asc(out.asc,paste(outdir,'/',filenames[ii-2],sep=''),gz=gz) #put the name and extention } } SDMTools/R/optim.thresh.R0000644000176000001440000001163412370121703014717 0ustar ripleyusers#' Estimation of Optimal Threshold Values #' #' \code{optim.thresh} estimates optimal threshold values given eight methods. #' \cr \cr \bold{Note:} this method will exclude any missing data. #' #' #' @param obs a vector of observed values which must be 0 for absences and 1 #' for occurrences #' @param pred a vector of the same length as \code{obs} representing the #' predicted values. Values must be between 0 & 1 representing a likelihood. #' @param threshold a single integer value representing the number of equal #' interval threshold values between 0 & 1 #' @return Returns a list of the optimal thresholds for the different methods. #' If the list item is a single value, that is the optimal threshold but if two #' values are reported for the method, this represents the range in thresholds #' that are equal for that threshold selection method. \cr \cr The returned #' list includes the single or range in thresholds selected using the following #' methods: \item{min.occurence.prediction}{is the minimum prediction for the #' occurrence (presence) records} \item{mean.occurence.prediction}{is the mean #' prediction for the occurrence (presence) records} #' \item{'10.percent.omission'}{is the threshold value or range in values that #' excludes approx. 10 percent of the occurrence records} #' \item{'sensitivity=specificity'}{is the threshold value or range in values #' where sensitivity is equal to sensitivity} #' \item{'max.sensitivity+specificity'}{is the threshold value or range in #' values that maximizes sensitivity plus specificity} \item{maxKappa}{is the #' threshold value or range in values with the maximum Kappa statistic} #' \item{max.prop.correct}{is the threshold value or range in values with the #' maximum proportion of presence and absence records correctly identified} #' \item{min.ROC.plot.distance}{is the threshold value or range in values where #' the ROC curve is closest to point (0,1) (or perfect fit)} #' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com} #' @seealso \code{\link{accuracy}}, \code{\link{auc}}, \code{\link{Kappa}}, #' \code{\link{omission}}, \code{\link{sensitivity}}, #' \code{\link{specificity}}, \code{\link{prop.correct}}, #' \code{\link{confusion.matrix}} #' @examples #' #' #' #create some data #' obs = c(sample(c(0,1),20,replace=TRUE),NA); obs = obs[order(obs)] #' pred = runif(length(obs),0,1); pred = pred[order(pred)] #' #' #calculate the optimal thresholds #' optim.thresh(obs,pred) #' #' #' @export optim.thresh optim.thresh = function(obs,pred,threshold=101) { #input checks if (length(obs)!=length(pred)) stop('this requires the same number of observed & predicted values') if (length(threshold)==1 & threshold[1]>1) { thresholds = seq(0,1,length=threshold) } else { stop('inappropriate threshold value. See help file') } #deal with NAs if (length(which(is.na(c(obs,pred))))>0) { na = union(which(is.na(obs)),which(is.na(pred))) warning(length(na),' data points removed due to missing data') obs = obs[-na]; pred = pred[-na] } #define the n's and do checks n = length(obs); if (length(which(obs %in% c(0,1)))!=n) stop('observed values must be 0 or 1') #ensure observed are values 0 or 1 #calculate the accuracy information for the data ac = accuracy(obs,pred,threshold) #define the output list out = list() #calculate each of the optimum thresholds #min prediction of occurrences out$min.occurence.prediction = min(pred[which(obs==1)]) #mean occurance prediction out$mean.occurence.prediction = mean(pred[which(obs==1)]) #10% omission rate t.thresh = ac$threshold[which(rank(abs(0.1 - ac$omission),ties.method="min")==1)] #get the differences from calculated omission rates to 10% if (length(t.thresh)>1) { out$'10.percent.omission' = range(t.thresh) } else { out$'10.percent.omission' = t.thresh } #sensitivity = specificity t.thresh = ac$threshold[which(rank(abs(ac$sensitivity - ac$specificity),ties.method="min")==1)] if (length(t.thresh)>1) { out$'sensitivity=specificity' = range(t.thresh) } else { out$'sensitivity=specificity' = t.thresh } #max sensitivity + specificity t.thresh = ac$threshold[which(rank(-(ac$sensitivity + ac$specificity),ties.method="min")==1)] if (length(t.thresh)>1) { out$'max.sensitivity+specificity' = range(t.thresh) } else { out$'max.sensitivity+specificity' = t.thresh } #max Kappa t.thresh = ac$threshold[which(rank(-(ac$Kappa),ties.method="min")==1)] if (length(t.thresh)>1) { out$maxKappa = range(t.thresh) } else { out$maxKappa = t.thresh } #Max prop correct t.thresh = ac$threshold[which(rank(-(ac$prop.correct),ties.method="min")==1)] if (length(t.thresh)>1) { out$max.prop.correct = range(t.thresh) } else { out$max.prop.correct = t.thresh } #min ROC plot distance t.thresh = ac$threshold[which(rank((1-ac$sensitivity)^2 + (ac$specificity-1)^2,ties.method="min")==1)] if (length(t.thresh)>1) { out$min.ROC.plot.distance = range(t.thresh) } else { out$min.ROC.plot.distance = t.thresh } #return the values return(out) } SDMTools/R/Scalebar.R0000644000176000001440000000313212370121703014001 0ustar ripleyusers#' Scalebar for Projected Maps #' #' \code{Scalebar} adds a distance scalebar onto a projected map. It is not #' appropriate for geographic projections. #' #' #' @param x the x-axis position for the lower left corner of the bar #' @param y the x-axis position for the lower left corner of the bar #' @param distance the distance for which the scale bar should represent #' @param unit the units to report as the scaling #' @param scale the scaling factor to rescale the distance to a different unit. #' e.g., if your map is in m and want the scalebar to be in km, use a scale of #' 0.01 #' @param t.cex the scaling of the font size to be used for the scalebar #' @return nothing is returned, simply a scalebar is added to a plot. #' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com} #' @examples #' #' #' #create a simple object of class 'asc' #' tasc = as.asc(matrix(1:50,nr=50,nc=50)); print(tasc) #' #' #plot the image #' image(tasc,axes=FALSE,ann=FALSE) #' #' #add a distance scalebar #' Scalebar(x=5,y=5,distance=20) #show values in km #' Scalebar(x=5,y=10,distance=20,unit='m',scale=1000) #show values in meters #' #' #' @export Scalebar = function(x,y,distance,unit='km',scale=1,t.cex=0.8) { xvals = distance*c(0,0.25,0.5,0.75,1)+x yvals = c(0,distance/c(30,20,10))+y cols <- c("black","white","black","white") for (i in 1:4) rect(xvals[i],yvals[1],xvals[i+1],yvals[2],col=cols[i]) for (i in 1:5) segments(xvals[i],yvals[2],xvals[i],yvals[3]) labels <- c((xvals[c(1,3)]-xvals[1])*scale,paste((xvals[5]-xvals[1])*scale,unit)) text(xvals[c(1,3,5)],yvals[4],labels=labels,adj=.5,cex=t.cex) } SDMTools/R/destination.R0000644000176000001440000001243712370121703014616 0ustar ripleyusers#' Vincenty Direct Calculation of a Destination #' #' \code{destination} estimates the destination latitude and longitude given a #' starting latitude and longitude, a bearing and distance. \cr \cr For general #' information on Vincenty's formula, see e.g., #' \url{http://en.wikipedia.org/wiki/Vincenty's_formulae}. It states: \cr #' \emph{Vincenty's formulae are two related iterative methods used in geodesy #' to calculate the distance between two points on the surface of an spheroid, #' developed by Thaddeus Vincenty in 1975. They are based on the assumption #' that the figure of the Earth is an oblate spheroid, and hence are more #' accurate than methods such as great-circle distance which assume a spherical #' Earth.} \cr \cr \bold{Note:} this method assumes a locations are lat & lon #' given in WGS 84. #' #' Typical useages are:\cr \enumerate{ \item a single start location, bearing #' and distance to give a single output location\cr --output would be a single #' destination location \item a single start location with one or more bearings #' or distances to give multiple output locations\cr --output would be a #' destination locations for each combination of bearings and distances \item #' multiple start locations with a single bearing or distance\cr --output would #' be a destination locations representing the bearing and distance from each #' of the start locations \item multiple start locations with multiple bearings #' or distances\cr --output would be a destination locations representing the #' combinations of bearings and distances from each of the start locations\cr #' -- NOTE that the bearing and distance vectors must be of the same length of #' the input lat and long. } #' #' See examples for all possible usages. #' #' @param lat a single value or vector of values representing latitude in #' decimal degrees from -90 to 90 degrees. #' @param lon a single value or vector of values representing longitude in #' decimal degrees from -180 to 180 degrees. #' @param bearing a single value or vector of values representing the bearings #' (directions) of interest ranging from 0 to 360 degrees. #' @param distance a single value or vector of values representing the #' distances in metres to the destination. #' @return Returns a data.frame with: \item{lon1}{the original longitude} #' \item{lat1}{the original latitude} \item{bearing}{the bearing used} #' \item{distance}{the distance used} \item{lon2}{the destination longitude} #' \item{lat2}{the destination latitude} #' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com} #' @references Vincenty, T. 1975. Direct and Inverse Solutions of Geodesics on #' the Ellipsoid with application of Nested Equations. Survey Review, vol XXII #' no 176. \url{http://www.ngs.noaa.gov/PUBS_LIB/inverse.pdf} #' @source The source code here was modified from #' \url{http://www.movable-type.co.uk/scripts/latlong-vincenty-direct.html}.\cr #' \cr Destinations were validated against Geoscience Australia calculations #' (\url{http://www.ga.gov.au/geodesy/datums/vincenty_direct.jsp}). #' @examples #' #' #' ###single lat lons #' lats = -85; lons = 165 #' #single bearing & single distance #' destination(lats,lons,bearing=180,distance=500000) #' #' #multiple bearings #' destination(lats,lons,bearing=seq(0,360,length.out=9),distance=500000) #' #' #multiple bearings #' destination(lats,lons,bearing=45,distance=seq(0,5000000,length.out=11)) #' #' #multiple bearings, multiple distances #' destination(lats,lons,bearing=seq(0,360,length.out=9), #' distance=seq(0,5000000,length.out=11)) #' #' ###multiple lat lons #' lats = seq(-90,90,length.out=9); lons = seq(-180,180,length.out=9) #' #' #multiple lat lons but single bearings / distances #' destination(lats,lons,bearing=45,distance=500000) #' #' #different bearings for each lat lon #' destination(lats,lons,bearing=seq(0,360,length.out=9),distance=500000) #' #' #different distances for each lat lon #' destination(lats,lons,bearing=45,distance=seq(0,5000000,length.out=9)) #' #' #different bearings & distances for each lat lon #' destination(lats,lons,bearing=seq(0,360,length.out=9), #' distance=seq(0,5000000,length.out=9)) #' #' #' @export #' @useDynLib SDMTools Dest destination = function(lat, lon, bearing, distance) { #check the data if (length(lat)!=length(lon)) stop('lat & lon must be of the same length') if (any(lon < -180) | any(lon > 180)) stop('lon must be decimal degrees between 0 & 360') if (any(lat < -90) | any(lat > 90)) stop('lat must be decimal degrees between -90 & 90') if (length(lat)==1) { out = expand.grid(bearing=bearing,distance=distance) out = data.frame(lon1=lon,lat1=lat,out,lon2=NA,lat2=NA) } else { if (length(bearing)>1) { if (length(bearing)!=length(lat)) stop('number of bearing values is not the same length as lon & lat') } else { bearing = rep(bearing,length(lat)) } if (length(distance)>1) { if (length(distance)!=length(lat)) stop('number of distance values is not the same length as lon & lat') } else { distance = rep(distance,length(lat)) } out = data.frame(lon1=lon,lat1=lat,bearing=bearing,distance=distance,lon2=NA,lat2=NA) } #clatcle through and output the new data for (ii in 1:nrow(out)) { tt = .Call('Dest',out$lat1[ii],out$lon1[ii],out$bearing[ii],out$distance[ii],PACKAGE='SDMTools') out$lon2[ii] = tt[2]; out$lat2[ii] = tt[1] } #return the output return(out) } SDMTools/R/grid.info.R0000644000176000001440000000734212370121703014153 0ustar ripleyusers#' Grid Information from Geographic (lat lon) Projections #' #' Since spatial grids in geographic projections do not have equal area or #' perimeters, \code{grid.info} extracts perimeter & area related information #' for latitudinal bands with differing longitudinal widths. \cr\cr Outputs #' lengths are in m using Vincenty's equation (\code{distance})and areas in m2. #' Surface areas are calculated summing surface areas of spherical polygons as #' estimated using l'Huiller's formula. #' #' #' @param lats is a vector of latitudes representing the midpoint of grid cells #' @param cellsize is a single value (assuming square cells) or a two value #' vector (rectangular cells) representing the height (latitude) and width #' (longitude) of the cells #' @param r is a single value representing the radius of the globe in m. #' Default is for the WGS84 elipsoid #' @return a data.frame listing: \item{lat}{the latitude representing the #' midpoint of the cell} \item{top}{length of the top of the cell (m)} #' \item{bottom}{length of the bottom of the cell (m)} \item{side}{length of #' the side of the cell (m)} \item{diagnal}{length of the diagnals of the cell #' (m)} \item{area}{area of the cell (m2)} #' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com} #' @references information on l'Huiller's formula #' \url{http://williams.best.vwh.net/avform.htm for more info)} code for #' estimating area of polygon on sphere was modified from #' \url{http://forum.worldwindcentral.com/showthread.php?t=20724} #' @examples #' #' #show output for latitudes from -87.5 to 87.5 at 5 degree intervals #' grid.info(lats=seq(-87.5,87.5,5), 5) #' #' @export grid.info = function(lats,cellsize,r=6378137) { r2 = r^2 #radius of earth ###need checks to ensure lats will not go beyond 90 & -90 if (length(cellsize)==1) cellsize=rep(cellsize,2) #ensure cellsize is defined for both lat & lon out = data.frame(lat=lats) #setup the output dataframe toplats = lats+(0.5*cellsize[1]); bottomlats = lats-(0.5*cellsize[1]) #define the top and bottom lats check = range(c(toplats,bottomlats),na.rm=TRUE); if (-90>check[1] | 90 180)) stop('lon must be decimal degrees between -180 & 180') if (any(c(lat1,lat2) < -90) | any(c(lat1,lat2) > 90)) stop('lat must be decimal degrees between -90 & 90') #cycle through and output the new data out = data.frame(lat1=lat1,lon1=lon1,lat2=lat2,lon2=lon2) out$distance = round(.Call('Dist',out$lat1,out$lon1,out$lat2,out$lon2,PACKAGE='SDMTools'),2) #round to the nearest mm if (bearing) { #if requested, calculate bearing lat1=lat1*pi/180;lat2=lat2*pi/180;lon1=lon1*pi/180;lon2=lon2*pi/180 #convert to radians brng = atan2(sin(lon2-lon1)*cos(lat2),cos(lat1)*sin(lat2)-sin(lat1)*cos(lat2)*cos(lon1-lon2)) #estimate bearing out$bearing = ((brng*180/pi)+360)%%360 #convert to bearing in degrees } #return the output return(out) } SDMTools/R/omission.R0000644000176000001440000000402612370121703014130 0ustar ripleyusers#' Measures of Accuracy #' #' Estimates different measures of accurracy given a confusion matrix. #' #' #' @param mat a confusion matrix of class 'confusion.matrix' from #' \code{confusion.matrix} #' @return returns single values representing the: \item{ommission}{the #' ommission rate as a proportion of true occurrences misidentified given the #' defined threshold value} \item{sensitivity}{the sensitivity given the #' defined threshold value} \item{specificity}{the specificity given the #' defined threshold value} \item{prop.correct}{the proportion of the presence #' and absence records correctly identified given the defined threshold value} #' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com} #' @seealso \code{\link{auc}}, \code{\link{Kappa}}, #' \code{\link{confusion.matrix}}, \code{\link{accuracy}} #' @examples #' #' #' #create some data #' obs = c(sample(c(0,1),20,replace=TRUE),NA); obs = obs[order(obs)] #' pred = runif(length(obs),0,1); pred = pred[order(pred)] #' #' #calculate the confusion matrix #' mat = confusion.matrix(obs,pred,threshold=0.5) #' #' #calculate the accuracy measures #' omission(mat) #' sensitivity(mat) #' specificity(mat) #' prop.correct(mat) #' #' #' @export omission <- function(mat){ #input checks if (attr(mat,'class')!='confusion.matrix') stop('mat must be of class confusion.matrix') #return the value return(mat[1,2]/sum(mat[,2])) } #' @rdname omission #' @export sensitivity = function(mat) { #input checks if (attr(mat,'class')!='confusion.matrix') stop('mat must be of class confusion.matrix') #return the value return(mat[2,2]/sum(mat[,2])) } #' @rdname omission #' @export specificity = function(mat) { #input checks if (attr(mat,'class')!='confusion.matrix') stop('mat must be of class confusion.matrix') #return the value return(mat[1,1]/sum(mat[,1])) } #' @rdname omission #' @export prop.correct = function(mat) { #input checks if (attr(mat,'class')!='confusion.matrix') stop('mat must be of class confusion.matrix') #return the value return(sum(diag(mat))/sum(mat)) } SDMTools/R/grid.area.R0000644000176000001440000000571412370121703014131 0ustar ripleyusers#' Create a Grid of Cell Areas or Perimeters #' #' Creates a grid of cell areas or perimeters for spatial grids in geographic #' (lat-lon) projections. #' #' #' @param mat a matrix representing a raster of class 'asc' (this & adehabitat #' package), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp #' package) #' @return \item{grid.area}{Returns an ascii grid file which contains the #' values of the area in each cell.} \item{grid.perimter}{Returns an ascii grid #' file which contains the values of the perimeter in each cell. } #' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com} & Lorena Falconi #' \email{lorefalconi@@gmail.com} #' @examples #' #' #Create an ascii file #' y=seq(10,50,0.5) #' x=seq(140,180,0.5) #' cellsize=0.5 #' data1=sample(160,140) #' out1.asc=as.asc(matrix(data1,nc=y, nr=x), xll=min(x), yll=min(y), cellsize=cellsize) #' #' grid.area(out1.asc)[,] #' #' grid.perimeter(out1.asc)[,] #' #' @export grid.area <- function(mat) { #check input for class for returning info if (any(class(mat) == 'asc')) { attrib = attributes(mat) } else if (any(class(mat) %in% 'RasterLayer')) { attrib = mat; mat = asc.from.raster(mat) } else if (any(class(mat) == 'SpatialGridDataFrame')) { attrib = mat; mat = asc.from.sp(mat) } else { attrib = attributes(mat) } #check to ensure asc if (!any(class(mat) == 'asc')) { stop('objects must be of class "asc"') } #apply the gridinfo area = grid.info(getXYcoords(mat)$y,attr(mat,'cellsize'))$area mat[is.finite(mat)] = 1; for (ii in 1:length(area)) mat[,ii] = mat[,ii] * area[ii] #reset the attributes of the input if (any(class(attrib) %in% 'RasterLayer')) { attrib = setValues(attrib, as.vector(t(t(unclass(mat))[dim(mat)[2]:1,]))); return(attrib) } else if (any(class(attrib) == 'SpatialGridDataFrame')) { attrib@data[1] = as.vector(unclass(mat)[,dim(mat)[2]:1]); return(attrib) } else { attributes(mat) = attrib; return(mat) } } #' @rdname grid.area #' @export grid.perimeter <- function(mat) { #check input for class for returning info if (any(class(mat) == 'asc')) { attrib = attributes(mat) } else if (any(class(mat) %in% 'RasterLayer')) { attrib = mat; mat = asc.from.raster(mat) } else if (any(class(mat) == 'SpatialGridDataFrame')) { attrib = mat; mat = asc.from.sp(mat) } else { attrib = attributes(mat) } #check to ensure asc if (!any(class(mat) == 'asc')) { stop('objects must be of class "asc"') } #apply the gridinfo perim = grid.info(getXYcoords(mat)$y,attr(mat,'cellsize')) perim = perim$top+perim$bottom+2*perim$side mat[is.finite(mat)] = 1; for (ii in 1:length(perim)) mat[,ii] = mat[,ii] * perim[ii] #reset the attributes of the input if (any(class(attrib) %in% 'RasterLayer')) { attrib = setValues(attrib, as.vector(t(t(unclass(mat))[dim(mat)[2]:1,]))); return(attrib) } else if (any(class(attrib) == 'SpatialGridDataFrame')) { attrib@data[1] = as.vector(unclass(mat)[,dim(mat)[2]:1]); return(attrib) } else { attributes(mat) = attrib; return(mat) } } SDMTools/R/asc.from.raster.R0000644000176000001440000001054512370121703015302 0ustar ripleyusers#' Raster conversion functions for adehabitat, raster and sp packages #' #' \code{asc.from.raster} and \code{asc.from.sp} extracts data from objects of #' class 'RasterLayer' (raster package) and class 'SpatialGridDataFrame' (sp #' package) into an object of class 'asc' (SDMTools & adehabitat packages). \cr #' \cr \code{raster.from.asc} and \code{sp.from.asc} does the reverse.\cr\cr #' \code{as.asc} creates an object of class 'asc' (SDMTools & adehabitat #' packages) from a matrix of data. Code & helpfile associated with #' \code{as.asc} were modified from adehabitat package. #' #' These functions provide capabilities of using scripts / functions from many #' packages including adehabitat (plus e.g, SDMTools), sp (plus e.g., maptools, #' rgdal) and raster. #' #' @param x is an object of class 'asc', 'RasterLayer' or #' 'SpatialGridDataFrame'. For the function \code{as.asc}, a matrix #' @param projs is a CRS projection string of the Proj4 package #' @param xll the x coordinate of the center of the lower left pixel of the map #' @param yll the y coordinate of the center of the lower left pixel of the map #' @param cellsize the size of a pixel on the studied map #' @param type a character string. Either \code{"numeric"} or \code{"factor"} #' @param lev if \code{type = "factor"}, either a vector giving the labels of #' the factor levels, or the name of a file giving the correspondence table of #' the map (see adehabitat as.asc helpfile details) #' @return Returns an object of class requested. #' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com} #' @examples #' #' #' #create a simple object of class 'asc' #' tasc = as.asc(matrix(rep(x=1:10, times=1000),nr=100)); print(tasc) #' str(tasc) #' #' #convert to RasterLayer #' traster = raster.from.asc(tasc) #' str(traster) #' #' #convert to SpatialGridDataFrame #' tgrid = sp.from.asc(tasc) #' str(tgrid) #' #' #create a basic object of class asc #' tasc = as.asc(matrix(rep(x=1:10, times=1000),nr=100)); print(tasc) #' #' #' @export asc.from.raster = function(x) { if (!any(class(x) %in% 'RasterLayer')) stop('x must be of class raster or RasterLayer') cellsize = (x@extent@ymax-x@extent@ymin)/x@nrows yll = x@extent@ymin + 0.5 * cellsize xll = x@extent@xmin + 0.5 * cellsize tmat = t(matrix(getValues(x),nrow=x@nrows,ncol=x@ncols,byrow=T)[x@nrows:1,]) tmat[which(tmat==x@file@nodatavalue)] = NA return(as.asc(tmat,yll=yll,xll=xll,cellsize=cellsize)) } #' @rdname asc.from.raster #' @export raster.from.asc = function(x,projs=NA) { if (class(x) != 'asc') stop('x must be of class asc') require(raster) cellsize = attr(x, "cellsize") nrows = dim(x)[2]; ncols= dim(x)[1] xmin = attr(x, "xll") - 0.5 * cellsize ymin = attr(x, "yll") - 0.5 * cellsize xmax = xmin + ncols*cellsize ymax = ymin + nrows*cellsize r <- raster(ncols=ncols, nrows=nrows, xmn=xmin, xmx=xmax, ymn=ymin, ymx=ymax) projection(r) <- projs tvals = as.vector(t(t(unclass(x))[nrows:1,])); tvals[which(is.na(tvals))] = r@file@nodatavalue r <- setValues(r, as.vector(t(t(unclass(x))[nrows:1,]))) return(r) } #' @rdname asc.from.raster #' @export asc.from.sp = function(x) { #assumes single band data if (!any(class(x) == 'SpatialGridDataFrame')) stop('x must be of class SpatialGridDataFrame') cellsize = mean(x@grid@cellsize) yll = as.numeric(x@grid@cellcentre.offset[2]) xll = as.numeric(x@grid@cellcentre.offset[1]) names(x@data)[1] = 'z' tmat = t(matrix(x@data$z,nrow=x@grid@cells.dim[2],ncol=x@grid@cells.dim[1],byrow=T)[x@grid@cells.dim[2]:1,]) return(as.asc(tmat,yll=yll,xll=xll,cellsize=cellsize)) } #' @rdname asc.from.raster #' @export sp.from.asc = function(x,projs=CRS(as.character(NA))) { if (!inherits(x, "asc")) stop('x must be of class asc') require(sp) tgrid = GridTopology(c(attr(x, "xll"),attr(x, "yll")),rep(attr(x, "cellsize"),2),dim(x)) return(SpatialGridDataFrame(tgrid,data.frame(z=as.vector(unclass(x)[,dim(x)[2]:1])),proj4string=projs)) } #' @rdname asc.from.raster #' @export as.asc = function(x, xll=1, yll=1, cellsize=1,type=c("numeric", "factor"),lev=levels(factor(x))) { #check inputs type=match.arg(type) if (!inherits(x, "matrix")) stop("x should be a matrix") # creates the attributes mode(x) = "numeric"; attr(x, "xll") = xll; attr(x, "yll") = yll attr(x, "cellsize")=cellsize; attr(x, "type") = type if (type=="factor") attr(x, "levels") = lev class(x) = "asc" #return the object return(x) } SDMTools/R/pnt.in.poly.R0000644000176000001440000000532412370121703014462 0ustar ripleyusers#' Point in Polygon #' #' \code{pnt.in.poly} works out if 2D points lie within the boundaries of a #' defined polygon. \cr \cr \bold{Note:} Points that lie on the boundaries of #' the polygon or vertices are assumed to be within the polygon. #' #' The algorithm implements a sum of the angles made between the test point and #' each pair of points making up the polygon. The point is interior if the sum #' is 2pi, otherwise, the point is exterior if the sum is 0. This works for #' simple and complex polygons (with holes) given that the hole is defined with #' a path made up of edges into and out of the hole. \cr \cr This sum of angles #' is not able to consistently assign points that fall on vertices or on the #' boundary of the polygon. The algorithm defined here assumes that points #' falling on a boundary or polygon vertex are part of the polygon. #' #' @param pnts a 2-column matrix or dataframe defining locations of the points #' of interest #' @param poly.pnts a 2-column matrix or dataframe defining the locations of #' vertices of the polygon of interest #' @return A 3-column dataframe where the first 2 columns are the original #' locations of the points. The third column (names pip) is a vector of binary #' values where 0 represents points not with the polygon and 1 within the #' polygon. #' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com} #' @examples #' #' #' #define the points and polygon #' pnts = expand.grid(x=seq(1,6,0.1),y=seq(1,6,0.1)) #' polypnts = cbind(x=c(2,3,3.5,3.5,3,4,5,4,5,5,4,3,3,3,2,2,1,1,1,1,2), #' y=c(1,2,2.5,2,2,1,2,3,4,5,4,5,4,3,3,4,5,4,3,2,2)) #' #' #plot the polygon and all points to be checked #' plot(rbind(polypnts, pnts)) #' polygon(polypnts,col='#99999990') #' #' #create check which points fall within the polygon #' out = pnt.in.poly(pnts,polypnts) #' head(out) #' #' #identify points not in the polygon with an X #' points(out[which(out$pip==0),1:2],pch='X') #' #' #' @export pnt.in.poly #' @useDynLib SDMTools pip pnt.in.poly <- function(pnts,poly.pnts) { #check if pnts & poly is 2 column matrix or dataframe pnts = as.matrix(pnts); poly.pnts = as.matrix(poly.pnts) if (!(is.matrix(pnts) & is.matrix(poly.pnts))) stop('pnts & poly.pnts must be a 2 column dataframe or matrix') if (!(dim(pnts)[2] == 2 & dim(poly.pnts)[2] == 2)) stop('pnts & poly.pnts must be a 2 column dataframe or matrix') #ensure first and last polygon points are NOT the same if (poly.pnts[1,1] == poly.pnts[nrow(poly.pnts),1] & poly.pnts[1,2] == poly.pnts[nrow(poly.pnts),2]) poly.pnts = poly.pnts[-1,] #run the point in polygon code out = .Call('pip',pnts[,1],pnts[,2],nrow(pnts),poly.pnts[,1],poly.pnts[,2],nrow(poly.pnts),PACKAGE='SDMTools') #return the value return(data.frame(pnts,pip=out)) } SDMTools/R/read.asc.R0000644000176000001440000001722512370121703013755 0ustar ripleyusers#' ESRI ASCII Raster File Import And Export #' #' \code{read.asc} and \code{read.asc.gz} reads ESRI ArcInfo ASCII raster file #' either uncompressed or compressed using gzip. \cr \cr \code{write.asc} and #' \code{write.asc.gz} writes an asc object to a ESRI ArcInfo ASCII raster #' file. The output can be either compressed or uncompressed. \cr \cr These #' functions are faster methods based on the adehabitat import.asc and #' export.asc.\cr\cr \code{write.asc2} and \code{write.asc2.gz} are even faster #' implementations but have less error checking. \cr \cr \code{image.asc} and #' \code{print.asc} are generic methods associated with plotting & summarizing #' data of class 'asc'; they were modified from adehabitat package. #' #' Implements a faster version of import.asc or export.asc from the adehabitat #' package. In addition, files can be read in and written to in gzip compressed #' format.\cr\cr Generic methods of print and image were modified from #' adehabitat. Further details of them are found there. #' #' @param file a character string representing the filename of the input/output #' file. The file extension should always be '.asc'. #' @param gz defines if the object is or should be compressed using gzip #' @param x an object of class 'asc' as defined in the adehabitat package #' @param sigdig is the number of significant digits to write when creating the #' ascii grid file #' @param col for maps of type \code{"numeric"}, the colors to be used (see #' \code{help(par)}) #' @param clfac for maps of type \code{"factor"}, a character vector giving the #' names of colors for each level of the factor (see \code{help(colasc)}) #' @param \dots additional arguments to be passed to the generic function #' \code{image} or \code{print} #' @return Returns a raster matrix of the class 'asc' defined in the adehabitat #' package with the following attributes: \item{xll}{the x coordinate of the #' center of the lower left pixel of the map} \item{yll}{the y coordinate of #' the center of the lower left pixel of the map} \item{cellsize}{the size of a #' pixel on the studied map} \item{type}{either 'numeric' or 'factor'} #' \item{levels}{if type = 'factor', the levels of the factor.} #' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com} #' @examples #' #' #' #create a simple object of class 'asc' #' tasc = as.asc(matrix(rep(x=1:10, times=1000),nr=100)); print(tasc) #' #' #write out the raster grid file #' write.asc(tasc,'t.raster.asc') #' write.asc.gz(tasc,'t.raster.asc') #actually save file name as t.raster.asc.gz #' #' #read in the raster grid files #' tasc2 = read.asc('t.raster.asc') #' tasc3 = read.asc.gz('t.raster.asc.gz') #' #' #remove the temporary raster #' unlink(c('t.raster.asc','t.raster.asc.gz')) #' #' #' @export #' @useDynLib SDMTools writeascdata read.asc <- function (file, gz=FALSE) { #confirm ascii grid file name is specified if (gz) { if (substr(file, nchar(file) - 2, nchar(file)) != ".gz") stop("not a valid .gz file") } else { if (substr(tolower(file), nchar(file) - 3, nchar(file)) != ".asc") stop("not a valid .asc file") } #read in the header if (gz) { zz <- gzfile(file, "r") } else { zz <- file(file, "r") } nc <- scan(zz,what=list('',''),nlines=1,quiet=TRUE); nc <- as.numeric(nc[[2]][1])#number of columns nl <- scan(zz,what=list('',''),nlines=1,quiet=TRUE); nl <- as.numeric(nl[[2]][1])#number of rows xll <- scan(zz,what=list('',''),nlines=1,quiet=TRUE); #lower left corner yll <- scan(zz,what=list('',''),nlines=1,quiet=TRUE); #lower left corner cs <- scan(zz,what=list('',''),nlines=1,quiet=TRUE); cs <- as.numeric(cs[[2]][1])#cell size nas <- scan(zz,what=list('',''),nlines=1,quiet=TRUE); nas <- as.numeric(nas[[2]][1])#nodata value #close the link to the file close(zz) #ensure xll & yll are centers of the cells if ((xll[[1]][1] == "xllcenter") | (xll[[1]][1] == "XLLCENTER")) { xll=as.numeric(xll[[2]][1]) } else { xll=as.numeric(xll[[2]][1])+ cs/2 } if ((yll[[1]][1] == "yllcenter") | (xll[[1]][1] == "YLLCENTER")) { yll=as.numeric(yll[[2]][1]) } else { yll=as.numeric(yll[[2]][1])+ cs/2 } #read in the data skipping the first six header rows if (gz) { zz <- gzfile(file, "r"); output <- scan(zz,nmax=nl*nc,skip=6,quiet = TRUE); close(zz); } else { output <- scan(file,nmax=nl*nc,skip=6, quiet = TRUE) } #convert no data to NA output[output == nas] <- NA #convert data to matrix output <- matrix(c(as.matrix(output)), ncol = nl, nrow = nc) output <- output[, ncol(output):1] #define the attributes attr(output, "xll") <- xll attr(output, "yll") <- yll attr(output, "cellsize") <- cs attr(output, "type") <- 'numeric' class(output) <- "asc" #return the file return(output) } #' @rdname read.asc #' @export read.asc.gz <- function (file) { return(read.asc(file, gz=TRUE)) } #' @rdname read.asc #' @export write.asc <- function (x, file, gz=FALSE) { #confirm asc object and file named appropriately if (!inherits(x, "asc")) stop("Non convenient data") if (substr(file, nchar(file) - 3, nchar(file)) != ".asc") file <- paste(file, ".asc", sep = "") #open a connection to file if (gz) { zz <- gzfile(paste(file, ".gz", sep = ""),"w") } else { zz <- file(file, "w") } #write the header info cat("ncols ",nrow(x),'\n',sep = "",file=zz) cat("nrows ",ncol(x),'\n',sep = "",file=zz) cat("xllcorner ",as.character(attr(x,"xll")-attr(x,"cellsize")/2),'\n',sep = "",file=zz) cat("yllcorner ",as.character(attr(x,"yll")-attr(x,"cellsize")/2),'\n',sep = "",file=zz) cat("cellsize ",as.character(attr(x, "cellsize")),'\n',sep = "",file=zz) cat("NODATA_value ", -9999,'\n',sep = "",file=zz) #prep and write the data x[is.na(x)] <- -9999 #change na values x <- x[, ncol(x):1] #reorder x <- do.call('rbind',list(x,"\n")) #add new line character cat(x,file=zz) #close the connection to the file close(zz) } #' @rdname read.asc #' @export write.asc.gz <- function (x, file) { write.asc(x, file, gz=TRUE) } #' @rdname read.asc #' @export #' @import R.utils write.asc2 <- function (x, file, sigdig = 0, gz=FALSE) { ###confirm asc object and file named appropriately if (!inherits(x, "asc")) stop("Non convenient data") if (substr(file, nchar(file) - 3, nchar(file)) != ".asc") file <- paste(file, ".asc", sep = "") ###write out the data tt = .Call('writeascdata', ncol(x) , nrow(x) , as.character(attr(x,"xll")-attr(x,"cellsize")/2) , as.character(attr(x,"yll")-attr(x,"cellsize")/2) , as.character(attr(x, "cellsize")) , x , file , sigdig, PACKAGE='SDMTools') if (gz) { gzip(file) } } #' @rdname read.asc #' @export #' @import R.utils write.asc2.gz <- function (x, file, sigdig = 0) { write.asc2(x, file, sigdig=sigdig, gz=TRUE) } #' @rdname read.asc #' @export "image.asc" <- function (x, col = gray((240:1)/256), clfac = NULL, ...) { ## Verifications if (!inherits(x, "asc")) stop("not an \"asc\" object") ## Coordinates of the pixels xy <- getXYcoords(x) xx <- xy$x yy <- xy$y ## If the variable is numeric if (attr(x, "type") == "numeric") image(x = xx, y = yy, x, asp = 1, col = col, ...) ## For a factor: creates colors if (attr(x, "type") == "factor") { if (is.null(clfac)) { clfac <- rainbow(nlevels(x)) clfac <- clfac[as.numeric(levels(factor(x)))] } image(x = xx, y = yy, x, asp = 1, col = clfac, ...) } } #' @rdname read.asc #' @export "print.asc" <- function(x, ...) { ## Verifications if (!inherits(x, "asc")) stop("Non convenient data") ## The output cat("Raster map of class \"asc\":\n") cat("Cell size: ", attr(x, "cellsize"), "\n") cat("Number of rows: ", ncol(x), "\n") cat("Number of columns: ", nrow(x), "\n") cat("Type: ", attr(x, "type"), "\n") } SDMTools/R/ClassStat.R0000644000176000001440000002563212370121703014177 0ustar ripleyusers#' Landscape Class Statistics #' #' \code{ClassStat} calculates the class statistics for patch types identified #' in a matrix of data or in a raster of class 'asc' (SDMTools & adehabitat #' packages), 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp #' package). #' #' The class statistics are based on statistics calculated by fragstats #' \url{http://www.umass.edu/landeco/research/fragstats/fragstats.html}. #' #' @param mat a matrix of data with patches identified as classes (unique #' integer values) as e.g., a binary lanscape of a species distribution or a #' vegetation map. Matrix can be a raster of class 'asc' (adehabitat package), #' 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package) #' @param cellsize cell size (in meters) is a single value representing the #' width/height of cell edges (assuming square cells) #' @param bkgd the background value for which statistics will not be calculated #' @param latlon boolean value representing if the data is geographic. If #' latlon == TRUE, matrix must be of class 'asc', 'RasterLayer' or #' 'SpatialGridDataFrame' #' @return a data.frame listing \item{class}{a particular patch type from the #' original input matrix (\code{mat}).} \item{n.patches}{the number of patches #' of a particular patch type or in a class.} \item{total.area}{the sum of the #' areas (m2) of all patches of the corresponding patch type.} #' \item{prop.landscape}{the proportion of the total lanscape represented by #' this class} \item{patch.density}{the numbers of patches of the corresponding #' patch type divided by total landscape area (m2).} \item{total.edge}{the #' total edge length of a particular patch type.} \item{edge.density}{edge #' length on a per unit area basis that facilitates comparison among landscapes #' of varying size.} \item{landscape.shape.index}{a standardized measure of #' total edge or edge density that adjusts for the size of the landscape.} #' \item{largest.patch.index}{largest patch index quantifies the percentage of #' total landscape area comprised by the largest patch.} #' \item{mean.patch.area}{average area of patches.} #' \item{sd.patch.area}{standard deviation of patch areas.} #' \item{min.patch.area}{the minimum patch area of the total patch areas. } #' \item{max.patch.area}{the maximum patch area of the total patch areas.} #' \item{perimeter.area.frac.dim}{perimeter-area fractal dimension equals 2 #' divided by the slope of regression line obtained by regressing the logarithm #' of patch area (m2) against the logarithm of patch perimeter (m).} #' \item{mean.perim.area.ratio}{the mean of the ratio patch perimeter. The #' perimeter-area ratio is equal to the ratio of the patch perimeter (m) to #' area (m2).} \item{sd.perim.area.ratio}{standard deviation of the ratio patch #' perimeter.} \item{min.perim.area.ratio}{minimum perimeter area ratio} #' \item{max.perim.area.ratio}{maximum perimeter area ratio.} #' \item{mean.shape.index}{mean of shape index} \item{sd.shape.index}{standard #' deviation of shape index.} \item{min.shape.index}{the minimum shape index.} #' \item{max.shape.index}{the maximum shape index.} #' \item{mean.frac.dim.index}{mean of fractal dimension index.} #' \item{sd.frac.dim.index}{standard deviation of fractal dimension index.} #' \item{min.frac.dim.index}{the minimum fractal dimension index.} #' \item{max.frac.dim.index}{the maximum fractal dimension index.} #' \item{total.core.area}{the sum of the core areas of the patches (m2).} #' \item{prop.landscape.core}{proportional landscape core} #' \item{mean.patch.core.area}{mean patch core area.} #' \item{sd.patch.core.area}{standard deviation of patch core area.} #' \item{min.patch.core.area}{the minimum patch core area.} #' \item{max.patch.core.area}{the maximum patch core area.} #' \item{prop.like.adjacencies}{calculated from the adjacency matrix, which #' shows the frequency with which different pairs of patch types (including #' like adjacencies between the same patch type) appear side-by-side on the map #' (measures the degree of aggregation of patch types).} #' \item{aggregation.index}{computed simply as an area-weighted mean class #' aggregation index, where each class is weighted by its proportional area in #' the landscape.} \item{lanscape.division.index}{based on the cumulative patch #' area distribution and is interpreted as the probability that two randomly #' chosen pixels in the landscape are not situated in the same patch} #' \item{splitting.index}{based on the cumulative patch area distribution and #' is interpreted as the effective mesh number, or number of patches with a #' constant patch size when the landscape is subdivided into S patches, where S #' is the value of the splitting index.} \item{effective.mesh.size}{equals 1 #' divided by the total landscape area (m2) multiplied by the sum of patch area #' (m2) squared, summed across all patches in the landscape.} #' \item{patch.cohesion.index}{measures the physical connectedness of the #' corresponding patch type.} #' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com} #' @seealso \code{\link{PatchStat}}, \code{\link{ConnCompLabel}} #' @references McGarigal, K., S. A. Cushman, M. C. Neel, and E. Ene. 2002. #' FRAGSTATS: Spatial Pattern Analysis Program for Categorical Maps. Computer #' software program produced by the authors at the University of Massachusetts, #' Amherst. Available at the following web site: #' \url{www.umass.edu/landeco/research/fragstats/fragstats.html} #' @examples #' #' #' #define a simple binary matrix #' tmat = { matrix(c( 0,0,0,1,0,0,1,1,0,1, #' 0,0,1,0,1,0,0,0,0,0, #' 0,1,NA,1,0,1,0,0,0,1, #' 1,0,1,1,1,0,1,0,0,1, #' 0,1,0,1,0,1,0,0,0,1, #' 0,0,1,0,1,0,0,1,1,0, #' 1,0,0,1,0,0,1,0,0,1, #' 0,1,0,0,0,1,0,0,0,1, #' 0,0,1,1,1,0,0,0,0,1, #' 1,1,1,0,0,0,0,0,0,1),nr=10,byrow=TRUE) } #' #' #do the connected component labelling #' ccl.mat = ConnCompLabel(tmat) #' ccl.mat #' image(t(ccl.mat[10:1,]),col=c('grey',rainbow(length(unique(ccl.mat))-1))) #' #' #calculate the patch statistics #' ps.data = PatchStat(ccl.mat) #' ps.data #' #' #calculate the class statistics #' cl.data = ClassStat(tmat) #' cl.data #' #' #identify background data is 0 #' cl.data = ClassStat(tmat,bkgd=0) #' cl.data #' #' #' #' @export ClassStat = function(mat,cellsize=1,bkgd=NA,latlon=FALSE) { ##method to calculate shape index or aggregation indexes #a = area of the patch in number of cells #p is the perimeter in number of edges #g is the number of 'internal' edges (single count) aggregation.index = function(a,g) { n = trunc(sqrt(a)) m = a - n^2 if (m==0) maxg = 2*n*(n-1) if (m<=n) maxg = 2*n*(n-1)+2*m-1 if (m>n) maxg = 2*n*(n-1)+2*m-2 minp=rep(0,length(m)) for (ii in 1:length(m)){ if (m[ii]==0) minp[ii] = 4*n[ii] if (n[ii]^2 n[ii]*(1+n[ii])) minp[ii] = 4 * n[ii] + 4 } return((g/maxg)*100) } shape.index = function(a,p) { n = trunc(sqrt(a)) m = a - n^2 minp=rep(0,length(m)) for (ii in 1:length(m)){ if (m[ii]==0) minp[ii] = 4*n[ii] if (n[ii]^2 n[ii]*(1+n[ii])) minp[ii] = 4 * n[ii] + 4 } return(p/minp) } ###### #check if raster from sp or raster package and convert if necessary if (any(class(mat) %in% 'RasterLayer')) mat = asc.from.raster(mat) if (any(class(mat) == 'SpatialGridDataFrame')) mat = asc.from.sp(mat) #check to ensure matrix mat = try(as.matrix(mat)) if (!is.matrix(mat)) stop('objects must be a matrix') #get the uniqu classes of data classes = as.numeric(na.omit(unique(as.vector(mat))));classes = classes[order(classes)] #omit the background value if (!is.na(bkgd)) classes = classes[-which(classes==bkgd)] #out is the final object to be returned out = NULL #cycle through each of the classes for (cl in classes){ #create a reclassed matrix mat2 = mat; mat2 = mat * 0; mat2[which(mat==cl)] = 1 #get the patch info for the class out.patch = PatchStat(ConnCompLabel(mat2),cellsize=cellsize,latlon=latlon);rm(mat2) #define a couple constants L.cell = sum(out.patch$n.cell) #n cells in landscape L.area = sum(out.patch$area) #full area of landscape #remove the background patch (id = 0) if (0 %in% out.patch$patchID) out.patch = out.patch[-which(out.patch$patchID==0),] #create a temporary variable to store output & calculate patch stats tout = list(class=cl) tout$n.patches = nrow(out.patch) tout$total.area = sum(out.patch$area) tout$prop.landscape = tout$total.area / L.area tout$patch.density = tout$n.patches / L.area tout$total.edge = sum(out.patch$perimeter) tout$edge.density = tout$total.edge / L.area tout$landscape.shape.index = shape.index(sum(out.patch$n.cell),sum(out.patch$n.edges.perimeter)) tout$largest.patch.index = max(out.patch$area) / L.area tout$mean.patch.area = mean(out.patch$area) tout$sd.patch.area = sd(out.patch$area) tout$min.patch.area = min(out.patch$area) tout$max.patch.area = max(out.patch$area) tout$perimeter.area.frac.dim = 2 / (((tout$n.patches*sum(log(out.patch$perimeter)+log(out.patch$area)))-(tout$total.edge*tout$total.area))/(tout$n.patches*sum(log(out.patch$perimeter^2))-tout$total.edge^2)) tout$mean.perim.area.ratio = mean(out.patch$perim.area.ratio) tout$sd.perim.area.ratio = sd(out.patch$perim.area.ratio) tout$min.perim.area.ratio = min(out.patch$perim.area.ratio) tout$max.perim.area.ratio = max(out.patch$perim.area.ratio) tout$mean.shape.index = mean(out.patch$shape.index,na.rm=T) tout$sd.shape.index = sd(out.patch$shape.index,na.rm=T) tout$min.shape.index = min(out.patch$shape.index,na.rm=T) tout$max.shape.index = max(out.patch$shape.index,na.rm=T) tout$mean.frac.dim.index = mean(out.patch$frac.dim.index,na.rm=T) tout$sd.frac.dim.index = sd(out.patch$frac.dim.index,na.rm=T) tout$min.frac.dim.index = min(out.patch$frac.dim.index,na.rm=T) tout$max.frac.dim.index = max(out.patch$frac.dim.index,na.rm=T) tout$total.core.area = sum(out.patch$core.area) tout$prop.landscape.core = tout$total.core.area / L.area tout$mean.patch.core.area = mean(out.patch$core.area) tout$sd.patch.core.area = sd(out.patch$core.area) tout$min.patch.core.area = min(out.patch$core.area) tout$max.patch.core.area = max(out.patch$core.area) tout$prop.like.adjacencies = sum(out.patch$n.edges.internal) / sum(out.patch$n.edges.internal+out.patch$n.edges.perimeter*2) tout$aggregation.index = aggregation.index(sum(out.patch$n.cell),sum(out.patch$n.edges.internal)/2) tout$lanscape.division.index = 1-sum((out.patch$n.cell / L.cell)^2) tout$splitting.index = L.area^2 / sum(out.patch$area^2) tout$effective.mesh.size = sum(out.patch$area^2) / L.area tout$patch.cohesion.index = ((1-(sum(out.patch$n.edges.internal)/sum(out.patch$n.edges.internal*sqrt(out.patch$n.cell))) )*((1-1/sqrt(L.cell))/10))*100 #store in out out = rbind(out,as.data.frame(tout)) } return(out) } SDMTools/R/legend.gradient.r0000644000176000001440000000517112370121703015364 0ustar ripleyusers#' Legend Gradient #' #' \code{legend.gradient} creates and displays a gradient legend on a plot or #' image file. The place and size of the legend is defined by coordinates, #' previously identified. #' #' #' @param pnts x and y coordinates of the gradient location in the plot #' @param cols a set of 2 or more colors used in the image, to create the #' gradient #' @param limits to label the min and max values of the gradient in the legend #' @param title to specify the title of the legend #' @param ... other graphical parameters defined by image() or plot() #' @return nothing is returned, a gradient legend is added to a plot or a #' image. #' @author Lorena Falconi \email{lorefalconi@@gmail.com} #' @examples #' #' #' #define a simple binary matrix #' tmat = { matrix(c( 0,0,0,1,0,0,1,1,0,1, #' 0,0,1,0,1,0,0,0,0,0, #' 0,1,NA,1,0,1,0,0,0,1, #' 1,0,1,1,1,0,1,0,0,1, #' 0,1,0,1,0,1,0,0,0,1, #' 0,0,1,0,1,0,0,1,1,0, #' 1,0,0,1,0,0,1,0,0,0, #' 0,1,0,0,0,1,0,NA,NA,NA, #' 0,0,1,1,1,0,0,NA,NA,NA, #' 1,1,1,0,0,0,0,NA,NA,NA),nr=10,byrow=TRUE) } #' #' #do the connected component labeling #' tasc = ConnCompLabel(tmat) #' #' # Create a color ramp #' colormap=c("grey","yellow","yellowgreen","olivedrab1","lightblue4") #' #' #create an image #' image(tasc,col=colormap, axes=FALSE, xlab="", ylab="", ann=FALSE) #' #' #points for the gradient legend #' pnts = cbind(x =c(0.8,0.9,0.9,0.8), y =c(1.0,1.0,0.8,0.8)) #' #' #create the gradient legend #' legend.gradient(pnts,colormap,c("Low","High")) #' #' #' @export legend.gradient legend.gradient = function(pnts,cols=heat.colors(100),limits=c(0,1), title='Legend', ...){ pnts = try(as.matrix(pnts),silent=T) if(!is.matrix(pnts)) stop("you must have a 4x2 matrix") if(dim(pnts)[1]!=4 || dim (pnts)[2]!=2) stop ("Matrix must have dimensions of 4 rows and 2 columms") if(length(cols)<2) stop("You must have 2 or more colors") #break up the min and max into a number of values == length(cols) yvals = seq(min(pnts[, 2]), max(pnts[, 2]), length=length(cols)+1) #cycle through each of the yvals and create polygons for (i in 1:length(cols)){ #create the polygon for that color polygon(x=pnts[,1],y=c(yvals[i],yvals[i],yvals[i+1],yvals[i+1]),col=cols[i],border=F) } #add the text text(max(pnts[,1]),min(pnts[,2]),labels=limits[1],pos=4,...) text(max(pnts[,1]),max(pnts[,2]),labels=limits[2],pos=4,...) text(min(pnts[,1]),max(pnts[,2]),labels=title,adj=c(0,-1),...) } SDMTools/R/auc.R0000644000176000001440000000407612370121703013045 0ustar ripleyusers#' Area Under the Curve of the Reciever Operating Curve #' #' \code{auc} estimates the AUC of the ROC using a Mann-Whitney U statistic. #' \cr \cr \bold{Note:} this method will exclude any missing data. #' #' #' @param obs a vector of observed values which must be 0 for absences and 1 #' for occurrences #' @param pred a vector of the same length as \code{obs} representing the #' predicted values. Values must be between 0 & 1 representing a likelihood. #' @return Returns a single value represting the AUC value. #' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com} #' @seealso \code{\link{Kappa}}, \code{\link{omission}}, #' \code{\link{sensitivity}}, \code{\link{specificity}}, #' \code{\link{prop.correct}}, \code{\link{confusion.matrix}}, #' \code{\link{accuracy}} #' @examples #' #' #' #create some data #' obs = c(sample(c(0,1),20,replace=TRUE),NA) #' pred = runif(length(obs),0,1) #' #' #calculate AUC from the random data #' auc(obs,pred) #' #' #calculate an example 'perfect' AUC #' obs = obs[order(obs)] #' pred = pred[order(pred)] #' auc(obs,pred) #' #' #' @export auc <- function(obs,pred){ #input checks if (length(obs)!=length(pred)) stop('this requires the same number of observed & predicted values') #deal with NAs if (length(which(is.na(c(obs,pred))))>0) { na = union(which(is.na(obs)),which(is.na(pred))) warning(length(na),' data points removed due to missing data') obs = obs[-na]; pred = pred[-na] } #define the n's and do checks n = length(obs); if (length(which(obs %in% c(0,1)))!=n) stop('observed values must be 0 or 1') #ensure observed are values 0 or 1 n1 = as.double(length(which(obs==1))); n0 = as.double(length(which(obs==0))) if (n1==0 || n1==n) return( NaN ) #if all observed 1's or 0's return NaN ###calc AUC pred0 = pred[which(obs==0)] pred1 = pred[which(obs==1)] ranks = rank(pred,ties.method='average')#define ranks ranks0 = ranks[which(obs==0)] ranks1 = ranks[which(obs==1)] U = n0*n1 + (n0*(n0+1))/2 - sum(ranks0) #calc U stat AUC = U/(n0*n1) #estimate AUC if (AUC<.5) AUC = 1-AUC #return the auc value return(AUC) } SDMTools/R/extract.data.R0000644000176000001440000000414212370121703014651 0ustar ripleyusers#' Spatial Join of Points with Raster Grids #' #' \code{extract.data} extracts data from raster object of class 'asc' (this #' and the adehabitat package), 'RasterLayer' (raster package) or #' 'SpatialGridDataFrame' (sp package) at specified locations. This represents #' a faster version of 'join.asc' of the adehabitat package that assumes all #' locations are within the map extents. \cr \cr \bold{Note:} there is no #' interpolation done here. The values reported are simply the values of the #' raster cell the point falls into. #' #' Implements a faster version of 'join.asc' from the adehabitat package. \cr #' \cr \bold{NOTE:} this assumes all locations are within the extent of the #' raster map. Values outside the extent will be given a value of NA. #' #' @param pts a two-column data frame or matrix with the x and y coordinates of #' the locations of interest. #' @param x a raster matrix of class 'asc' (this and the adehabitat package), #' 'RasterLayer' (raster package) or 'SpatialGridDataFrame' (sp package) #' @return Returns a vector equal in length to the number of locations in pnts. #' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com} #' @examples #' #' #' #create a simple object of class 'asc' #' tasc = as.asc(matrix(1:50,nr=50,nc=50)); print(tasc) #' #' #define some point locations #' points = data.frame(x=runif(25,1,50),y=runif(25,1,50)) #' #' #extract the data #' points$values = extract.data(points,tasc) #' #' #show the data #' print(points) #' #' #' @export extract.data extract.data <- function(pts, x) { #check if raster from sp or raster package and convert if necessary if (any(class(x) %in% 'RasterLayer')) x = asc.from.raster(x) if (any(class(x) == 'SpatialGridDataFrame')) x = asc.from.sp(x) if (class(x) != 'asc') stop('matrix must be of class "asc"') #check to ensure x is of class asc xy <- getXYcoords(x) xy$x <- xy$x + attr(x, "cellsize")/2 xy$x <- c(xy$x[1] - attr(x, "cellsize"),xy$x) xy$y <- xy$y + attr(x, "cellsize")/2 xy$y <- c(xy$y[1] - attr(x, "cellsize"),xy$y) xf <- as.numeric(cut(pts[, 1], xy$x)) yf <- as.numeric(cut(pts[, 2], xy$y)) return(x[cbind(xf,yf)]) } SDMTools/R/confusion.matrix.R0000644000176000001440000000415412370121703015600 0ustar ripleyusers#' Confusion Matrix #' #' \code{confusion.matrix} calculates a confusion matrix. \cr \cr \bold{Note:} #' this method will exclude any missing data #' #' #' @param obs a vector of observed values which must be 0 for absences and 1 #' for occurrences #' @param pred a vector of the same length as \code{obs} representing the #' predicted values. Values must be between 0 & 1 prepresenting a likelihood. #' @param threshold a single threshold value between 0 & 1 #' @return Returns a confusion matrix (table) of class 'confusion.matrix' #' representing counts of true & false presences and absences. #' @author Jeremy VanDerWal \email{jjvanderwal@@gmail.com} #' @seealso \code{\link{auc}}, \code{\link{Kappa}}, \code{\link{omission}}, #' \code{\link{sensitivity}}, \code{\link{specificity}}, #' \code{\link{prop.correct}}, \code{\link{accuracy}} #' @examples #' #' #' #create some data #' obs = c(sample(c(0,1),20,replace=TRUE),NA); obs = obs[order(obs)] #' pred = runif(length(obs),0,1); pred = pred[order(pred)] #' #' #calculate the confusion matrix #' confusion.matrix(obs,pred,threshold=0.5) #' #' #' @export confusion.matrix <- function(obs,pred,threshold=0.5){ #input checks if (length(obs)!=length(pred)) stop('this requires the same number of observed & predicted values') if (!(length(threshold)==1 & threshold[1]<=1 & threshold[1]>=0)) stop('inappropriate threshold value... must be a single value between 0 & 1.') n = length(obs); if (length(which(obs %in% c(0,1,NA)))!=n) stop('observed values must be 0 or 1') #ensure observed are values 0 or 1 #deal with NAs if (length(which(is.na(c(obs,pred))))>0) { na = union(which(is.na(obs)),which(is.na(pred))) warning(length(na),' data points removed due to missing data') obs = obs[-na]; pred = pred[-na] } #apply the threshold to the prediction if (threshold==0) { pred[which(pred>threshold)] = 1; pred[which(pred<=threshold)] = 0 } else { pred[which(pred>=threshold)] = 1; pred[which(pred0) { na = union(which(is.na(obs)),which(is.na(pred))) warning(length(na),' data points removed due to missing data') obs = obs[-na]; pred = pred[-na] } #define the n's and do checks n = length(obs); if (length(which(obs %in% c(0,1)))!=n) stop('observed values must be 0 or 1') #ensure observed are values 0 or 1 # check / setup the threshold values if (length(threshold)==1 & threshold[1]<=1 & threshold[1]>=0) { thresholds = threshold } else if (length(threshold)==1 & threshold[1]>1) { thresholds = seq(0,1,length=threshold) } else if (length(threshold)>1 & max(threshold)<=1 & min(threshold)>=0) { thresholds = threshold } else { stop('inappropriate threshold values used as input. See help file.') } #cycle through each of the helpfiles out = data.frame(threshold=as.double(thresholds),AUC=NA,omission.rate=NA,sensitivity=NA, specificity=NA,prop.correct=NA,Kappa=NA) for (ii in 1:length(thresholds)) { threshold = thresholds[ii] #convert preditions to binary based on threshold bin.pred = pred; if (threshold==0) { bin.pred[which(bin.pred>threshold)] = 1; bin.pred[which(bin.pred<=threshold)] = 0 } else { bin.pred[which(bin.pred>=threshold)] = 1; bin.pred[which(bin.pred max.num.cells #' tcost[which(tcost>2)]=NA #' #' #no cost matrix #' tcost1 = tcost; tcost1[is.finite(tcost1)]=1; print(tcost1) #' out = lcmw(tasc, tcost1, 2); print(out[1:10,1:10]) #' #' #linear cost #' tcost = tcost/2; print(tcost) #' out = lcmw(tasc, tcost, 2); print(out[1:10,1:10]) #' #' #' @export #' @useDynLib SDMTools getmin movewindow lcmw <- function(mat,mw,mnc) { #check input for class for returning info if (class(mat) == 'asc') { attrib = attributes(mat) } else if (any(class(mat) %in% 'RasterLayer')) { attrib = mat; mat = asc.from.raster(mat) } else if (any(class(mat) == 'SpatialGridDataFrame')) { attrib = mat; mat = asc.from.sp(mat) } else { attrib = attributes(mat) } #buffer edges by full number of distance cells #define the shifts in mat to account for a moving window... vals = expand.grid(Y=-mnc:mnc,X=-mnc:mnc) #define all shifts vals$cost = mw[(mnc+1)+cbind(vals$Y,vals$X)];vals=na.omit(vals) #extract the cost of associated with the move nrow.vals = nrow(vals) #cycle through and get the output if (nrow.vals <5000) { return(.Call("movewindow",mat,as.integer(vals$X),as.integer(vals$Y),as.numeric(vals$cost),PACKAGE='SDMTools')) } else { num.subsets = nrow.vals%/%2000 #run the first set of 2000 tmin = 1; tmax = 2000 #print a status cat('0%...') #create the first part of the moving window out = .Call("movewindow",mat,as.integer(vals$X[tmin:tmax]),as.integer(vals$Y[tmin:tmax]),as.numeric(vals$cost[tmin:tmax]),PACKAGE='SDMTools') #cycle through the remaining data for (i in 1:num.subsets){ if (i