Seurat/ 0000755 0001762 0000144 00000000000 14525771316 011530 5 ustar ligges users Seurat/NAMESPACE 0000644 0001762 0000144 00000061713 14525500037 012745 0 ustar ligges users # Generated by roxygen2: do not edit by hand
S3method("SCTResults<-",SCTAssay)
S3method("SCTResults<-",SCTModel)
S3method("[",SlideSeq)
S3method("[",VisiumV1)
S3method("levels<-",SCTAssay)
S3method(.CalcN,IterableMatrix)
S3method(AnnotateAnchors,IntegrationAnchorSet)
S3method(AnnotateAnchors,TransferAnchorSet)
S3method(AnnotateAnchors,default)
S3method(Cells,SCTAssay)
S3method(Cells,SCTModel)
S3method(Cells,STARmap)
S3method(Cells,SlideSeq)
S3method(Cells,VisiumV1)
S3method(Features,SCTAssay)
S3method(Features,SCTModel)
S3method(FetchData,VisiumV1)
S3method(FindClusters,Seurat)
S3method(FindClusters,default)
S3method(FindMarkers,Assay)
S3method(FindMarkers,DimReduc)
S3method(FindMarkers,SCTAssay)
S3method(FindMarkers,Seurat)
S3method(FindMarkers,StdAssay)
S3method(FindMarkers,default)
S3method(FindNeighbors,Assay)
S3method(FindNeighbors,Seurat)
S3method(FindNeighbors,default)
S3method(FindNeighbors,dist)
S3method(FindSpatiallyVariableFeatures,Assay)
S3method(FindSpatiallyVariableFeatures,Seurat)
S3method(FindSpatiallyVariableFeatures,StdAssay)
S3method(FindSpatiallyVariableFeatures,default)
S3method(FindVariableFeatures,Assay)
S3method(FindVariableFeatures,SCTAssay)
S3method(FindVariableFeatures,Seurat)
S3method(FindVariableFeatures,StdAssay)
S3method(FindVariableFeatures,V3Matrix)
S3method(FindVariableFeatures,default)
S3method(FoldChange,Assay)
S3method(FoldChange,DimReduc)
S3method(FoldChange,SCTAssay)
S3method(FoldChange,Seurat)
S3method(FoldChange,StdAssay)
S3method(FoldChange,default)
S3method(GetAssay,Seurat)
S3method(GetImage,STARmap)
S3method(GetImage,SlideSeq)
S3method(GetImage,VisiumV1)
S3method(GetTissueCoordinates,STARmap)
S3method(GetTissueCoordinates,SlideSeq)
S3method(GetTissueCoordinates,VisiumV1)
S3method(HVFInfo,SCTAssay)
S3method(IntegrateEmbeddings,IntegrationAnchorSet)
S3method(IntegrateEmbeddings,TransferAnchorSet)
S3method(LeverageScore,Assay)
S3method(LeverageScore,Seurat)
S3method(LeverageScore,StdAssay)
S3method(LeverageScore,default)
S3method(LogNormalize,IterableMatrix)
S3method(LogNormalize,V3Matrix)
S3method(LogNormalize,data.frame)
S3method(LogNormalize,default)
S3method(MappingScore,AnchorSet)
S3method(MappingScore,default)
S3method(NormalizeData,Assay)
S3method(NormalizeData,Seurat)
S3method(NormalizeData,StdAssay)
S3method(NormalizeData,V3Matrix)
S3method(NormalizeData,default)
S3method(ProjectCellEmbeddings,Assay)
S3method(ProjectCellEmbeddings,IterableMatrix)
S3method(ProjectCellEmbeddings,SCTAssay)
S3method(ProjectCellEmbeddings,Seurat)
S3method(ProjectCellEmbeddings,StdAssay)
S3method(ProjectCellEmbeddings,default)
S3method(ProjectUMAP,DimReduc)
S3method(ProjectUMAP,Seurat)
S3method(ProjectUMAP,default)
S3method(PseudobulkExpression,Assay)
S3method(PseudobulkExpression,Seurat)
S3method(PseudobulkExpression,StdAssay)
S3method(Radius,STARmap)
S3method(Radius,SlideSeq)
S3method(Radius,VisiumV1)
S3method(RenameCells,SCTAssay)
S3method(RenameCells,STARmap)
S3method(RenameCells,SlideSeq)
S3method(RenameCells,VisiumV1)
S3method(RunCCA,Seurat)
S3method(RunCCA,default)
S3method(RunGraphLaplacian,Seurat)
S3method(RunGraphLaplacian,default)
S3method(RunICA,Assay)
S3method(RunICA,Seurat)
S3method(RunICA,default)
S3method(RunLDA,Assay)
S3method(RunLDA,Seurat)
S3method(RunLDA,default)
S3method(RunPCA,Assay)
S3method(RunPCA,Seurat)
S3method(RunPCA,Seurat5)
S3method(RunPCA,StdAssay)
S3method(RunPCA,default)
S3method(RunSLSI,Assay)
S3method(RunSLSI,Seurat)
S3method(RunSLSI,default)
S3method(RunSPCA,Assay)
S3method(RunSPCA,Assay5)
S3method(RunSPCA,Seurat)
S3method(RunSPCA,default)
S3method(RunTSNE,DimReduc)
S3method(RunTSNE,Seurat)
S3method(RunTSNE,dist)
S3method(RunTSNE,matrix)
S3method(RunUMAP,Graph)
S3method(RunUMAP,Neighbor)
S3method(RunUMAP,Seurat)
S3method(RunUMAP,default)
S3method(SCTResults,SCTAssay)
S3method(SCTResults,SCTModel)
S3method(SCTResults,Seurat)
S3method(SCTransform,Assay)
S3method(SCTransform,IterableMatrix)
S3method(SCTransform,Seurat)
S3method(SCTransform,StdAssay)
S3method(SCTransform,default)
S3method(ScaleData,Assay)
S3method(ScaleData,IterableMatrix)
S3method(ScaleData,Seurat)
S3method(ScaleData,StdAssay)
S3method(ScaleData,default)
S3method(ScaleFactors,VisiumV1)
S3method(ScoreJackStraw,DimReduc)
S3method(ScoreJackStraw,JackStrawData)
S3method(ScoreJackStraw,Seurat)
S3method(VST,IterableMatrix)
S3method(VST,default)
S3method(VST,dgCMatrix)
S3method(VST,matrix)
S3method(VariableFeatures,SCTAssay)
S3method(VariableFeatures,SCTModel)
S3method(as.CellDataSet,Seurat)
S3method(as.Seurat,CellDataSet)
S3method(as.Seurat,SingleCellExperiment)
S3method(as.SingleCellExperiment,Seurat)
S3method(as.data.frame,Matrix)
S3method(as.sparse,H5Group)
S3method(as.sparse,IterableMatrix)
S3method(components,SCTAssay)
S3method(dim,STARmap)
S3method(dim,SlideSeq)
S3method(dim,VisiumV1)
S3method(fortify,Centroids)
S3method(fortify,Molecules)
S3method(fortify,Segmentation)
S3method(levels,SCTAssay)
S3method(merge,SCTAssay)
S3method(subset,AnchorSet)
S3method(subset,SCTAssay)
S3method(subset,STARmap)
S3method(subset,SlideSeq)
S3method(subset,VisiumV1)
export("%iff%")
export("%||%")
export("DefaultAssay<-")
export("Idents<-")
export("Index<-")
export("JS<-")
export("Key<-")
export("Loadings<-")
export("Misc<-")
export("Project<-")
export("SCTResults<-")
export("Tool<-")
export("VariableFeatures<-")
export(AddAzimuthResults)
export(AddMetaData)
export(AddModuleScore)
export(AggregateExpression)
export(AnnotateAnchors)
export(Assays)
export(AugmentPlot)
export(AutoPointSize)
export(AverageExpression)
export(BGTextColor)
export(BarcodeInflectionsPlot)
export(BlackAndWhite)
export(BlueAndRed)
export(BoldTitle)
export(BridgeCellsRepresentation)
export(BuildClusterTree)
export(BuildNicheAssay)
export(CCAIntegration)
export(CalcPerturbSig)
export(CalculateBarcodeInflections)
export(CaseMatch)
export(CellCycleScoring)
export(CellScatter)
export(CellSelector)
export(Cells)
export(CellsByIdentities)
export(CenterTitle)
export(CollapseEmbeddingOutliers)
export(CollapseSpeciesExpressionMatrix)
export(ColorDimSplit)
export(CombinePlots)
export(Command)
export(CountSketch)
export(CreateAssayObject)
export(CreateCategoryMatrix)
export(CreateDimReducObject)
export(CreateSCTAssayObject)
export(CreateSeuratObject)
export(CustomDistance)
export(CustomPalette)
export(DEenrichRPlot)
export(DarkTheme)
export(DefaultAssay)
export(DietSeurat)
export(DimHeatmap)
export(DimPlot)
export(DiscretePalette)
export(Distances)
export(DoHeatmap)
export(DotPlot)
export(ElbowPlot)
export(Embeddings)
export(ExpMean)
export(ExpSD)
export(ExpVar)
export(FastRPCAIntegration)
export(FastRowScale)
export(FeatureLocator)
export(FeaturePlot)
export(FeatureScatter)
export(FetchData)
export(FetchResiduals)
export(FilterSlideSeq)
export(FindAllMarkers)
export(FindBridgeIntegrationAnchors)
export(FindBridgeTransferAnchors)
export(FindClusters)
export(FindConservedMarkers)
export(FindIntegrationAnchors)
export(FindMarkers)
export(FindMultiModalNeighbors)
export(FindNeighbors)
export(FindSpatiallyVariableFeatures)
export(FindSubCluster)
export(FindTransferAnchors)
export(FindVariableFeatures)
export(FoldChange)
export(FontSize)
export(GaussianSketch)
export(GeneSymbolThesarus)
export(GetAssay)
export(GetAssayData)
export(GetImage)
export(GetIntegrationData)
export(GetResidual)
export(GetTissueCoordinates)
export(GetTransferPredictions)
export(GroupCorrelation)
export(GroupCorrelationPlot)
export(HTODemux)
export(HTOHeatmap)
export(HVFInfo)
export(HarmonyIntegration)
export(HoverLocator)
export(IFeaturePlot)
export(ISpatialDimPlot)
export(ISpatialFeaturePlot)
export(Idents)
export(ImageDimPlot)
export(ImageFeaturePlot)
export(Images)
export(Index)
export(Indices)
export(IntegrateData)
export(IntegrateEmbeddings)
export(IntegrateLayers)
export(Intensity)
export(IsGlobal)
export(JS)
export(JackStraw)
export(JackStrawPlot)
export(JointPCAIntegration)
export(Key)
export(L2CCA)
export(L2Dim)
export(LabelClusters)
export(LabelPoints)
export(LeverageScore)
export(LinkedDimPlot)
export(LinkedFeaturePlot)
export(Load10X_Spatial)
export(LoadAkoya)
export(LoadAnnoyIndex)
export(LoadCurioSeeker)
export(LoadHuBMAPCODEX)
export(LoadNanostring)
export(LoadSTARmap)
export(LoadVizgen)
export(LoadXenium)
export(Loadings)
export(LocalStruct)
export(LogNormalize)
export(LogSeuratCommand)
export(LogVMR)
export(Luminance)
export(MULTIseqDemux)
export(MapQuery)
export(MappingScore)
export(MetaFeature)
export(MinMax)
export(Misc)
export(MixingMetric)
export(MixscapeHeatmap)
export(MixscapeLDA)
export(NNPlot)
export(NNtoGraph)
export(Neighbors)
export(NoAxes)
export(NoGrid)
export(NoLegend)
export(NormalizeData)
export(PCAPlot)
export(PCASigGenes)
export(PCHeatmap)
export(PercentAbove)
export(PercentageFeatureSet)
export(PlotClusterTree)
export(PlotPerturbScore)
export(PolyDimPlot)
export(PolyFeaturePlot)
export(PredictAssay)
export(PrepLDA)
export(PrepSCTFindMarkers)
export(PrepSCTIntegration)
export(PrepareBridgeReference)
export(Project)
export(ProjectCellEmbeddings)
export(ProjectData)
export(ProjectDim)
export(ProjectDimReduc)
export(ProjectIntegration)
export(ProjectUMAP)
export(PseudobulkExpression)
export(PurpleAndYellow)
export(RPCAIntegration)
export(Radius)
export(Read10X)
export(Read10X_Image)
export(Read10X_h5)
export(Read10X_probe_metadata)
export(ReadAkoya)
export(ReadMtx)
export(ReadNanostring)
export(ReadParseBio)
export(ReadSTARsolo)
export(ReadSlideSeq)
export(ReadVitessce)
export(ReadVizgen)
export(ReadXenium)
export(Reductions)
export(RegroupIdents)
export(RelativeCounts)
export(RenameCells)
export(RenameIdents)
export(ReorderIdent)
export(RestoreLegend)
export(RidgePlot)
export(RotatedAxis)
export(RowMergeSparseMatrices)
export(RunCCA)
export(RunGraphLaplacian)
export(RunICA)
export(RunLDA)
export(RunMarkVario)
export(RunMixscape)
export(RunMoransI)
export(RunPCA)
export(RunSLSI)
export(RunSPCA)
export(RunTSNE)
export(RunUMAP)
export(SCTResults)
export(SCTransform)
export(SVFInfo)
export(SampleUMI)
export(SaveAnnoyIndex)
export(ScaleData)
export(ScaleFactors)
export(ScoreJackStraw)
export(SelectIntegrationFeatures)
export(SelectIntegrationFeatures5)
export(SelectSCTIntegrationFeatures)
export(SetAssayData)
export(SetIdent)
export(SetIntegrationData)
export(SetQuantile)
export(SeuratAxes)
export(SeuratTheme)
export(SingleCorPlot)
export(SingleDimPlot)
export(SingleExIPlot)
export(SingleImageMap)
export(SingleImagePlot)
export(SingleRasterMap)
export(SingleSpatialPlot)
export(SketchData)
export(SpatialDimPlot)
export(SpatialFeaturePlot)
export(SpatialPlot)
export(SpatialTheme)
export(SpatiallyVariableFeatures)
export(SplitObject)
export(StashIdent)
export(Stdev)
export(SubsetByBarcodeInflections)
export(TSNEPlot)
export(Tool)
export(TopCells)
export(TopFeatures)
export(TopNeighbors)
export(TransferData)
export(TransferSketchLabels)
export(UMAPPlot)
export(UnSketchEmbeddings)
export(UpdateSCTAssays)
export(UpdateSeuratObject)
export(UpdateSymbolList)
export(VST)
export(VariableFeaturePlot)
export(VariableFeatures)
export(VizDimLoadings)
export(VlnPlot)
export(WhichCells)
export(WhiteBackground)
export(as.CellDataSet)
export(as.Graph)
export(as.Neighbor)
export(as.Seurat)
export(as.SingleCellExperiment)
export(as.sparse)
export(components)
export(scalefactors)
exportClasses(AnchorSet)
exportClasses(Assay)
exportClasses(BridgeReferenceSet)
exportClasses(DimReduc)
exportClasses(Graph)
exportClasses(IntegrationAnchorSet)
exportClasses(IntegrationData)
exportClasses(JackStrawData)
exportClasses(ModalityWeights)
exportClasses(Neighbor)
exportClasses(Seurat)
exportClasses(SeuratCommand)
exportClasses(SpatialImage)
exportClasses(TransferAnchorSet)
exportClasses(VisiumV1)
importClassesFrom(Matrix,dgCMatrix)
importClassesFrom(SeuratObject,Assay)
importClassesFrom(SeuratObject,DimReduc)
importClassesFrom(SeuratObject,Graph)
importClassesFrom(SeuratObject,JackStrawData)
importClassesFrom(SeuratObject,Neighbor)
importClassesFrom(SeuratObject,Seurat)
importClassesFrom(SeuratObject,SeuratCommand)
importClassesFrom(SeuratObject,SpatialImage)
importFrom(KernSmooth,bkde)
importFrom(MASS,ginv)
importFrom(MASS,glm.nb)
importFrom(MASS,lda)
importFrom(Matrix,Matrix)
importFrom(Matrix,as.matrix)
importFrom(Matrix,colMeans)
importFrom(Matrix,colSums)
importFrom(Matrix,crossprod)
importFrom(Matrix,diag)
importFrom(Matrix,qrR)
importFrom(Matrix,readMM)
importFrom(Matrix,rowMeans)
importFrom(Matrix,rowSums)
importFrom(Matrix,sparse.model.matrix)
importFrom(Matrix,sparseMatrix)
importFrom(Matrix,summary)
importFrom(Matrix,t)
importFrom(RANN,nn2)
importFrom(RColorBrewer,brewer.pal)
importFrom(RColorBrewer,brewer.pal.info)
importFrom(ROCR,performance)
importFrom(ROCR,prediction)
importFrom(RSpectra,eigs_sym)
importFrom(Rcpp,evalCpp)
importFrom(RcppAnnoy,AnnoyAngular)
importFrom(RcppAnnoy,AnnoyEuclidean)
importFrom(RcppAnnoy,AnnoyHamming)
importFrom(RcppAnnoy,AnnoyManhattan)
importFrom(RcppHNSW,hnsw_build)
importFrom(RcppHNSW,hnsw_search)
importFrom(Rtsne,Rtsne)
importFrom(SeuratObject,"%!NA%")
importFrom(SeuratObject,"%NA%")
importFrom(SeuratObject,"%iff%")
importFrom(SeuratObject,"%||%")
importFrom(SeuratObject,"DefaultAssay<-")
importFrom(SeuratObject,"DefaultLayer<-")
importFrom(SeuratObject,"Idents<-")
importFrom(SeuratObject,"Index<-")
importFrom(SeuratObject,"JS<-")
importFrom(SeuratObject,"Key<-")
importFrom(SeuratObject,"LayerData<-")
importFrom(SeuratObject,"Loadings<-")
importFrom(SeuratObject,"Misc<-")
importFrom(SeuratObject,"Project<-")
importFrom(SeuratObject,"Tool<-")
importFrom(SeuratObject,"VariableFeatures<-")
importFrom(SeuratObject,.CalcN)
importFrom(SeuratObject,.CheckFmargin)
importFrom(SeuratObject,.FilterObjects)
importFrom(SeuratObject,.IsFutureSeurat)
importFrom(SeuratObject,.MARGIN)
importFrom(SeuratObject,.PropagateList)
importFrom(SeuratObject,.SparseSlots)
importFrom(SeuratObject,AddMetaData)
importFrom(SeuratObject,Assays)
importFrom(SeuratObject,AttachDeps)
importFrom(SeuratObject,Boundaries)
importFrom(SeuratObject,CastAssay)
importFrom(SeuratObject,Cells)
importFrom(SeuratObject,CellsByIdentities)
importFrom(SeuratObject,Command)
importFrom(SeuratObject,CreateAssayObject)
importFrom(SeuratObject,CreateCentroids)
importFrom(SeuratObject,CreateDimReducObject)
importFrom(SeuratObject,CreateFOV)
importFrom(SeuratObject,CreateSegmentation)
importFrom(SeuratObject,CreateSeuratObject)
importFrom(SeuratObject,DefaultAssay)
importFrom(SeuratObject,DefaultBoundary)
importFrom(SeuratObject,DefaultDimReduc)
importFrom(SeuratObject,DefaultFOV)
importFrom(SeuratObject,DefaultLayer)
importFrom(SeuratObject,Distances)
importFrom(SeuratObject,Embeddings)
importFrom(SeuratObject,EmptyDF)
importFrom(SeuratObject,Features)
importFrom(SeuratObject,FetchData)
importFrom(SeuratObject,GetAssayData)
importFrom(SeuratObject,GetImage)
importFrom(SeuratObject,GetTissueCoordinates)
importFrom(SeuratObject,HVFInfo)
importFrom(SeuratObject,Idents)
importFrom(SeuratObject,Images)
importFrom(SeuratObject,Index)
importFrom(SeuratObject,Indices)
importFrom(SeuratObject,IsGlobal)
importFrom(SeuratObject,IsSparse)
importFrom(SeuratObject,JS)
importFrom(SeuratObject,JoinLayers)
importFrom(SeuratObject,Key)
importFrom(SeuratObject,Keys)
importFrom(SeuratObject,LayerData)
importFrom(SeuratObject,Layers)
importFrom(SeuratObject,Loadings)
importFrom(SeuratObject,LogSeuratCommand)
importFrom(SeuratObject,Misc)
importFrom(SeuratObject,Molecules)
importFrom(SeuratObject,Neighbors)
importFrom(SeuratObject,Overlay)
importFrom(SeuratObject,PackageCheck)
importFrom(SeuratObject,Project)
importFrom(SeuratObject,Radius)
importFrom(SeuratObject,Reductions)
importFrom(SeuratObject,RenameAssays)
importFrom(SeuratObject,RenameCells)
importFrom(SeuratObject,RenameIdents)
importFrom(SeuratObject,ReorderIdent)
importFrom(SeuratObject,RowMergeSparseMatrices)
importFrom(SeuratObject,SVFInfo)
importFrom(SeuratObject,SetAssayData)
importFrom(SeuratObject,SetIdent)
importFrom(SeuratObject,SparseEmptyMatrix)
importFrom(SeuratObject,SpatiallyVariableFeatures)
importFrom(SeuratObject,StashIdent)
importFrom(SeuratObject,Stdev)
importFrom(SeuratObject,StitchMatrix)
importFrom(SeuratObject,Tool)
importFrom(SeuratObject,UpdateSeuratObject)
importFrom(SeuratObject,UpdateSlots)
importFrom(SeuratObject,VariableFeatures)
importFrom(SeuratObject,WhichCells)
importFrom(SeuratObject,as.Graph)
importFrom(SeuratObject,as.Neighbor)
importFrom(SeuratObject,as.Seurat)
importFrom(SeuratObject,as.sparse)
importFrom(cluster,clara)
importFrom(cowplot,get_legend)
importFrom(cowplot,plot_grid)
importFrom(cowplot,theme_cowplot)
importFrom(fastDummies,dummy_cols)
importFrom(fitdistrplus,fitdist)
importFrom(future,nbrOfWorkers)
importFrom(future,plan)
importFrom(future.apply,future_lapply)
importFrom(future.apply,future_sapply)
importFrom(generics,components)
importFrom(ggplot2,Geom)
importFrom(ggplot2,GeomPolygon)
importFrom(ggplot2,GeomViolin)
importFrom(ggplot2,aes)
importFrom(ggplot2,aes_string)
importFrom(ggplot2,alpha)
importFrom(ggplot2,annotation_raster)
importFrom(ggplot2,coord_cartesian)
importFrom(ggplot2,coord_fixed)
importFrom(ggplot2,coord_flip)
importFrom(ggplot2,cut_number)
importFrom(ggplot2,discrete_scale)
importFrom(ggplot2,draw_key_point)
importFrom(ggplot2,dup_axis)
importFrom(ggplot2,element_blank)
importFrom(ggplot2,element_line)
importFrom(ggplot2,element_rect)
importFrom(ggplot2,element_text)
importFrom(ggplot2,facet_grid)
importFrom(ggplot2,facet_wrap)
importFrom(ggplot2,fortify)
importFrom(ggplot2,geom_abline)
importFrom(ggplot2,geom_bar)
importFrom(ggplot2,geom_blank)
importFrom(ggplot2,geom_boxplot)
importFrom(ggplot2,geom_density)
importFrom(ggplot2,geom_hline)
importFrom(ggplot2,geom_jitter)
importFrom(ggplot2,geom_label)
importFrom(ggplot2,geom_line)
importFrom(ggplot2,geom_point)
importFrom(ggplot2,geom_polygon)
importFrom(ggplot2,geom_raster)
importFrom(ggplot2,geom_rect)
importFrom(ggplot2,geom_smooth)
importFrom(ggplot2,geom_text)
importFrom(ggplot2,geom_tile)
importFrom(ggplot2,geom_violin)
importFrom(ggplot2,geom_vline)
importFrom(ggplot2,ggplot)
importFrom(ggplot2,ggplot_build)
importFrom(ggplot2,ggproto)
importFrom(ggplot2,ggproto_parent)
importFrom(ggplot2,ggsave)
importFrom(ggplot2,ggtitle)
importFrom(ggplot2,guide_colorbar)
importFrom(ggplot2,guide_legend)
importFrom(ggplot2,guides)
importFrom(ggplot2,labs)
importFrom(ggplot2,layer)
importFrom(ggplot2,layer_scales)
importFrom(ggplot2,margin)
importFrom(ggplot2,position_dodge)
importFrom(ggplot2,position_jitterdodge)
importFrom(ggplot2,scale_alpha)
importFrom(ggplot2,scale_alpha_manual)
importFrom(ggplot2,scale_alpha_ordinal)
importFrom(ggplot2,scale_color_brewer)
importFrom(ggplot2,scale_color_distiller)
importFrom(ggplot2,scale_color_gradient)
importFrom(ggplot2,scale_color_gradientn)
importFrom(ggplot2,scale_color_identity)
importFrom(ggplot2,scale_color_manual)
importFrom(ggplot2,scale_fill_brewer)
importFrom(ggplot2,scale_fill_continuous)
importFrom(ggplot2,scale_fill_gradient)
importFrom(ggplot2,scale_fill_gradientn)
importFrom(ggplot2,scale_fill_manual)
importFrom(ggplot2,scale_fill_viridis_c)
importFrom(ggplot2,scale_radius)
importFrom(ggplot2,scale_size)
importFrom(ggplot2,scale_x_continuous)
importFrom(ggplot2,scale_x_log10)
importFrom(ggplot2,scale_y_continuous)
importFrom(ggplot2,scale_y_discrete)
importFrom(ggplot2,scale_y_log10)
importFrom(ggplot2,stat_density2d)
importFrom(ggplot2,stat_qq)
importFrom(ggplot2,sym)
importFrom(ggplot2,theme)
importFrom(ggplot2,theme_classic)
importFrom(ggplot2,theme_void)
importFrom(ggplot2,transform_position)
importFrom(ggplot2,unit)
importFrom(ggplot2,vars)
importFrom(ggplot2,waiver)
importFrom(ggplot2,xlab)
importFrom(ggplot2,xlim)
importFrom(ggplot2,ylab)
importFrom(ggplot2,ylim)
importFrom(ggrepel,geom_label_repel)
importFrom(ggrepel,geom_text_repel)
importFrom(ggridges,geom_density_ridges)
importFrom(ggridges,theme_ridges)
importFrom(grDevices,as.raster)
importFrom(grDevices,col2rgb)
importFrom(grDevices,colorRampPalette)
importFrom(grDevices,rgb)
importFrom(graphics,axis)
importFrom(graphics,image)
importFrom(graphics,locator)
importFrom(graphics,par)
importFrom(graphics,plot)
importFrom(graphics,plot.new)
importFrom(graphics,smoothScatter)
importFrom(graphics,title)
importFrom(grid,addGrob)
importFrom(grid,editGrob)
importFrom(grid,gTree)
importFrom(grid,gpar)
importFrom(grid,grobName)
importFrom(grid,grobTree)
importFrom(grid,nullGrob)
importFrom(grid,pointsGrob)
importFrom(grid,rasterGrob)
importFrom(grid,unit)
importFrom(grid,viewport)
importFrom(httr,GET)
importFrom(httr,accept_json)
importFrom(httr,build_url)
importFrom(httr,content)
importFrom(httr,parse_url)
importFrom(httr,status_code)
importFrom(httr,timeout)
importFrom(ica,icafast)
importFrom(ica,icaimax)
importFrom(ica,icajade)
importFrom(igraph,E)
importFrom(igraph,graph.adjacency)
importFrom(igraph,graph_from_adj_list)
importFrom(igraph,graph_from_adjacency_matrix)
importFrom(igraph,plot.igraph)
importFrom(irlba,irlba)
importFrom(jsonlite,fromJSON)
importFrom(jsonlite,read_json)
importFrom(leiden,leiden)
importFrom(lifecycle,deprecate_soft)
importFrom(lifecycle,deprecate_stop)
importFrom(lifecycle,deprecate_warn)
importFrom(lifecycle,deprecated)
importFrom(lifecycle,is_present)
importFrom(lmtest,lrtest)
importFrom(matrixStats,rowAnyNAs)
importFrom(matrixStats,rowMeans2)
importFrom(matrixStats,rowSds)
importFrom(matrixStats,rowSums2)
importFrom(methods,"slot<-")
importFrom(methods,.hasSlot)
importFrom(methods,as)
importFrom(methods,getMethod)
importFrom(methods,is)
importFrom(methods,new)
importFrom(methods,setAs)
importFrom(methods,setClass)
importFrom(methods,setClassUnion)
importFrom(methods,setGeneric)
importFrom(methods,setMethod)
importFrom(methods,setOldClass)
importFrom(methods,setValidity)
importFrom(methods,signature)
importFrom(methods,slot)
importFrom(methods,slotNames)
importFrom(miniUI,gadgetTitleBar)
importFrom(miniUI,miniButtonBlock)
importFrom(miniUI,miniContentPanel)
importFrom(miniUI,miniPage)
importFrom(miniUI,miniTitleBarButton)
importFrom(patchwork,wrap_plots)
importFrom(pbapply,pbapply)
importFrom(pbapply,pblapply)
importFrom(pbapply,pbsapply)
importFrom(plotly,add_annotations)
importFrom(plotly,layout)
importFrom(plotly,plot_ly)
importFrom(plotly,raster2uri)
importFrom(png,readPNG)
importFrom(progressr,progressor)
importFrom(purrr,imap)
importFrom(reticulate,import)
importFrom(reticulate,py_module_available)
importFrom(reticulate,py_set_seed)
importFrom(rlang,"!!!")
importFrom(rlang,"!!")
importFrom(rlang,abort)
importFrom(rlang,arg_match)
importFrom(rlang,arg_match0)
importFrom(rlang,as_label)
importFrom(rlang,as_name)
importFrom(rlang,caller_env)
importFrom(rlang,check_installed)
importFrom(rlang,enquo)
importFrom(rlang,exec)
importFrom(rlang,inform)
importFrom(rlang,is_integerish)
importFrom(rlang,is_na)
importFrom(rlang,is_quosure)
importFrom(rlang,is_scalar_character)
importFrom(rlang,is_scalar_integerish)
importFrom(rlang,quo_get_env)
importFrom(rlang,quo_get_expr)
importFrom(rlang,sym)
importFrom(rlang,warn)
importFrom(scales,brewer_pal)
importFrom(scales,hue_pal)
importFrom(scales,rescale)
importFrom(scales,squish_infinite)
importFrom(scales,zero_range)
importFrom(scattermore,geom_scattermore)
importFrom(sctransform,correct_counts)
importFrom(sctransform,get_residual_var)
importFrom(sctransform,get_residuals)
importFrom(sctransform,vst)
importFrom(shiny,brushOpts)
importFrom(shiny,brushedPoints)
importFrom(shiny,clickOpts)
importFrom(shiny,fillRow)
importFrom(shiny,hoverOpts)
importFrom(shiny,nearPoints)
importFrom(shiny,observe)
importFrom(shiny,observeEvent)
importFrom(shiny,plotOutput)
importFrom(shiny,reactiveValues)
importFrom(shiny,renderPlot)
importFrom(shiny,renderPrint)
importFrom(shiny,runGadget)
importFrom(shiny,selectInput)
importFrom(shiny,sidebarPanel)
importFrom(shiny,sliderInput)
importFrom(shiny,stopApp)
importFrom(shiny,updateSelectInput)
importFrom(shiny,verbatimTextOutput)
importFrom(spatstat.explore,markvario)
importFrom(spatstat.geom,ppp)
importFrom(stats,aggregate)
importFrom(stats,anova)
importFrom(stats,approxfun)
importFrom(stats,as.dist)
importFrom(stats,as.formula)
importFrom(stats,ave)
importFrom(stats,coef)
importFrom(stats,complete.cases)
importFrom(stats,cor)
importFrom(stats,dist)
importFrom(stats,dnorm)
importFrom(stats,glm)
importFrom(stats,hclust)
importFrom(stats,kmeans)
importFrom(stats,lm)
importFrom(stats,loess)
importFrom(stats,median)
importFrom(stats,na.omit)
importFrom(stats,p.adjust)
importFrom(stats,pchisq)
importFrom(stats,pnbinom)
importFrom(stats,poisson)
importFrom(stats,prcomp)
importFrom(stats,predict)
importFrom(stats,prop.test)
importFrom(stats,quantile)
importFrom(stats,qunif)
importFrom(stats,relevel)
importFrom(stats,residuals)
importFrom(stats,rnorm)
importFrom(stats,runif)
importFrom(stats,sd)
importFrom(stats,setNames)
importFrom(stats,t.test)
importFrom(stats,var)
importFrom(stats,wilcox.test)
importFrom(tibble,tibble)
importFrom(tools,file_ext)
importFrom(tools,file_path_sans_ext)
importFrom(utils,argsAnywhere)
importFrom(utils,capture.output)
importFrom(utils,file_test)
importFrom(utils,globalVariables)
importFrom(utils,head)
importFrom(utils,isS3method)
importFrom(utils,isS3stdGeneric)
importFrom(utils,lsf.str)
importFrom(utils,methods)
importFrom(utils,packageVersion)
importFrom(utils,read.csv)
importFrom(utils,read.delim)
importFrom(utils,read.table)
importFrom(utils,setTxtProgressBar)
importFrom(utils,tail)
importFrom(utils,txtProgressBar)
importFrom(utils,write.table)
importFrom(uwot,umap)
importFrom(uwot,umap_transform)
importMethodsFrom(Matrix,t)
useDynLib(Seurat)
Seurat/LICENSE 0000644 0001762 0000144 00000000054 14525500037 012522 0 ustar ligges users YEAR: 2021
COPYRIGHT HOLDER: Seurat authors
Seurat/README.md 0000644 0001762 0000144 00000003007 14525500037 012775 0 ustar ligges users [](https://app.travis-ci.com:443/github/satijalab/seurat)
[](https://ci.appveyor.com/project/satijalab/seurat)
[](https://cran.r-project.org/package=Seurat)
[](https://cran.r-project.org/package=Seurat)
# Seurat v5
Seurat is an R toolkit for single cell genomics, developed and maintained by the Satija Lab at NYGC.
We are excited to release Seurat v5! This update introduces new functionality for spatial, multimodal, and scalable single-cell analysis.
Seurat v5 is backwards-compatible with previous versions, so that users will continue to be able to re-run existing workflows.
Instructions, documentation, and tutorials can be found at:
* https://satijalab.org/seurat
Seurat is also hosted on GitHub; you can view and clone the repository at
* https://github.com/satijalab/seurat
Seurat has been successfully installed on Mac OS X, Linux, and Windows, using the devtools package to install directly from GitHub
Improvements and new features will be added on a regular basis, please post on the [github page](https://github.com/satijalab/seurat) with any questions or if you would like to contribute
For a version history/changelog, please see the [NEWS file](https://github.com/satijalab/seurat/blob/master/NEWS.md).
Seurat/data/ 0000755 0001762 0000144 00000000000 14525500037 012427 5 ustar ligges users Seurat/data/cc.genes.rda 0000644 0001762 0000144 00000001010 14525500037 014574 0 ustar ligges users BZh91AY&SYJM* H ?* @ݻ[ib&!OI1 =!@j j&=Fh
LM4@#!
4H4҃&i=Cd JHWI#'& kSmj'nKdVZИil1MT"Fy.D UF.7 L4P!hb@.UE]c`[[Q35\VdQp
DYg9=XY*O>- zƀE߮!Pcvo.H'gIJۺl|J
i3dh8IXcHfK$J/ YaU=9֥M
|nR2,b_"iKF)B.$){-L5P"o?4rx&Xms)%I1,vǝ msY8/sg|KO)UriP Seurat/data/cc.genes.updated.2019.rda 0000644 0001762 0000144 00000001032 14525500037 016617 0 ustar ligges users BZh91AY&SY} J ?.N @mb&M=S6 hi j S$@ "QLMC&&i4r4F%F&Z0[!l ;-717IDtBZ*mT
LR8GWԹ&m
x5ʏHAvxㆀ /$ M!y$bK6!cJXIIKhQ-@gXӹ[ڙ5&LLWyuh|չ>blv9`ѯvE(_:CqUN)y!C?Ի,6
wə)
A;{{GjIiDS4\AVEqK(82,zln)ʄ*Җ3DcK&*j#8b~L&lyf"Lk
^DRɚo9
RAOL
˾hR!ikD24^/>}
F߮b?
5..p!a Seurat/man/ 0000755 0001762 0000144 00000000000 14525500056 012272 5 ustar ligges users Seurat/man/Load10X_Spatial.Rd 0000644 0001762 0000144 00000002602 14525500037 015405 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{Load10X_Spatial}
\alias{Load10X_Spatial}
\title{Load a 10x Genomics Visium Spatial Experiment into a \code{Seurat} object}
\usage{
Load10X_Spatial(
data.dir,
filename = "filtered_feature_bc_matrix.h5",
assay = "Spatial",
slice = "slice1",
filter.matrix = TRUE,
to.upper = FALSE,
image = NULL,
...
)
}
\arguments{
\item{data.dir}{Directory containing the H5 file specified by \code{filename}
and the image data in a subdirectory called \code{spatial}}
\item{filename}{Name of H5 file containing the feature barcode matrix}
\item{assay}{Name of the initial assay}
\item{slice}{Name for the stored image of the tissue slice}
\item{filter.matrix}{Only keep spots that have been determined to be over
tissue}
\item{to.upper}{Converts all feature names to upper case. Can be useful when
analyses require comparisons between human and mouse gene names for example.}
\item{image}{Name of image to pull the coordinates from}
\item{...}{Arguments passed to \code{\link{Read10X_h5}}}
}
\value{
A \code{Seurat} object
}
\description{
Load a 10x Genomics Visium Spatial Experiment into a \code{Seurat} object
}
\examples{
\dontrun{
data_dir <- 'path/to/data/directory'
list.files(data_dir) # Should show filtered_feature_bc_matrix.h5
Load10X_Spatial(data.dir = data_dir)
}
}
\concept{preprocessing}
Seurat/man/DimHeatmap.Rd 0000644 0001762 0000144 00000004633 14525500037 014577 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R, R/convenience.R
\name{DimHeatmap}
\alias{DimHeatmap}
\alias{PCHeatmap}
\title{Dimensional reduction heatmap}
\usage{
DimHeatmap(
object,
dims = 1,
nfeatures = 30,
cells = NULL,
reduction = "pca",
disp.min = -2.5,
disp.max = NULL,
balanced = TRUE,
projected = FALSE,
ncol = NULL,
fast = TRUE,
raster = TRUE,
slot = "scale.data",
assays = NULL,
combine = TRUE
)
PCHeatmap(object, ...)
}
\arguments{
\item{object}{Seurat object}
\item{dims}{Dimensions to plot}
\item{nfeatures}{Number of genes to plot}
\item{cells}{A list of cells to plot. If numeric, just plots the top cells.}
\item{reduction}{Which dimensional reduction to use}
\item{disp.min}{Minimum display value (all values below are clipped)}
\item{disp.max}{Maximum display value (all values above are clipped); defaults to 2.5
if \code{slot} is 'scale.data', 6 otherwise}
\item{balanced}{Plot an equal number of genes with both + and - scores.}
\item{projected}{Use the full projected dimensional reduction}
\item{ncol}{Number of columns to plot}
\item{fast}{If true, use \code{image} to generate plots; faster than using ggplot2, but not customizable}
\item{raster}{If true, plot with geom_raster, else use geom_tile. geom_raster may look blurry on
some viewing applications such as Preview due to how the raster is interpolated. Set this to FALSE
if you are encountering that issue (note that plots may take longer to produce/render).}
\item{slot}{Data slot to use, choose from 'raw.data', 'data', or 'scale.data'}
\item{assays}{A vector of assays to pull data from}
\item{combine}{Combine plots into a single \code{\link[patchwork]{patchwork}ed}
ggplot object. If \code{FALSE}, return a list of ggplot objects}
\item{...}{Extra parameters passed to \code{DimHeatmap}}
}
\value{
No return value by default. If using fast = FALSE, will return a
\code{\link[patchwork]{patchwork}ed} ggplot object if combine = TRUE, otherwise
returns a list of ggplot objects
}
\description{
Draws a heatmap focusing on a principal component. Both cells and genes are sorted by their
principal component scores. Allows for nice visualization of sources of heterogeneity in the dataset.
}
\examples{
data("pbmc_small")
DimHeatmap(object = pbmc_small)
}
\seealso{
\code{\link[graphics]{image}} \code{\link[ggplot2]{geom_raster}}
}
\concept{convenience}
\concept{visualization}
Seurat/man/ExpMean.Rd 0000644 0001762 0000144 00000000722 14525500037 014116 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{ExpMean}
\alias{ExpMean}
\title{Calculate the mean of logged values}
\usage{
ExpMean(x, ...)
}
\arguments{
\item{x}{A vector of values}
\item{...}{Other arguments (not used)}
}
\value{
Returns the mean in log-space
}
\description{
Calculate mean of logged values in non-log space (return answer in log-space)
}
\examples{
ExpMean(x = c(1, 2, 3))
}
\concept{utilities}
Seurat/man/SingleRasterMap.Rd 0000644 0001762 0000144 00000002016 14525500037 015617 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{SingleRasterMap}
\alias{SingleRasterMap}
\title{A single heatmap from ggplot2 using geom_raster}
\usage{
SingleRasterMap(
data,
raster = TRUE,
cell.order = NULL,
feature.order = NULL,
colors = PurpleAndYellow(),
disp.min = -2.5,
disp.max = 2.5,
limits = NULL,
group.by = NULL
)
}
\arguments{
\item{data}{A matrix or data frame with data to plot}
\item{raster}{switch between geom_raster and geom_tile}
\item{cell.order}{An optional vector giving the order in which to plot cells (columns); presumably cell names — confirm against R/visualization.R}
\item{feature.order}{An optional vector giving the order in which to plot features (rows); presumably feature names — confirm against R/visualization.R}
\item{colors}{A vector of colors to use}
\item{disp.min}{Minimum display value (all values below are clipped)}
\item{disp.max}{Maximum display value (all values above are clipped)}
\item{limits}{A two-length numeric vector with the limits for colors on the plot}
\item{group.by}{A vector to group cells by, should be one grouping identity per cell}
}
\value{
A ggplot2 object
}
\description{
A single heatmap from ggplot2 using geom_raster
}
\keyword{internal}
Seurat/man/JackStrawData-class.Rd 0000644 0001762 0000144 00000000622 14525500037 016346 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reexports.R
\docType{class}
\name{JackStrawData-class}
\alias{JackStrawData-class}
\title{The JackStrawData Class}
\description{
For more details, please see the documentation in
\code{\link[SeuratObject:JackStrawData]{SeuratObject}}
}
\seealso{
\code{\link[SeuratObject:JackStrawData]{SeuratObject::JackStrawData-class}}
}
Seurat/man/cc.genes.updated.2019.Rd 0000644 0001762 0000144 00000002376 14525500037 016274 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{cc.genes.updated.2019}
\alias{cc.genes.updated.2019}
\title{Cell cycle genes: 2019 update}
\format{
A list of two vectors
\describe{
\item{s.genes}{Genes associated with S-phase}
\item{g2m.genes}{Genes associated with G2M-phase}
}
}
\source{
\url{https://www.science.org/doi/abs/10.1126/science.aad0501}
}
\usage{
cc.genes.updated.2019
}
\description{
A list of genes used in cell-cycle regression, updated with 2019 symbols
}
\section{Updated symbols}{
The following symbols were updated from \code{\link{cc.genes}}
\describe{
\item{s.genes}{
\itemize{
\item \emph{MCM2}: \emph{MCM7}
\item \emph{MLF1IP}: \emph{CENPU}
\item \emph{RPA2}: \emph{POLR1B}
\item \emph{BRIP1}: \emph{MRPL36}
}
}
\item{g2m.genes}{
\itemize{
\item \emph{FAM64A}: \emph{PIMREG}
\item \emph{HN1}: \emph{JPT1}
}
}
}
}
\examples{
\dontrun{
cc.genes.updated.2019 <- cc.genes
cc.genes.updated.2019$s.genes <- UpdateSymbolList(symbols = cc.genes.updated.2019$s.genes)
cc.genes.updated.2019$g2m.genes <- UpdateSymbolList(symbols = cc.genes.updated.2019$g2m.genes)
}
}
\seealso{
\code{\link{cc.genes}}
}
\concept{data}
\keyword{datasets}
Seurat/man/NNtoGraph.Rd 0000644 0001762 0000144 00000001064 14525500037 014421 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{NNtoGraph}
\alias{NNtoGraph}
\title{Convert Neighbor class to an asymmetrical Graph class}
\usage{
NNtoGraph(nn.object, col.cells = NULL, weighted = FALSE)
}
\arguments{
\item{nn.object}{A neighbor class object}
\item{col.cells}{Cell names of the neighbors; cell names in nn.object are used by default}
\item{weighted}{Determines whether to use distances as edge weights in the Graph}
}
\value{
Returns a Graph object
}
\description{
Convert Neighbor class to an asymmetrical Graph class
}
Seurat/man/ProjectDim.Rd 0000644 0001762 0000144 00000002627 14525500037 014627 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dimensional_reduction.R
\name{ProjectDim}
\alias{ProjectDim}
\title{Project Dimensional reduction onto full dataset}
\usage{
ProjectDim(
object,
reduction = "pca",
assay = NULL,
dims.print = 1:5,
nfeatures.print = 20,
overwrite = FALSE,
do.center = FALSE,
verbose = TRUE
)
}
\arguments{
\item{object}{Seurat object}
\item{reduction}{Reduction to use}
\item{assay}{Assay to use}
\item{dims.print}{Number of dims to print features for}
\item{nfeatures.print}{Number of features with highest/lowest loadings to print for
each dimension}
\item{overwrite}{Replace the existing data in feature.loadings}
\item{do.center}{Center the dataset prior to projection (should be set to TRUE)}
\item{verbose}{Print top genes associated with the projected dimensions}
}
\value{
Returns Seurat object with the projected values
}
\description{
Takes a pre-computed dimensional reduction (typically calculated on a subset
of genes) and projects this onto the entire dataset (all genes). Note that
the cell loadings will remain unchanged, but now there are gene loadings for
all genes.
}
\examples{
data("pbmc_small")
pbmc_small
pbmc_small <- ProjectDim(object = pbmc_small, reduction = "pca")
# Visualize top projected genes in heatmap
DimHeatmap(object = pbmc_small, reduction = "pca", dims = 1, balanced = TRUE)
}
\concept{dimensional_reduction}
Seurat/man/GetAssay.Rd 0000644 0001762 0000144 00000001141 14525500037 014275 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/objects.R
\name{GetAssay}
\alias{GetAssay}
\alias{GetAssay.Seurat}
\title{Get an Assay object from a given Seurat object.}
\usage{
GetAssay(object, ...)
\method{GetAssay}{Seurat}(object, assay = NULL, ...)
}
\arguments{
\item{object}{An object}
\item{...}{Arguments passed to other methods}
\item{assay}{Assay to get}
}
\value{
Returns an Assay object
}
\description{
Get an Assay object from a given Seurat object.
}
\examples{
data("pbmc_small")
GetAssay(object = pbmc_small, assay = "RNA")
}
\concept{objects}
Seurat/man/CalcDispersion.Rd 0000644 0001762 0000144 00000001360 14525500037 015462 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing5.R
\name{CalcDispersion}
\alias{CalcDispersion}
\title{Calculate dispersion of features}
\usage{
CalcDispersion(
object,
mean.function = FastExpMean,
dispersion.function = FastLogVMR,
num.bin = 20,
binning.method = "equal_width",
verbose = TRUE,
...
)
}
\arguments{
\item{object}{Data matrix}
\item{mean.function}{Function to calculate mean}
\item{dispersion.function}{Function to calculate dispersion}
\item{num.bin}{Number of bins to use}
\item{binning.method}{Method to use for binning. Options are 'equal_width' or 'equal_frequency'}
\item{verbose}{Display progress}
}
\description{
Calculate dispersion of features
}
\keyword{internal}
Seurat/man/FindSubCluster.Rd 0000644 0001762 0000144 00000002053 14525500037 015454 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering.R
\name{FindSubCluster}
\alias{FindSubCluster}
\title{Find subclusters under one cluster}
\usage{
FindSubCluster(
object,
cluster,
graph.name,
subcluster.name = "sub.cluster",
resolution = 0.5,
algorithm = 1
)
}
\arguments{
\item{object}{An object}
\item{cluster}{the cluster to be sub-clustered}
\item{graph.name}{Name of graph to use for the clustering algorithm}
\item{subcluster.name}{the name of the sub-cluster column added to the meta.data}
\item{resolution}{Value of the resolution parameter, use a value above
(below) 1.0 if you want to obtain a larger (smaller) number of communities.}
\item{algorithm}{Algorithm for modularity optimization (1 = original Louvain
algorithm; 2 = Louvain algorithm with multilevel refinement; 3 = SLM
algorithm; 4 = Leiden algorithm). Leiden requires the leidenalg python module.}
}
\value{
Returns an object with sub-cluster labels in the \code{subcluster.name} variable
\description{
Find subclusters under one cluster
}
\concept{clustering}
Seurat/man/PolyDimPlot.Rd 0000644 0001762 0000144 00000001516 14525500037 014777 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{PolyDimPlot}
\alias{PolyDimPlot}
\title{Polygon DimPlot}
\usage{
PolyDimPlot(
object,
group.by = NULL,
cells = NULL,
poly.data = "spatial",
flip.coords = FALSE
)
}
\arguments{
\item{object}{Seurat object}
\item{group.by}{A grouping variable present in the metadata. Default is to use the groupings present
in the current cell identities (\code{Idents(object = object)})}
\item{cells}{Vector of cells to plot (default is all cells)}
\item{poly.data}{Name of the polygon dataframe in the misc slot}
\item{flip.coords}{Flip x and y coordinates}
}
\value{
Returns a ggplot object
}
\description{
Plot cells as polygons, rather than single points. Color cells by identity, or a categorical variable
in metadata
}
\concept{visualization}
Seurat/man/SubsetByBarcodeInflections.Rd 0000644 0001762 0000144 00000002035 14525500037 017776 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{SubsetByBarcodeInflections}
\alias{SubsetByBarcodeInflections}
\title{Subset a Seurat Object based on the Barcode Distribution Inflection Points}
\usage{
SubsetByBarcodeInflections(object)
}
\arguments{
\item{object}{Seurat object}
}
\value{
Returns a subsetted Seurat object.
}
\description{
This convenience function subsets a Seurat object based on calculated inflection points.
}
\details{
See [CalculateBarcodeInflections()] to calculate inflection points and
[BarcodeInflectionsPlot()] to visualize and test inflection point calculations.
}
\examples{
data("pbmc_small")
pbmc_small <- CalculateBarcodeInflections(
object = pbmc_small,
group.column = 'groups',
threshold.low = 20,
threshold.high = 30
)
SubsetByBarcodeInflections(object = pbmc_small)
}
\seealso{
\code{\link{CalculateBarcodeInflections}} \code{\link{BarcodeInflectionsPlot}}
}
\author{
Robert A. Amezquita, \email{robert.amezquita@fredhutch.org}
}
\concept{preprocessing}
Seurat/man/subset.AnchorSet.Rd 0000644 0001762 0000144 00000002657 14525500037 015764 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{subset.AnchorSet}
\alias{subset.AnchorSet}
\title{Subset an AnchorSet object}
\usage{
\method{subset}{AnchorSet}(
x,
score.threshold = NULL,
disallowed.dataset.pairs = NULL,
dataset.matrix = NULL,
group.by = NULL,
disallowed.ident.pairs = NULL,
ident.matrix = NULL,
...
)
}
\arguments{
\item{x}{object to be subsetted.}
\item{score.threshold}{Only anchor pairs with scores greater than this value
are retained.}
\item{disallowed.dataset.pairs}{Remove any anchors formed between the
provided pairs. E.g. \code{list(c(1, 5), c(1, 2))} filters out any anchors between
datasets 1 and 5 and datasets 1 and 2.}
\item{dataset.matrix}{Provide a binary matrix specifying whether a dataset
pair is allowable (1) or not (0). Should be a dataset x dataset matrix.}
\item{group.by}{Grouping variable to determine allowable ident pairs}
\item{disallowed.ident.pairs}{Remove any anchors formed between provided
ident pairs. E.g. \code{list(c("CD4", "CD8"), c("B-cell", "T-cell"))}}
\item{ident.matrix}{Provide a binary matrix specifying whether an ident pair
is allowable (1) or not (0). Should be an ident x ident symmetric matrix}
\item{...}{further arguments to be passed to or from other methods.}
}
\value{
Returns an \code{\link{AnchorSet}} object with specified anchors
filtered out
}
\description{
Subset an AnchorSet object
}
\concept{objects}
Seurat/man/Read10X.Rd 0000644 0001762 0000144 00000003574 14525500037 013735 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{Read10X}
\alias{Read10X}
\title{Load in data from 10X}
\usage{
Read10X(
data.dir,
gene.column = 2,
cell.column = 1,
unique.features = TRUE,
strip.suffix = FALSE
)
}
\arguments{
\item{data.dir}{Directory containing the matrix.mtx, genes.tsv (or features.tsv), and barcodes.tsv
files provided by 10X. A vector or named vector can be given in order to load
several data directories. If a named vector is given, the cell barcode names
will be prefixed with the name.}
\item{gene.column}{Specify which column of genes.tsv or features.tsv to use for gene names; default is 2}
\item{cell.column}{Specify which column of barcodes.tsv to use for cell names; default is 1}
\item{unique.features}{Make feature names unique (default TRUE)}
\item{strip.suffix}{Remove trailing "-1" if present in all cell barcodes.}
}
\value{
If features.tsv indicates the data has multiple data types, a list
containing a sparse matrix of the data from each type will be returned.
Otherwise a sparse matrix containing the expression data will be returned.
}
\description{
Enables easy loading of sparse data matrices provided by 10X genomics.
}
\examples{
\dontrun{
# For output from CellRanger < 3.0
data_dir <- 'path/to/data/directory'
list.files(data_dir) # Should show barcodes.tsv, genes.tsv, and matrix.mtx
expression_matrix <- Read10X(data.dir = data_dir)
seurat_object = CreateSeuratObject(counts = expression_matrix)
# For output from CellRanger >= 3.0 with multiple data types
data_dir <- 'path/to/data/directory'
list.files(data_dir) # Should show barcodes.tsv.gz, features.tsv.gz, and matrix.mtx.gz
data <- Read10X(data.dir = data_dir)
seurat_object = CreateSeuratObject(counts = data$`Gene Expression`)
seurat_object[['Protein']] = CreateAssayObject(counts = data$`Antibody Capture`)
}
}
\concept{preprocessing}
Seurat/man/GetTissueCoordinates.Rd 0000644 0001762 0000144 00000002001 14525500037 016660 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{GetTissueCoordinates.SlideSeq}
\alias{GetTissueCoordinates.SlideSeq}
\alias{GetTissueCoordinates.STARmap}
\alias{GetTissueCoordinates.VisiumV1}
\title{Get Tissue Coordinates}
\usage{
\method{GetTissueCoordinates}{SlideSeq}(object, ...)
\method{GetTissueCoordinates}{STARmap}(object, qhulls = FALSE, ...)
\method{GetTissueCoordinates}{VisiumV1}(
object,
scale = "lowres",
cols = c("imagerow", "imagecol"),
...
)
}
\arguments{
\item{object}{An object}
\item{...}{Arguments passed to other methods}
\item{qhulls}{return qhulls instead of centroids}
\item{scale}{A factor to scale the coordinates by; choose from: 'tissue',
'fiducial', 'hires', 'lowres', or \code{NULL} for no scaling}
\item{cols}{Columns of tissue coordinates data.frame to pull}
}
\description{
Get Tissue Coordinates
}
\seealso{
\code{\link[SeuratObject:GetTissueCoordinates]{SeuratObject::GetTissueCoordinates}}
}
\concept{objects}
\concept{spatial}
Seurat/man/RunMarkVario.Rd 0000644 0001762 0000144 00000001171 14525500037 015140 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{RunMarkVario}
\alias{RunMarkVario}
\title{Run the mark variogram computation on a given position matrix and expression
matrix.}
\usage{
RunMarkVario(spatial.location, data, ...)
}
\arguments{
\item{spatial.location}{A 2 column matrix giving the spatial locations of
each of the data points also in data}
\item{data}{Matrix containing the data used as "marks" (e.g. gene expression)}
\item{...}{Arguments passed to markvario}
}
\description{
Wraps the functionality of markvario from the spatstat package.
}
\concept{preprocessing}
Seurat/man/FoldChange.Rd 0000644 0001762 0000144 00000006707 14525500037 014564 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/differential_expression.R
\name{FoldChange}
\alias{FoldChange}
\alias{FoldChange.default}
\alias{FoldChange.Assay}
\alias{FoldChange.SCTAssay}
\alias{FoldChange.DimReduc}
\alias{FoldChange.Seurat}
\title{Fold Change}
\usage{
FoldChange(object, ...)
\method{FoldChange}{default}(object, cells.1, cells.2, mean.fxn, fc.name, features = NULL, ...)
\method{FoldChange}{Assay}(
object,
cells.1,
cells.2,
features = NULL,
slot = "data",
pseudocount.use = 1,
fc.name = NULL,
mean.fxn = NULL,
base = 2,
norm.method = NULL,
...
)
\method{FoldChange}{SCTAssay}(
object,
cells.1,
cells.2,
features = NULL,
slot = "data",
pseudocount.use = 1,
fc.name = NULL,
mean.fxn = NULL,
base = 2,
...
)
\method{FoldChange}{DimReduc}(
object,
cells.1,
cells.2,
features = NULL,
slot = NULL,
pseudocount.use = 1,
fc.name = NULL,
mean.fxn = NULL,
...
)
\method{FoldChange}{Seurat}(
object,
ident.1 = NULL,
ident.2 = NULL,
group.by = NULL,
subset.ident = NULL,
assay = NULL,
slot = "data",
reduction = NULL,
features = NULL,
pseudocount.use = 1,
mean.fxn = NULL,
base = 2,
fc.name = NULL,
...
)
}
\arguments{
\item{object}{A Seurat object}
\item{...}{Arguments passed to other methods}
\item{cells.1}{Vector of cell names belonging to group 1}
\item{cells.2}{Vector of cell names belonging to group 2}
\item{mean.fxn}{Function to use for fold change or average difference calculation}
\item{fc.name}{Name of the fold change, average difference, or custom function column
in the output data.frame}
\item{features}{Features to calculate fold change for.
If NULL, use all features}
\item{slot}{Slot to pull data from}
\item{pseudocount.use}{Pseudocount to add to averaged expression values when
calculating logFC.}
\item{base}{The base with respect to which logarithms are computed.}
\item{norm.method}{Normalization method for mean function selection
when \code{slot} is \dQuote{\code{data}}}
\item{ident.1}{Identity class to calculate fold change for; pass an object of class
\code{phylo} or 'clustertree' to calculate fold change for a node in a cluster tree;
passing 'clustertree' requires \code{\link{BuildClusterTree}} to have been run}
\item{ident.2}{A second identity class for comparison; if \code{NULL},
use all other cells for comparison; if an object of class \code{phylo} or
'clustertree' is passed to \code{ident.1}, must pass a node to calculate fold change for}
\item{group.by}{Regroup cells into a different identity class prior to
calculating fold change (see example in \code{\link{FindMarkers}})}
\item{subset.ident}{Subset a particular identity class prior to regrouping.
Only relevant if group.by is set (see example in \code{\link{FindMarkers}})}
\item{assay}{Assay to use in fold change calculation}
\item{reduction}{Reduction to use - will calculate average difference on cell embeddings}
}
\value{
Returns a data.frame
}
\description{
Calculate log fold change and percentage of cells expressing each feature
for different identity classes.
}
\details{
If the slot is \code{scale.data} or a reduction is specified, average difference
is returned instead of log fold change and the column is named "avg_diff".
Otherwise, log2 fold change is returned with column named "avg_log2FC".
}
\examples{
\dontrun{
data("pbmc_small")
FoldChange(pbmc_small, ident.1 = 1)
}
}
\seealso{
\code{FindMarkers}
}
\concept{differential_expression}
Seurat/man/CustomDistance.Rd 0000644 0001762 0000144 00000001471 14525500037 015510 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{CustomDistance}
\alias{CustomDistance}
\title{Run a custom distance function on an input data matrix}
\usage{
CustomDistance(my.mat, my.function, ...)
}
\arguments{
\item{my.mat}{A matrix to calculate distance on}
\item{my.function}{A function to calculate distance}
\item{...}{Extra parameters to my.function}
}
\value{
A distance matrix
}
\description{
Run a custom distance function on an input data matrix
}
\examples{
data("pbmc_small")
# Define custom distance matrix
manhattan.distance <- function(x, y) return(sum(abs(x-y)))
input.data <- GetAssayData(pbmc_small, assay.type = "RNA", slot = "scale.data")
cell.manhattan.dist <- CustomDistance(input.data, manhattan.distance)
}
\author{
Jean Fan
}
\concept{utilities}
Seurat/man/AverageExpression.Rd 0000644 0001762 0000144 00000004671 14525500037 016222 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{AverageExpression}
\alias{AverageExpression}
\title{Averaged feature expression by identity class}
\usage{
AverageExpression(
object,
assays = NULL,
features = NULL,
return.seurat = FALSE,
group.by = "ident",
add.ident = NULL,
layer = "data",
slot = deprecated(),
verbose = TRUE,
...
)
}
\arguments{
\item{object}{Seurat object}
\item{assays}{Which assays to use. Default is all assays}
\item{features}{Features to analyze. Default is all features in the assay}
\item{return.seurat}{Whether to return the data as a Seurat object. Default is FALSE}
\item{group.by}{Category (or vector of categories) for grouping (e.g, ident, replicate, celltype); 'ident' by default
To use multiple categories, specify a vector, such as c('ident', 'replicate', 'celltype')}
\item{add.ident}{(Deprecated). Place an additional label on each cell prior to pseudobulking}
\item{layer}{Layer(s) to use; if multiple layers are given, assumed to follow
the order of 'assays' (if specified) or object's assays}
\item{slot}{(Deprecated). Slots(s) to use}
\item{verbose}{Print messages and show progress bar}
\item{...}{Arguments to be passed to methods such as \code{\link{CreateSeuratObject}}}
}
\value{
Returns a matrix with genes as rows, identity classes as columns.
If return.seurat is TRUE, returns an object of class \code{\link{Seurat}}.
}
\description{
Returns averaged expression values for each identity class.
}
\details{
If layer is set to 'data', this function assumes that the data has been log
normalized and therefore feature values are exponentiated prior to averaging
so that averaging is done in non-log space. Otherwise, if layer is set to
either 'counts' or 'scale.data', no exponentiation is performed prior to averaging.
If \code{return.seurat = TRUE} and layer is not 'scale.data', averaged values
are placed in the 'counts' layer of the returned object and 'log1p'
is run on the averaged counts and placed in the 'data' layer. \code{\link{ScaleData}}
is then run on the default assay before returning the object.
If \code{return.seurat = TRUE} and layer is 'scale.data', the 'counts' layer contains
average counts and 'scale.data' is set to the averaged values of 'scale.data'.
}
\examples{
data("pbmc_small")
head(AverageExpression(object = pbmc_small)$RNA)
head(AverageExpression(object = pbmc_small, group.by = c('ident', 'groups'))$RNA)
}
\concept{utilities}
Seurat/man/PredictAssay.Rd 0000644 0001762 0000144 00000002556 14525500037 015163 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering.R
\name{PredictAssay}
\alias{PredictAssay}
\title{Predict value from nearest neighbors}
\usage{
PredictAssay(
object,
nn.idx,
assay,
reduction = NULL,
dims = NULL,
return.assay = TRUE,
slot = "scale.data",
features = NULL,
mean.function = rowMeans,
seed = 4273,
verbose = TRUE
)
}
\arguments{
\item{object}{The object used to calculate knn}
\item{nn.idx}{k near neighbour indices. A cells x k matrix.}
\item{assay}{Assay used for prediction}
\item{reduction}{Cell embedding of the reduction used for prediction}
\item{dims}{Number of dimensions of cell embedding}
\item{return.assay}{Return an assay or a predicted matrix}
\item{slot}{slot used for prediction}
\item{features}{features used for prediction}
\item{mean.function}{the function used to calculate row mean}
\item{seed}{Sets the random seed to check if the nearest neighbor is query
cell}
\item{verbose}{Print progress}
}
\value{
return an assay containing predicted expression value in the data
slot
}
\description{
This function will predict expression or cell embeddings from its k nearest
neighbors index. For each cell, it will average its k neighbors value to get
its new imputed value. It can average expression value in assays and cell
embeddings from dimensional reductions.
}
\concept{integration}
Seurat/man/Radius.Rd 0000644 0001762 0000144 00000000764 14525500037 014016 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{Radius.SlideSeq}
\alias{Radius.SlideSeq}
\alias{Radius.STARmap}
\alias{Radius.VisiumV1}
\title{Get Spot Radius}
\usage{
\method{Radius}{SlideSeq}(object)
\method{Radius}{STARmap}(object)
\method{Radius}{VisiumV1}(object)
}
\arguments{
\item{object}{An image object}
}
\description{
Get Spot Radius
}
\seealso{
\code{\link[SeuratObject:Radius]{SeuratObject::Radius}}
}
\concept{objects}
\concept{spatial}
Seurat/man/SCTAssay-class.Rd 0000644 0001762 0000144 00000005614 14525500037 015323 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\docType{class}
\name{SCTAssay-class}
\alias{SCTAssay-class}
\alias{SCTModel}
\alias{SCTAssay}
\alias{levels.SCTAssay}
\alias{levels<-.SCTAssay}
\title{The SCTModel Class}
\usage{
\method{levels}{SCTAssay}(x)
\method{levels}{SCTAssay}(x) <- value
}
\arguments{
\item{x}{An \code{SCTAssay} object}
\item{value}{New levels, must be in the same order as the levels present}
}
\value{
\code{levels}: SCT model names
\code{levels<-}: \code{x} with updated SCT model names
}
\description{
The SCTModel object is a model and parameters storage from SCTransform.
It can be used to calculate Pearson residuals for new genes.
The SCTAssay object contains all the information found in an \code{\link{Assay}}
object, with extra information from the results of \code{\link{SCTransform}}
}
\section{Slots}{
\describe{
\item{\code{feature.attributes}}{A data.frame with feature attributes in SCTransform}
\item{\code{cell.attributes}}{A data.frame with cell attributes in SCTransform}
\item{\code{clips}}{A list of two numeric of length two specifying the min and max
values the Pearson residual will be clipped to. One for vst and one for
SCTransform}
\item{\code{umi.assay}}{Name of the assay of the seurat object containing UMI matrix
and the default is RNA}
\item{\code{model}}{A formula used in SCTransform}
\item{\code{arguments}}{other information used in SCTransform}
\item{\code{median_umi}}{Median UMI (or scale factor) used to calculate corrected counts}
\item{\code{SCTModel.list}}{A list containing SCT models}
}}
\section{Get and set SCT model names}{
SCT results are named by initial run of \code{\link{SCTransform}} in order
to keep SCT parameters straight between runs. When working with merged
\code{SCTAssay} objects, these model names are important. \code{levels}
allows querying the models present. \code{levels<-} allows the changing of
the names of the models present, useful when merging \code{SCTAssay} objects.
Note: unlike normal \code{\link[base]{levels<-}}, \code{levels<-.SCTAssay}
allows complete changing of model names, not reordering.
}
\section{Creating an \code{SCTAssay} from an \code{Assay}}{
Conversion from an \code{Assay} object to an \code{SCTAssay} object by
is done by adding the additional slots to the object. If \code{from} has
results generated by \code{\link{SCTransform}} from Seurat v3.0.0 to v3.1.1,
the conversion will automagically fill the new slots with the data
}
\examples{
\dontrun{
# SCTAssay objects are generated from SCTransform
pbmc_small <- SCTransform(pbmc_small)
}
\dontrun{
# SCTAssay objects are generated from SCTransform
pbmc_small <- SCTransform(pbmc_small)
pbmc_small[["SCT"]]
}
\dontrun{
# Query and change SCT model names
levels(pbmc_small[['SCT']])
levels(pbmc_small[['SCT']]) <- '3'
levels(pbmc_small[['SCT']])
}
}
\seealso{
\code{\link{Assay}}
\code{\link{Assay}}
}
\concept{objects}
Seurat/man/NNPlot.Rd 0000644 0001762 0000144 00000003661 14525500037 013740 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{NNPlot}
\alias{NNPlot}
\title{Highlight Neighbors in DimPlot}
\usage{
NNPlot(
object,
reduction,
nn.idx,
query.cells,
dims = 1:2,
label = FALSE,
label.size = 4,
repel = FALSE,
sizes.highlight = 2,
pt.size = 1,
cols.highlight = c("#377eb8", "#e41a1c"),
na.value = "#bdbdbd",
order = c("self", "neighbors", "other"),
show.all.cells = TRUE,
...
)
}
\arguments{
\item{object}{Seurat object}
\item{reduction}{Which dimensionality reduction to use. If not specified, first searches for umap, then tsne, then pca}
\item{nn.idx}{the neighbor index of all cells}
\item{query.cells}{cells used to find their neighbors}
\item{dims}{Dimensions to plot, must be a two-length numeric vector specifying x- and y-dimensions}
\item{label}{Whether to label the clusters}
\item{label.size}{Sets size of labels}
\item{repel}{Repel labels}
\item{sizes.highlight}{Size of highlighted cells; will repeat to the length
groups in cells.highlight. If \code{sizes.highlight = TRUE} size of all
points will be this value.}
\item{pt.size}{Adjust point size for plotting}
\item{cols.highlight}{A vector of colors to highlight the cells as; will
repeat to the length groups in cells.highlight}
\item{na.value}{Color value for NA points when using custom scale}
\item{order}{Specify the order of plotting for the idents. This can be
useful for crowded plots if points of interest are being buried. Provide
either a full list of valid idents or a subset to be plotted last (on top)}
\item{show.all.cells}{Show all cells or only query and neighbor cells}
\item{...}{Extra parameters passed to \code{DimPlot}}
}
\value{
A \code{\link[patchwork]{patchwork}ed} ggplot object if
\code{combine = TRUE}; otherwise, a list of ggplot objects
}
\description{
It will color the query cells and the neighbors of the query cells in the
DimPlot
}
\concept{visualization}
Seurat/man/SingleCorPlot.Rd 0000644 0001762 0000144 00000002440 14525500037 015304 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{SingleCorPlot}
\alias{SingleCorPlot}
\title{A single correlation plot}
\usage{
SingleCorPlot(
data,
col.by = NULL,
cols = NULL,
pt.size = NULL,
smooth = FALSE,
rows.highlight = NULL,
legend.title = NULL,
na.value = "grey50",
span = NULL,
raster = NULL,
raster.dpi = NULL,
plot.cor = TRUE,
jitter = TRUE
)
}
\arguments{
\item{data}{A data frame with two columns to be plotted}
\item{col.by}{A vector or factor of values to color the plot by}
\item{cols}{An optional vector of colors to use}
\item{pt.size}{Point size for the plot}
\item{smooth}{Make a smoothed scatter plot}
\item{rows.highlight}{A vector of rows to highlight (like cells.highlight in
\code{\link{SingleDimPlot}})}
\item{legend.title}{Optional legend title}
\item{raster}{Convert points to raster format, default is \code{NULL}
which will automatically use raster if the number of points plotted is
greater than 100,000}
\item{raster.dpi}{the pixel resolution for rastered plots, passed to geom_scattermore().
Default is c(512, 512)}
\item{plot.cor}{Whether to display the correlation coefficient on the plot; presumably shown in the plot title — confirm against R/visualization.R}
\item{jitter}{Jitter for easier visualization of crowded points}
}
\value{
A ggplot2 object
}
\description{
A single correlation plot
}
\keyword{internal}
Seurat/man/RunUMAP.Rd 0000644 0001762 0000144 00000022667 14525500037 014024 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/dimensional_reduction.R
\name{RunUMAP}
\alias{RunUMAP}
\alias{RunUMAP.default}
\alias{RunUMAP.Graph}
\alias{RunUMAP.Neighbor}
\alias{RunUMAP.Seurat}
\title{Run UMAP}
\usage{
RunUMAP(object, ...)
\method{RunUMAP}{default}(
object,
reduction.key = "UMAP_",
assay = NULL,
reduction.model = NULL,
return.model = FALSE,
umap.method = "uwot",
n.neighbors = 30L,
n.components = 2L,
metric = "cosine",
n.epochs = NULL,
learning.rate = 1,
min.dist = 0.3,
spread = 1,
set.op.mix.ratio = 1,
local.connectivity = 1L,
repulsion.strength = 1,
negative.sample.rate = 5,
a = NULL,
b = NULL,
uwot.sgd = FALSE,
seed.use = 42,
metric.kwds = NULL,
angular.rp.forest = FALSE,
densmap = FALSE,
dens.lambda = 2,
dens.frac = 0.3,
dens.var.shift = 0.1,
verbose = TRUE,
...
)
\method{RunUMAP}{Graph}(
object,
assay = NULL,
umap.method = "umap-learn",
n.components = 2L,
metric = "correlation",
n.epochs = 0L,
learning.rate = 1,
min.dist = 0.3,
spread = 1,
repulsion.strength = 1,
negative.sample.rate = 5L,
a = NULL,
b = NULL,
uwot.sgd = FALSE,
seed.use = 42L,
metric.kwds = NULL,
densmap = FALSE,
densmap.kwds = NULL,
verbose = TRUE,
reduction.key = "UMAP_",
...
)
\method{RunUMAP}{Neighbor}(object, reduction.model, ...)
\method{RunUMAP}{Seurat}(
object,
dims = NULL,
reduction = "pca",
features = NULL,
graph = NULL,
assay = DefaultAssay(object = object),
nn.name = NULL,
slot = "data",
umap.method = "uwot",
reduction.model = NULL,
return.model = FALSE,
n.neighbors = 30L,
n.components = 2L,
metric = "cosine",
n.epochs = NULL,
learning.rate = 1,
min.dist = 0.3,
spread = 1,
set.op.mix.ratio = 1,
local.connectivity = 1L,
repulsion.strength = 1,
negative.sample.rate = 5L,
a = NULL,
b = NULL,
uwot.sgd = FALSE,
seed.use = 42L,
metric.kwds = NULL,
angular.rp.forest = FALSE,
densmap = FALSE,
dens.lambda = 2,
dens.frac = 0.3,
dens.var.shift = 0.1,
verbose = TRUE,
reduction.name = "umap",
reduction.key = NULL,
...
)
}
\arguments{
\item{object}{An object}
\item{...}{Arguments passed to other methods and UMAP}
\item{reduction.key}{dimensional reduction key, specifies the string before
the number for the dimension names. UMAP by default}
\item{assay}{Assay to pull data for when using \code{features}, or assay used to construct Graph
if running UMAP on a Graph}
\item{reduction.model}{\code{DimReduc} object that contains the umap model}
\item{return.model}{whether UMAP will return the uwot model}
\item{umap.method}{UMAP implementation to run. Can be
\describe{
\item{\code{uwot}:}{Runs umap via the uwot R package}
\item{\code{uwot-learn}:}{Runs umap via the uwot R package and return the learned umap model}
\item{\code{umap-learn}:}{Run the Seurat wrapper of the python umap-learn package}
}}
\item{n.neighbors}{This determines the number of neighboring points used in
local approximations of manifold structure. Larger values will result in more
global structure being preserved at the loss of detailed local structure. In
general this parameter should often be in the range 5 to 50.}
\item{n.components}{The dimension of the space to embed into.}
\item{metric}{metric: This determines the choice of metric used to measure
distance in the input space. A wide variety of metrics are already coded, and
a user defined function can be passed as long as it has been JITd by numba.}
\item{n.epochs}{The number of training epochs to be used in optimizing the low dimensional
embedding. Larger values result in more accurate embeddings. If NULL is specified, a value will
be selected based on the size of the input dataset (200 for large datasets, 500 for small).}
\item{learning.rate}{The initial learning rate for the embedding optimization.}
\item{min.dist}{This controls how tightly the embedding is allowed to compress points together.
Larger values ensure embedded points are more evenly distributed, while smaller values allow the
algorithm to optimise more accurately with regard to local structure. Sensible values are in
the range 0.001 to 0.5.}
\item{spread}{The effective scale of embedded points. In combination with min.dist this
determines how clustered/clumped the embedded points are.}
\item{set.op.mix.ratio}{Interpolate between (fuzzy) union and intersection as the set operation
used to combine local fuzzy simplicial sets to obtain a global fuzzy simplicial sets. Both fuzzy
set operations use the product t-norm. The value of this parameter should be between 0.0 and
1.0; a value of 1.0 will use a pure fuzzy union, while 0.0 will use a pure fuzzy intersection.}
\item{local.connectivity}{The local connectivity required - i.e. the number of nearest neighbors
that should be assumed to be connected at a local level. The higher this value the more connected
the manifold becomes locally. In practice this should be not more than the local intrinsic
dimension of the manifold.}
\item{repulsion.strength}{Weighting applied to negative samples in low dimensional embedding
optimization. Values higher than one will result in greater weight being given to negative
samples.}
\item{negative.sample.rate}{The number of negative samples to select per positive sample in the
optimization process. Increasing this value will result in greater repulsive force being applied,
greater optimization cost, but slightly more accuracy.}
\item{a}{More specific parameters controlling the embedding. If NULL, these values are set
automatically as determined by min.dist and spread. Parameter of differentiable approximation of
right adjoint functor.}
\item{b}{More specific parameters controlling the embedding. If NULL, these values are set
automatically as determined by min.dist and spread. Parameter of differentiable approximation of
right adjoint functor.}
\item{uwot.sgd}{Set \code{uwot::umap(fast_sgd = TRUE)}; see \code{\link[uwot]{umap}} for more details}
\item{seed.use}{Set a random seed. By default, sets the seed to 42. Setting
NULL will not set a seed}
\item{metric.kwds}{A dictionary of arguments to pass on to the metric, such as the p value for
Minkowski distance. If NULL then no arguments are passed on.}
\item{angular.rp.forest}{Whether to use an angular random projection forest to initialise the
approximate nearest neighbor search. This can be faster, but is mostly only useful for metrics
that use an angular style distance such as cosine, correlation etc. In the case of those metrics
angular forests will be chosen automatically.}
\item{densmap}{Whether to use the density-augmented objective of densMAP.
Turning on this option generates an embedding where the local densities
are encouraged to be correlated with those in the original space.
Parameters below with the prefix ‘dens’ further control the behavior
of this extension. Default is FALSE. Only compatible with 'umap-learn' method
and version of umap-learn >= 0.5.0}
\item{dens.lambda}{Specific parameter which controls the regularization weight
of the density correlation term in densMAP. Higher values prioritize density
preservation over the UMAP objective, and vice versa for values closer to zero.
Setting this parameter to zero is equivalent to running the original UMAP algorithm.
Default value is 2.}
\item{dens.frac}{Specific parameter which controls the fraction of epochs
(between 0 and 1) where the density-augmented objective is used in densMAP.
The first (1 - dens_frac) fraction of epochs optimize the original UMAP
objective before introducing the density correlation term. Default is 0.3.}
\item{dens.var.shift}{Specific parameter which specifies a small constant
added to the variance of local radii in the embedding when calculating
the density correlation objective to prevent numerical instability from
dividing by a small number. Default is 0.1.}
\item{verbose}{Controls verbosity}
\item{densmap.kwds}{A dictionary of arguments to pass on to the densMAP optimization.}
\item{dims}{Which dimensions to use as input features, used only if
\code{features} is NULL}
\item{reduction}{Which dimensional reduction (PCA or ICA) to use for the
UMAP input. Default is PCA}
\item{features}{If set, run UMAP on this subset of features (instead of running on a
set of reduced dimensions). Not set (NULL) by default; \code{dims} must be NULL to run
on features}
\item{graph}{Name of graph on which to run UMAP}
\item{nn.name}{Name of knn output on which to run UMAP}
\item{slot}{The slot used to pull data for when using \code{features}. data slot is by default.}
\item{reduction.name}{Name to store dimensional reduction under in the Seurat object}
}
\value{
Returns a Seurat object containing a UMAP representation
}
\description{
Runs the Uniform Manifold Approximation and Projection (UMAP) dimensional
reduction technique. To run using \code{umap.method="umap-learn"}, you must
first install the umap-learn python package (e.g. via
\code{pip install umap-learn}). Details on this package can be
found here: \url{https://github.com/lmcinnes/umap}. For a more in depth
discussion of the mathematics underlying UMAP, see the ArXiv paper here:
\url{https://arxiv.org/abs/1802.03426}.
}
\examples{
\dontrun{
data("pbmc_small")
pbmc_small
# Run UMAP map on first 5 PCs
pbmc_small <- RunUMAP(object = pbmc_small, dims = 1:5)
# Plot results
DimPlot(object = pbmc_small, reduction = 'umap')
}
}
\references{
McInnes, L, Healy, J, UMAP: Uniform Manifold Approximation and
Projection for Dimension Reduction, ArXiv e-prints 1802.03426, 2018
}
\concept{dimensional_reduction}
Seurat/man/ScaleFactors.Rd 0000644 0001762 0000144 00000001613 14525500037 015132 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/objects.R
\name{ScaleFactors}
\alias{ScaleFactors}
\alias{scalefactors}
\alias{ScaleFactors.VisiumV1}
\title{Get image scale factors}
\usage{
ScaleFactors(object, ...)
scalefactors(spot, fiducial, hires, lowres)
\method{ScaleFactors}{VisiumV1}(object, ...)
}
\arguments{
\item{object}{An object to get scale factors from}
\item{...}{Arguments passed to other methods}
\item{spot}{Spot full resolution scale factor}
\item{fiducial}{Fiducial full resolution scale factor}
\item{hires}{High resolution scale factor}
\item{lowres}{Low resolution scale factor}
}
\value{
An object of class \code{scalefactors}
}
\description{
Get image scale factors
}
\note{
\code{scalefactors} objects can be created with \code{scalefactors()}
}
\concept{objects}
\concept{spatial}
Seurat/man/ExpSD.Rd 0000644 0001762 0000144 00000000667 14525500037 013554 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{ExpSD}
\alias{ExpSD}
\title{Calculate the standard deviation of logged values}
\usage{
ExpSD(x)
}
\arguments{
\item{x}{A vector of values}
}
\value{
Returns the standard deviation in log-space
}
\description{
Calculate SD of logged values in non-log space (return answer in log-space)
}
\examples{
ExpSD(x = c(1, 2, 3))
}
\concept{utilities}
Seurat/man/DISP.Rd 0000644 0001762 0000144 00000000712 14525500037 013317 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing5.R
\name{DISP}
\alias{DISP}
\title{Find variable features based on dispersion}
\usage{
DISP(data, nselect = 2000L, verbose = TRUE, ...)
}
\arguments{
\item{data}{Data matrix}
\item{nselect}{Number of top features to select based on dispersion values}
\item{verbose}{Display progress}
}
\description{
Find variable features based on dispersion
}
\keyword{internal}
Seurat/man/FindMultiModalNeighbors.Rd 0000644 0001762 0000144 00000004510 14525500037 017271 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering.R
\name{FindMultiModalNeighbors}
\alias{FindMultiModalNeighbors}
\title{Construct weighted nearest neighbor graph}
\usage{
FindMultiModalNeighbors(
object,
reduction.list,
dims.list,
k.nn = 20,
l2.norm = TRUE,
knn.graph.name = "wknn",
snn.graph.name = "wsnn",
weighted.nn.name = "weighted.nn",
modality.weight.name = NULL,
knn.range = 200,
prune.SNN = 1/15,
sd.scale = 1,
cross.contant.list = NULL,
smooth = FALSE,
return.intermediate = FALSE,
modality.weight = NULL,
verbose = TRUE
)
}
\arguments{
\item{object}{A Seurat object}
\item{reduction.list}{A list of two dimensional reductions, one for each of
the modalities to be integrated}
\item{dims.list}{A list containing the dimensions for each reduction to use}
\item{k.nn}{the number of multimodal neighbors to compute. 20 by default}
\item{l2.norm}{Perform L2 normalization on the cell embeddings after
dimensional reduction. TRUE by default.}
\item{knn.graph.name}{Multimodal knn graph name}
\item{snn.graph.name}{Multimodal snn graph name}
\item{weighted.nn.name}{Multimodal neighbor object name}
\item{modality.weight.name}{Variable name to store modality weight in object
meta data}
\item{knn.range}{The number of approximate neighbors to compute}
\item{prune.SNN}{Cutoff not to discard edge in SNN graph}
\item{sd.scale}{The scaling factor for kernel width. 1 by default}
\item{cross.contant.list}{Constant used to avoid divide-by-zero errors. 1e-4
by default}
\item{smooth}{Smoothing modality score across each individual modality
neighbors. FALSE by default}
\item{return.intermediate}{Store intermediate results in misc}
\item{modality.weight}{A \code{\link{ModalityWeights}} object generated by
\code{FindModalityWeights}}
\item{verbose}{Print progress bars and output}
}
\value{
Seurat object containing a nearest-neighbor object, KNN graph, and
SNN graph - each based on a weighted combination of modalities.
}
\description{
This function will construct a weighted nearest neighbor (WNN) graph. For
each cell, we identify the nearest neighbors based on a weighted combination
of two modalities. Takes as input two dimensional reductions, one computed
for each modality. Other parameters are listed for debugging, but can be left
as default values.
}
\concept{clustering}
Seurat/man/HoverLocator.Rd 0000644 0001762 0000144 00000001674 14525500037 015177 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{HoverLocator}
\alias{HoverLocator}
\title{Hover Locator}
\usage{
HoverLocator(plot, information = NULL, axes = TRUE, dark.theme = FALSE, ...)
}
\arguments{
\item{plot}{A ggplot2 plot}
\item{information}{An optional dataframe or matrix of extra information to be displayed on hover}
\item{axes}{Display or hide x- and y-axes}
\item{dark.theme}{Plot using a dark theme?}
\item{...}{Extra parameters to be passed to \code{\link[plotly]{layout}}}
}
\description{
Get quick information from a scatterplot by hovering over points
}
\examples{
\dontrun{
data("pbmc_small")
plot <- DimPlot(object = pbmc_small)
HoverLocator(plot = plot, information = FetchData(object = pbmc_small, vars = 'percent.mito'))
}
}
\seealso{
\code{\link[plotly]{layout}} \code{\link[ggplot2]{ggplot_build}}
\code{\link{DimPlot}} \code{\link{FeaturePlot}}
}
\concept{visualization}
Seurat/man/AddAzimuthScores.Rd 0000644 0001762 0000144 00000001122 14525500037 015765 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{AddAzimuthScores}
\alias{AddAzimuthScores}
\title{Add Azimuth Scores}
\usage{
AddAzimuthScores(object, filename)
}
\arguments{
\item{object}{A \code{\link[SeuratObject]{Seurat}} object}
\item{filename}{Path to Azimuth mapping scores file}
}
\value{
\code{object} with the mapping scores added
}
\description{
Add mapping and prediction scores from Azimuth to a
\code{\link[SeuratObject]{Seurat}} object
}
\examples{
\dontrun{
object <- AddAzimuthScores(object, filename = "azimuth_pred.tsv")
}
}
Seurat/man/LeverageScore.Rd 0000644 0001762 0000144 00000005100 14525500037 015302 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/sketching.R
\name{LeverageScore}
\alias{LeverageScore}
\alias{LeverageScore.default}
\alias{LeverageScore.StdAssay}
\alias{LeverageScore.Assay}
\alias{LeverageScore.Seurat}
\title{Leverage Score Calculation}
\usage{
LeverageScore(object, ...)
\method{LeverageScore}{default}(
object,
nsketch = 5000L,
ndims = NULL,
method = CountSketch,
eps = 0.5,
seed = 123L,
verbose = TRUE,
...
)
\method{LeverageScore}{StdAssay}(
object,
nsketch = 5000L,
ndims = NULL,
method = CountSketch,
vf.method = NULL,
layer = "data",
eps = 0.5,
seed = 123L,
verbose = TRUE,
...
)
\method{LeverageScore}{Assay}(
object,
nsketch = 5000L,
ndims = NULL,
method = CountSketch,
vf.method = NULL,
layer = "data",
eps = 0.5,
seed = 123L,
verbose = TRUE,
...
)
\method{LeverageScore}{Seurat}(
object,
assay = NULL,
nsketch = 5000L,
ndims = NULL,
var.name = "leverage.score",
over.write = FALSE,
method = CountSketch,
vf.method = NULL,
layer = "data",
eps = 0.5,
seed = 123L,
verbose = TRUE,
...
)
}
\arguments{
\item{object}{A matrix-like object}
\item{...}{Arguments passed to other methods}
\item{nsketch}{A positive integer. The number of sketches to be used in the approximation.
Default is 5000.}
\item{ndims}{A positive integer or NULL. The number of dimensions to use. If NULL, the number
of dimensions will default to the number of columns in the object.}
\item{method}{The sketching method to use, defaults to CountSketch.}
\item{eps}{A numeric. The error tolerance for the approximation in Johnson–Lindenstrauss embeddings,
defaults to 0.5.}
\item{seed}{A positive integer. The seed for the random number generator, defaults to 123.}
\item{verbose}{Print progress and diagnostic messages}
\item{vf.method}{VariableFeatures method}
\item{layer}{layer to use}
\item{assay}{assay to use}
\item{var.name}{name of slot to store leverage scores}
\item{over.write}{whether to overwrite slot that currently stores leverage scores. Defaults
to FALSE, in which case the 'var.name' is modified if it already exists in the object}
}
\description{
This function computes the leverage scores for a given object
It uses the concept of sketching and random projections. The function provides an approximation
to the leverage scores using a scalable method suitable for large matrices.
}
\references{
Clarkson, K. L. & Woodruff, D. P.
Low-rank approximation and regression in input sparsity time.
JACM 63, 1–45 (2017). \url{https://dl.acm.org/doi/10.1145/3019134};
}
Seurat/man/SingleSpatialPlot.Rd 0000644 0001762 0000144 00000004040 14525500037 016154 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{SingleSpatialPlot}
\alias{SingleSpatialPlot}
\title{Base plotting function for all Spatial plots}
\usage{
SingleSpatialPlot(
data,
image,
cols = NULL,
image.alpha = 1,
pt.alpha = NULL,
crop = TRUE,
pt.size.factor = NULL,
stroke = 0.25,
col.by = NULL,
alpha.by = NULL,
cells.highlight = NULL,
cols.highlight = c("#DE2D26", "grey50"),
geom = c("spatial", "interactive", "poly"),
na.value = "grey50"
)
}
\arguments{
\item{data}{Data.frame with info to be plotted}
\item{image}{\code{SpatialImage} object to be plotted}
\item{cols}{Vector of colors, each color corresponds to an identity class.
This may also be a single character
or numeric value corresponding to a palette as specified by
\code{\link[RColorBrewer]{brewer.pal.info}}. By default, ggplot2 assigns
colors}
\item{image.alpha}{Adjust the opacity of the background images. Set to 0 to
remove.}
\item{pt.alpha}{Adjust the opacity of the points if plotting a
\code{SpatialDimPlot}}
\item{crop}{Crop the plot in to focus on points plotted. Set to \code{FALSE}
to show entire background image.}
\item{pt.size.factor}{Sets the size of the points relative to spot.radius}
\item{stroke}{Control the width of the border around the spots}
\item{col.by}{Mapping variable for the point color}
\item{alpha.by}{Mapping variable for the point alpha value}
\item{cells.highlight}{A list of character or numeric vectors of cells to
highlight. If only one group of cells desired, can simply pass a vector
instead of a list. If set, colors selected cells to the color(s) in
cols.highlight}
\item{cols.highlight}{A vector of colors to highlight the cells as; ordered
the same as the groups in cells.highlight; last color corresponds to
unselected cells.}
\item{geom}{Switch between normal spatial geom and geom to enable hover
functionality}
\item{na.value}{Color for spots with NA values}
}
\value{
A ggplot2 object
}
\description{
Base plotting function for all Spatial plots
}
\keyword{internal}
Seurat/man/cc.genes.Rd 0000644 0001762 0000144 00000000754 14525500037 014253 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{cc.genes}
\alias{cc.genes}
\title{Cell cycle genes}
\format{
A list of two vectors
\describe{
\item{s.genes}{Genes associated with S-phase}
\item{g2m.genes}{Genes associated with G2M-phase}
}
}
\source{
\url{https://www.science.org/doi/abs/10.1126/science.aad0501}
}
\usage{
cc.genes
}
\description{
A list of genes used in cell-cycle regression
}
\concept{data}
\keyword{datasets}
Seurat/man/SingleDimPlot.Rd 0000644 0001762 0000144 00000005076 14525500037 015302 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{SingleDimPlot}
\alias{SingleDimPlot}
\title{Plot a single dimension}
\usage{
SingleDimPlot(
data,
dims,
col.by = NULL,
cols = NULL,
pt.size = NULL,
shape.by = NULL,
alpha = 1,
alpha.by = NULL,
order = NULL,
label = FALSE,
repel = FALSE,
label.size = 4,
cells.highlight = NULL,
cols.highlight = "#DE2D26",
sizes.highlight = 1,
na.value = "grey50",
raster = NULL,
raster.dpi = NULL
)
}
\arguments{
\item{data}{Data to plot}
\item{dims}{A two-length numeric vector with dimensions to use}
\item{col.by}{A vector or factor of values to color the plot by}
\item{cols}{Vector of colors, each color corresponds to an identity class.
This may also be a single character or numeric value corresponding to a
palette as specified by \code{\link[RColorBrewer]{brewer.pal.info}}. By
default, ggplot2 assigns colors}
\item{pt.size}{Adjust point size for plotting}
\item{shape.by}{If NULL, all points are circles (default). You can specify
any cell attribute (that can be pulled with \code{\link{FetchData}})
allowing for both different colors and different shapes on cells.}
\item{alpha}{Alpha value for plotting (default is 1)}
\item{alpha.by}{Mapping variable for the point alpha value}
\item{order}{Specify the order of plotting for the idents. This can be
useful for crowded plots if points of interest are being buried. Provide
either a full list of valid idents or a subset to be plotted last (on top).}
\item{label}{Whether to label the clusters}
\item{repel}{Repel labels}
\item{label.size}{Sets size of labels}
\item{cells.highlight}{A list of character or numeric vectors of cells to
highlight. If only one group of cells desired, can simply
pass a vector instead of a list. If set, colors selected cells to the color(s)
in \code{cols.highlight} and other cells black (white if dark.theme = TRUE);
will also resize to the size(s) passed to \code{sizes.highlight}}
\item{cols.highlight}{A vector of colors to highlight the cells as; will
repeat to the length groups in cells.highlight}
\item{sizes.highlight}{Size of highlighted cells; will repeat to the length
groups in cells.highlight}
\item{na.value}{Color value for NA points when using custom scale.}
\item{raster}{Convert points to raster format, default is \code{NULL}
which will automatically use raster if the number of points plotted is
greater than 100,000}
\item{raster.dpi}{the pixel resolution for rastered plots, passed to geom_scattermore().
Default is c(512, 512)}
}
\value{
A ggplot2 object
}
\description{
Plot a single dimension
}
\keyword{internal}
Seurat/man/SampleUMI.Rd 0000644 0001762 0000144 00000001455 14525500037 014361 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{SampleUMI}
\alias{SampleUMI}
\title{Sample UMI}
\usage{
SampleUMI(data, max.umi = 1000, upsample = FALSE, verbose = FALSE)
}
\arguments{
\item{data}{Matrix with the raw count data}
\item{max.umi}{Number of UMIs to sample to}
\item{upsample}{Upsamples all cells with fewer than max.umi}
\item{verbose}{Display the progress bar}
}
\value{
Matrix with downsampled data
}
\description{
Downsample each cell to a specified number of UMIs. Includes
an option to upsample cells below specified UMI as well.
}
\examples{
data("pbmc_small")
counts = as.matrix(x = GetAssayData(object = pbmc_small, assay = "RNA", slot = "counts"))
downsampled = SampleUMI(data = counts)
head(x = downsampled)
}
\concept{preprocessing}
Seurat/man/SingleImageMap.Rd 0000644 0001762 0000144 00000001135 14525500037 015402 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{SingleImageMap}
\alias{SingleImageMap}
\title{A single heatmap from base R using \code{\link[graphics]{image}}}
\usage{
SingleImageMap(data, order = NULL, title = NULL)
}
\arguments{
\item{data}{matrix of data to plot}
\item{order}{optional vector of cell names to specify order in plot}
\item{title}{Title for plot}
}
\value{
No return, generates a base-R heatmap using \code{\link[graphics]{image}}
}
\description{
A single heatmap from base R using \code{\link[graphics]{image}}
}
\keyword{internal}
Seurat/man/HTOHeatmap.Rd 0000644 0001762 0000144 00000003166 14525500037 014520 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{HTOHeatmap}
\alias{HTOHeatmap}
\title{Hashtag oligo heatmap}
\usage{
HTOHeatmap(
object,
assay = "HTO",
classification = paste0(assay, "_classification"),
global.classification = paste0(assay, "_classification.global"),
ncells = 5000,
singlet.names = NULL,
raster = TRUE
)
}
\arguments{
\item{object}{Seurat object. Assumes that the hash tag oligo (HTO) data has been added and normalized, and demultiplexing has been run with HTODemux().}
\item{assay}{Hashtag assay name.}
\item{classification}{The naming for metadata column with classification result from HTODemux().}
\item{global.classification}{The slot for metadata column specifying a cell as singlet/doublet/negative.}
\item{ncells}{Number of cells to plot. Default is to choose 5000 cells by random subsampling, to avoid having to draw exceptionally large heatmaps.}
\item{singlet.names}{Namings for the singlets. Default is to use the same names as HTOs.}
\item{raster}{If true, plot with geom_raster, else use geom_tile. geom_raster may look blurry on
some viewing applications such as Preview due to how the raster is interpolated. Set this to FALSE
if you are encountering that issue (note that plots may take longer to produce/render).}
}
\value{
Returns a ggplot2 plot object.
}
\description{
Draws a heatmap of hashtag oligo signals across singlets/doublets/negative cells. Allows for the visualization of HTO demultiplexing results.
}
\examples{
\dontrun{
object <- HTODemux(object)
HTOHeatmap(object)
}
}
\seealso{
\code{\link{HTODemux}}
}
\concept{visualization}
Seurat/man/ISpatialFeaturePlot.Rd 0000644 0001762 0000144 00000001550 14525500037 016442 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{ISpatialFeaturePlot}
\alias{ISpatialFeaturePlot}
\title{Visualize features spatially and interactively}
\usage{
ISpatialFeaturePlot(
object,
feature,
image = NULL,
slot = "data",
alpha = c(0.1, 1)
)
}
\arguments{
\item{object}{A Seurat object}
\item{feature}{Feature to visualize}
\item{image}{Name of the image to use in the plot}
\item{slot}{If plotting a feature, which data slot to pull from (counts,
data, or scale.data)}
\item{alpha}{Controls opacity of spots. Provide as a vector specifying the
min and max for SpatialFeaturePlot. For SpatialDimPlot, provide a single
alpha value for each plot.}
}
\value{
Returns final plot as a ggplot object
}
\description{
Visualize features spatially and interactively
}
\concept{spatial}
\concept{visualization}
Seurat/man/Assay-class.Rd 0000644 0001762 0000144 00000000627 14525500037 014750 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reexports.R
\docType{class}
\name{Assay-class}
\alias{Assay-class}
\title{The Assay Class}
\description{
The \code{Assay} object is the basic unit of Seurat; for more details, please
see the documentation in \code{\link[SeuratObject:Assay]{SeuratObject}}
}
\seealso{
\code{\link[SeuratObject:Assay]{SeuratObject::Assay-class}}
}
Seurat/man/MixscapeLDA.Rd 0000644 0001762 0000144 00000003230 14525500037 014650 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mixscape.R
\name{MixscapeLDA}
\alias{MixscapeLDA}
\title{Linear discriminant analysis on pooled CRISPR screen data.}
\usage{
MixscapeLDA(
object,
assay = NULL,
ndims.print = 1:5,
nfeatures.print = 30,
reduction.key = "LDA_",
seed = 42,
pc.assay = "PRTB",
labels = "gene",
nt.label = "NT",
npcs = 10,
verbose = TRUE,
logfc.threshold = 0.25
)
}
\arguments{
\item{object}{An object of class Seurat.}
\item{assay}{Assay to use for performing Linear Discriminant Analysis (LDA).}
\item{ndims.print}{Number of LDA dimensions to print.}
\item{nfeatures.print}{Number of features to print for each LDA component.}
\item{reduction.key}{Reduction key name.}
\item{seed}{Value for random seed}
\item{pc.assay}{Assay to use for running Principal components analysis.}
\item{labels}{Meta data column with target gene class labels.}
\item{nt.label}{Name of non-targeting cell class.}
\item{npcs}{Number of principal components to use.}
\item{verbose}{Print progress bar.}
\item{logfc.threshold}{Limit testing to genes which show, on average, at least
X-fold difference (log-scale) between the two groups of cells. Default is 0.25.
Increasing logfc.threshold speeds up the function, but can miss weaker signals.}
}
\value{
Returns a Seurat object with LDA added in the reduction slot.
}
\description{
This function performs unsupervised PCA on each mixscape class separately and projects each subspace onto all
cells in the data. Finally, it uses the first 10 principal components from each projection as input to lda in the MASS package together with mixscape class labels.
}
\concept{mixscape}
Seurat/man/IntegrationData-class.Rd 0000644 0001762 0000144 00000001655 14525500037 016747 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\docType{class}
\name{IntegrationData-class}
\alias{IntegrationData-class}
\alias{IntegrationData}
\title{The IntegrationData Class}
\description{
The IntegrationData object is an intermediate storage container used internally throughout the
integration procedure to hold bits of data that are useful downstream.
}
\section{Slots}{
\describe{
\item{\code{neighbors}}{List of neighborhood information for cells (outputs of \code{RANN::nn2})}
\item{\code{weights}}{Anchor weight matrix}
\item{\code{integration.matrix}}{Integration matrix}
\item{\code{anchors}}{Anchor matrix}
\item{\code{offsets}}{The offsets used to enable cell look up in downstream functions}
\item{\code{objects.ncell}}{Number of cells in each object in the object.list}
\item{\code{sample.tree}}{Sample tree used for ordering multi-dataset integration}
}}
\concept{objects}
Seurat/man/ProjectCellEmbeddings.Rd 0000644 0001762 0000144 00000005547 14525500037 016763 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/integration.R
\name{ProjectCellEmbeddings}
\alias{ProjectCellEmbeddings}
\alias{ProjectCellEmbeddings.Seurat}
\alias{ProjectCellEmbeddings.Assay}
\alias{ProjectCellEmbeddings.SCTAssay}
\alias{ProjectCellEmbeddings.StdAssay}
\alias{ProjectCellEmbeddings.default}
\alias{ProjectCellEmbeddings.IterableMatrix}
\title{Project query data to the reference dimensional reduction}
\usage{
ProjectCellEmbeddings(query, ...)
\method{ProjectCellEmbeddings}{Seurat}(
query,
reference,
query.assay = NULL,
reference.assay = NULL,
reduction = "pca",
dims = 1:50,
normalization.method = c("LogNormalize", "SCT"),
scale = TRUE,
verbose = TRUE,
nCount_UMI = NULL,
feature.mean = NULL,
feature.sd = NULL,
...
)
\method{ProjectCellEmbeddings}{Assay}(
query,
reference,
reference.assay = NULL,
reduction = "pca",
dims = 1:50,
scale = TRUE,
normalization.method = NULL,
verbose = TRUE,
nCount_UMI = NULL,
feature.mean = NULL,
feature.sd = NULL,
...
)
\method{ProjectCellEmbeddings}{SCTAssay}(
query,
reference,
reference.assay = NULL,
reduction = "pca",
dims = 1:50,
scale = TRUE,
normalization.method = NULL,
verbose = TRUE,
nCount_UMI = NULL,
feature.mean = NULL,
feature.sd = NULL,
...
)
\method{ProjectCellEmbeddings}{StdAssay}(
query,
reference,
reference.assay = NULL,
reduction = "pca",
dims = 1:50,
scale = TRUE,
normalization.method = NULL,
verbose = TRUE,
nCount_UMI = NULL,
feature.mean = NULL,
feature.sd = NULL,
...
)
\method{ProjectCellEmbeddings}{default}(
query,
reference,
reference.assay = NULL,
reduction = "pca",
dims = 1:50,
scale = TRUE,
normalization.method = NULL,
verbose = TRUE,
features = NULL,
nCount_UMI = NULL,
feature.mean = NULL,
feature.sd = NULL,
...
)
\method{ProjectCellEmbeddings}{IterableMatrix}(
query,
reference,
reference.assay = NULL,
reduction = "pca",
dims = 1:50,
scale = TRUE,
normalization.method = NULL,
verbose = TRUE,
features = features,
nCount_UMI = NULL,
feature.mean = NULL,
feature.sd = NULL,
block.size = 10000,
...
)
}
\arguments{
\item{query}{An object for query cells}
\item{reference}{An object for reference cells}
\item{query.assay}{Assay name for query object}
\item{reference.assay}{Assay name for reference object}
\item{reduction}{Name of dimensional reduction from reference object}
\item{dims}{Dimensions used for reference dimensional reduction}
\item{scale}{Determine if scale query data based on reference data variance}
\item{verbose}{Print progress}
\item{feature.mean}{Mean of features in reference}
\item{feature.sd}{Standard variance of features in reference}
}
\value{
A matrix with projected cell embeddings
}
\description{
Project query data to the reference dimensional reduction
}
\keyword{internal}
Seurat/man/RunMixscape.Rd 0000644 0001762 0000144 00000006037 14525500037 015024 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mixscape.R
\name{RunMixscape}
\alias{RunMixscape}
\title{Run Mixscape}
\usage{
RunMixscape(
object,
assay = "PRTB",
slot = "scale.data",
labels = "gene",
nt.class.name = "NT",
new.class.name = "mixscape_class",
min.de.genes = 5,
min.cells = 5,
de.assay = "RNA",
logfc.threshold = 0.25,
iter.num = 10,
verbose = FALSE,
split.by = NULL,
fine.mode = FALSE,
fine.mode.labels = "guide_ID",
prtb.type = "KO"
)
}
\arguments{
\item{object}{An object of class Seurat.}
\item{assay}{Assay to use for mixscape classification.}
\item{slot}{Assay data slot to use.}
\item{labels}{metadata column with target gene labels.}
\item{nt.class.name}{Classification name of non-targeting gRNA cells.}
\item{new.class.name}{Name of mixscape classification to be stored in
metadata.}
\item{min.de.genes}{Required number of genes that are differentially
expressed for method to separate perturbed and non-perturbed cells.}
\item{min.cells}{Minimum number of cells in target gene class. If fewer than
this many cells are assigned to a target gene class during classification,
all are assigned NP.}
\item{de.assay}{Assay to use when performing differential expression analysis.
Usually RNA.}
\item{logfc.threshold}{Limit testing to genes which show, on average,
at least X-fold difference (log-scale) between the two groups of cells.
Default is 0.25 Increasing logfc.threshold speeds up the function, but can miss
weaker signals.}
\item{iter.num}{Number of normalmixEM iterations to run if convergence does
not occur.}
\item{verbose}{Display messages}
\item{split.by}{metadata column with experimental condition/cell type
classification information. This is meant to be used to account for cases a
perturbation is condition/cell type -specific.}
\item{fine.mode}{When this is equal to TRUE, DE genes for each target gene
class will be calculated for each gRNA separately and pooled into one DE list
for calculating the perturbation score of every cell and their subsequent
classification.}
\item{fine.mode.labels}{metadata column with gRNA ID labels.}
\item{prtb.type}{specify type of CRISPR perturbation expected for labeling mixscape classifications. Default is KO.}
}
\value{
Returns Seurat object with the following information in the
meta data and tools slots:
\describe{
\item{mixscape_class}{Classification result with cells being either
classified as perturbed (KO, by default) or non-perturbed (NP) based on their target
gene class.}
\item{mixscape_class.global}{Global classification result (perturbed, NP or NT)}
\item{p_ko}{Posterior probabilities used to determine if a cell is KO (>0.5)
or NP. Name of this item will change to match the prtb.type parameter setting.}
\item{perturbation score}{Perturbation scores for every cell calculated in
the first iteration of the function.}
}
}
\description{
Function to identify perturbed and non-perturbed gRNA expressing cells that
accounts for multiple treatments/conditions/chemical perturbations.
}
\concept{mixscape}
Seurat/man/Cells.Rd 0000644 0001762 0000144 00000001106 14525500037 013620 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{Cells.SCTModel}
\alias{Cells.SCTModel}
\alias{Cells.SlideSeq}
\alias{Cells.STARmap}
\alias{Cells.VisiumV1}
\title{Get Cell Names}
\usage{
\method{Cells}{SCTModel}(x, ...)
\method{Cells}{SlideSeq}(x, ...)
\method{Cells}{STARmap}(x, ...)
\method{Cells}{VisiumV1}(x, ...)
}
\arguments{
\item{x}{An object}
\item{...}{Arguments passed to other methods}
}
\description{
Get Cell Names
}
\seealso{
\code{\link[SeuratObject:Cells]{SeuratObject::Cells}}
}
\concept{objects}
\concept{spatial}
Seurat/man/RunMoransI.Rd 0000644 0001762 0000144 00000000733 14525500037 014620 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{RunMoransI}
\alias{RunMoransI}
\title{Compute Moran's I value.}
\usage{
RunMoransI(data, pos, verbose = TRUE)
}
\arguments{
\item{data}{Expression matrix}
\item{pos}{Position matrix}
\item{verbose}{Display messages/progress}
}
\description{
Wraps the functionality of the Moran.I function from the ape package.
Weights are computed as 1/distance.
}
\concept{preprocessing}
Seurat/man/fortify-Spatial.Rd 0000644 0001762 0000144 00000001667 14525500037 015647 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{fortify-Spatial}
\alias{fortify-Spatial}
\alias{fortify.Centroids}
\alias{fortify}
\alias{fortify.Molecules}
\alias{fortify.Segmentation}
\title{Prepare Coordinates for Spatial Plots}
\usage{
\method{fortify}{Centroids}(model, data, ...)
\method{fortify}{Molecules}(model, data, nmols = NULL, seed = NA_integer_, ...)
\method{fortify}{Segmentation}(model, data, ...)
}
\arguments{
\item{model}{A \code{\linkS4class{Segmentation}},
\code{\linkS4class{Centroids}},
or \code{\linkS4class{Molecules}} object}
\item{data}{Extra data to be used for annotating the cell segmentations; the
easiest way to pass data is a one-column
\code{\link[base:data.frame]{data frame}} with the values to color by and
the cell names are rownames}
\item{...}{Arguments passed to other methods}
}
\description{
Prepare Coordinates for Spatial Plots
}
\keyword{internal}
Seurat/man/VisiumV1-class.Rd 0000644 0001762 0000144 00000001377 14525500037 015356 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\docType{class}
\name{VisiumV1-class}
\alias{VisiumV1-class}
\alias{VisiumV1}
\title{The VisiumV1 class}
\description{
The VisiumV1 class represents spatial information from the 10X Genomics Visium
platform
}
\section{Slots}{
\describe{
\item{\code{image}}{A three-dimensional array with PNG image data, see
\code{\link[png]{readPNG}} for more details}
\item{\code{scale.factors}}{An object of class \code{\link{scalefactors}}; see
\code{\link{scalefactors}} for more information}
\item{\code{coordinates}}{A data frame with tissue coordinate information}
\item{\code{spot.radius}}{Single numeric value giving the radius of the spots}
}}
\concept{objects}
\concept{spatial}
Seurat/man/Read10X_h5.Rd 0000644 0001762 0000144 00000001326 14525500037 014322 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{Read10X_h5}
\alias{Read10X_h5}
\title{Read 10X hdf5 file}
\usage{
Read10X_h5(filename, use.names = TRUE, unique.features = TRUE)
}
\arguments{
\item{filename}{Path to h5 file}
\item{use.names}{Label row names with feature names rather than ID numbers.}
\item{unique.features}{Make feature names unique (default TRUE)}
}
\value{
Returns a sparse matrix with rows and columns labeled. If multiple
genomes are present, returns a list of sparse matrices (one per genome).
}
\description{
Read count matrix from 10X CellRanger hdf5 file.
This can be used to read both scATAC-seq and scRNA-seq matrices.
}
\concept{preprocessing}
Seurat/man/SeuratTheme.Rd 0000644 0001762 0000144 00000006021 14525500037 015005 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{SeuratTheme}
\alias{SeuratTheme}
\alias{CenterTitle}
\alias{DarkTheme}
\alias{FontSize}
\alias{NoAxes}
\alias{NoLegend}
\alias{NoGrid}
\alias{SeuratAxes}
\alias{SpatialTheme}
\alias{RestoreLegend}
\alias{RotatedAxis}
\alias{BoldTitle}
\alias{WhiteBackground}
\title{Seurat Themes}
\usage{
SeuratTheme()
CenterTitle(...)
DarkTheme(...)
FontSize(
x.text = NULL,
y.text = NULL,
x.title = NULL,
y.title = NULL,
main = NULL,
...
)
NoAxes(..., keep.text = FALSE, keep.ticks = FALSE)
NoLegend(...)
NoGrid(...)
SeuratAxes(...)
SpatialTheme(...)
RestoreLegend(..., position = "right")
RotatedAxis(...)
BoldTitle(...)
WhiteBackground(...)
}
\arguments{
\item{...}{Extra parameters to be passed to \code{theme}}
\item{x.text, y.text}{X and Y axis text sizes}
\item{x.title, y.title}{X and Y axis title sizes}
\item{main}{Plot title size}
\item{keep.text}{Keep axis text}
\item{keep.ticks}{Keep axis ticks}
\item{position}{A position to restore the legend to}
}
\value{
A ggplot2 theme object
}
\description{
Various themes to be applied to ggplot2-based plots
\describe{
\item{\code{SeuratTheme}}{The curated Seurat theme, consists of ...}
\item{\code{DarkTheme}}{A dark theme, axes and text turn to white, the background becomes black}
\item{\code{NoAxes}}{Removes axis lines, text, and ticks}
\item{\code{NoLegend}}{Removes the legend}
\item{\code{FontSize}}{Sets axis and title font sizes}
\item{\code{NoGrid}}{Removes grid lines}
\item{\code{SeuratAxes}}{Set Seurat-style axes}
\item{\code{SpatialTheme}}{A theme designed for spatial visualizations (eg \code{\link{PolyFeaturePlot}}, \code{\link{PolyDimPlot}})}
\item{\code{RestoreLegend}}{Restore a legend after removal}
\item{\code{RotatedAxis}}{Rotate X axis text 45 degrees}
\item{\code{BoldTitle}}{Enlarges and emphasizes the title}
}
}
\examples{
# Generate a plot with a dark theme
library(ggplot2)
df <- data.frame(x = rnorm(n = 100, mean = 20, sd = 2), y = rbinom(n = 100, size = 100, prob = 0.2))
p <- ggplot(data = df, mapping = aes(x = x, y = y)) + geom_point(mapping = aes(color = 'red'))
p + DarkTheme(legend.position = 'none')
# Generate a plot with no axes
library(ggplot2)
df <- data.frame(x = rnorm(n = 100, mean = 20, sd = 2), y = rbinom(n = 100, size = 100, prob = 0.2))
p <- ggplot(data = df, mapping = aes(x = x, y = y)) + geom_point(mapping = aes(color = 'red'))
p + NoAxes()
# Generate a plot with no legend
library(ggplot2)
df <- data.frame(x = rnorm(n = 100, mean = 20, sd = 2), y = rbinom(n = 100, size = 100, prob = 0.2))
p <- ggplot(data = df, mapping = aes(x = x, y = y)) + geom_point(mapping = aes(color = 'red'))
p + NoLegend()
# Generate a plot with no grid lines
library(ggplot2)
df <- data.frame(x = rnorm(n = 100, mean = 20, sd = 2), y = rbinom(n = 100, size = 100, prob = 0.2))
p <- ggplot(data = df, mapping = aes(x = x, y = y)) + geom_point(mapping = aes(color = 'red'))
p + NoGrid()
}
\seealso{
\code{\link[ggplot2]{theme}}
}
\concept{visualization}
Seurat/man/BridgeCellsRepresentation.Rd 0000644 0001762 0000144 00000003040 14525500037 017657 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{BridgeCellsRepresentation}
\alias{BridgeCellsRepresentation}
\title{Construct a dictionary representation for each unimodal dataset}
\usage{
BridgeCellsRepresentation(
object.list,
bridge.object,
object.reduction,
bridge.reduction,
laplacian.reduction = "lap",
laplacian.dims = 1:50,
bridge.assay.name = "Bridge",
return.all.assays = FALSE,
l2.norm = TRUE,
verbose = TRUE
)
}
\arguments{
\item{object.list}{A list of Seurat objects}
\item{bridge.object}{A multi-omic bridge Seurat which is used as the basis to
represent unimodal datasets}
\item{object.reduction}{A list of dimensional reductions from object.list used
to be reconstructed by bridge.object}
\item{bridge.reduction}{A list of dimensional reductions from bridge.object used
to reconstruct object.reduction}
\item{laplacian.reduction}{Name of bridge graph laplacian dimensional reduction}
\item{laplacian.dims}{Dimensions used for bridge graph laplacian dimensional reduction}
\item{bridge.assay.name}{Assay name used for bridge object reconstruction value (default is 'Bridge')}
\item{return.all.assays}{Whether to return all assays in the object.list.
Only bridge assay is returned by default.}
\item{l2.norm}{Whether to l2 normalize the dictionary representation}
\item{verbose}{Print messages and progress}
}
\value{
Returns a object list in which each object has a bridge cell derived assay
}
\description{
Construct a dictionary representation for each unimodal dataset
}
Seurat/man/RPCAIntegration.Rd 0000644 0001762 0000144 00000010403 14525500037 015507 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration5.R
\name{RPCAIntegration}
\alias{RPCAIntegration}
\title{Seurat-RPCA Integration}
\usage{
RPCAIntegration(
object = NULL,
assay = NULL,
layers = NULL,
orig = NULL,
new.reduction = "integrated.dr",
reference = NULL,
features = NULL,
normalization.method = c("LogNormalize", "SCT"),
dims = 1:30,
k.filter = NA,
scale.layer = "scale.data",
dims.to.integrate = NULL,
k.weight = 100,
weight.reduction = NULL,
sd.weight = 1,
sample.tree = NULL,
preserve.order = FALSE,
verbose = TRUE,
...
)
}
\arguments{
\item{object}{A \code{Seurat} object}
\item{assay}{Name of \code{Assay} in the \code{Seurat} object}
\item{layers}{Names of layers in \code{assay}}
\item{orig}{A \link[SeuratObject:DimReduc]{dimensional reduction} to correct}
\item{new.reduction}{Name of new integrated dimensional reduction}
\item{reference}{A reference \code{Seurat} object}
\item{features}{A vector of features to use for integration}
\item{normalization.method}{Name of normalization method used: LogNormalize
or SCT}
\item{dims}{Dimensions of dimensional reduction to use for integration}
\item{k.filter}{Number of anchors to filter}
\item{scale.layer}{Name of scaled layer in \code{Assay}}
\item{dims.to.integrate}{Number of dimensions to return integrated values for}
\item{k.weight}{Number of neighbors to consider when weighting anchors}
\item{weight.reduction}{Dimension reduction to use when calculating anchor
weights. This can be one of:
\itemize{
\item{A string, specifying the name of a dimension reduction present in
all objects to be integrated}
\item{A vector of strings, specifying the name of a dimension reduction to
use for each object to be integrated}
\item{A vector of \code{\link{DimReduc}} objects, specifying the object to
use for each object in the integration}
\item{NULL, in which case the full corrected space is used for computing
anchor weights.}
}}
\item{sd.weight}{Controls the bandwidth of the Gaussian kernel for weighting}
\item{sample.tree}{Specify the order of integration. Order of integration
should be encoded in a matrix, where each row represents one of the pairwise
integration steps. Negative numbers specify a dataset, positive numbers
specify the integration results from a given row (the format of the merge
matrix included in the \code{\link{hclust}} function output). For example:
\code{matrix(c(-2, 1, -3, -1), ncol = 2)} gives:
\if{html}{\out{
}}\preformatted{ [,1] [,2]
[1,] -2 -3
[2,] 1 -1
}\if{html}{\out{
}}
Which would cause dataset 2 and 3 to be integrated first, then the resulting
object integrated with dataset 1.
If NULL, the sample tree will be computed automatically.}
\item{preserve.order}{Do not reorder objects based on size for each pairwise
integration.}
\item{verbose}{Print progress}
\item{...}{Arguments passed on to \code{FindIntegrationAnchors}}
}
\description{
Seurat-RPCA Integration
}
\examples{
\dontrun{
# Preprocessing
obj <- SeuratData::LoadData("pbmcsca")
obj[["RNA"]] <- split(obj[["RNA"]], f = obj$Method)
obj <- NormalizeData(obj)
obj <- FindVariableFeatures(obj)
obj <- ScaleData(obj)
obj <- RunPCA(obj)
# After preprocessing, we run integration
obj <- IntegrateLayers(object = obj, method = RPCAIntegration,
orig.reduction = "pca", new.reduction = 'integrated.rpca',
verbose = FALSE)
# Reference-based Integration
# Here, we use the first layer as a reference for integration
# Thus, we only identify anchors between the reference and the rest of the datasets,
# saving computational resources
obj <- IntegrateLayers(object = obj, method = RPCAIntegration,
orig.reduction = "pca", new.reduction = 'integrated.rpca',
reference = 1, verbose = FALSE)
# Modifying parameters
# We can also specify parameters such as `k.anchor` to increase the strength of
# integration
obj <- IntegrateLayers(object = obj, method = RPCAIntegration,
orig.reduction = "pca", new.reduction = 'integrated.rpca',
k.anchor = 20, verbose = FALSE)
# Integrating SCTransformed data
obj <- SCTransform(object = obj)
obj <- IntegrateLayers(object = obj, method = RPCAIntegration,
orig.reduction = "pca", new.reduction = 'integrated.rpca',
assay = "SCT", verbose = FALSE)
}
}
Seurat/man/ProjectUMAP.Rd 0000644 0001762 0000144 00000006113 14525500037 014652 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/dimensional_reduction.R
\name{ProjectUMAP}
\alias{ProjectUMAP}
\alias{ProjectUMAP.default}
\alias{ProjectUMAP.DimReduc}
\alias{ProjectUMAP.Seurat}
\title{Project query into UMAP coordinates of a reference}
\usage{
ProjectUMAP(query, ...)
\method{ProjectUMAP}{default}(
query,
query.dims = NULL,
reference,
reference.dims = NULL,
k.param = 30,
nn.method = "annoy",
n.trees = 50,
annoy.metric = "cosine",
l2.norm = FALSE,
cache.index = TRUE,
index = NULL,
neighbor.name = "query_ref.nn",
reduction.model,
...
)
\method{ProjectUMAP}{DimReduc}(
query,
query.dims = NULL,
reference,
reference.dims = NULL,
k.param = 30,
nn.method = "annoy",
n.trees = 50,
annoy.metric = "cosine",
l2.norm = FALSE,
cache.index = TRUE,
index = NULL,
neighbor.name = "query_ref.nn",
reduction.model,
...
)
\method{ProjectUMAP}{Seurat}(
query,
query.reduction,
query.dims = NULL,
reference,
reference.reduction,
reference.dims = NULL,
k.param = 30,
nn.method = "annoy",
n.trees = 50,
annoy.metric = "cosine",
l2.norm = FALSE,
cache.index = TRUE,
index = NULL,
neighbor.name = "query_ref.nn",
reduction.model,
reduction.name = "ref.umap",
reduction.key = "refUMAP_",
...
)
}
\arguments{
\item{query}{Query dataset}
\item{...}{Additional parameters to \code{\link{RunUMAP}}}
\item{query.dims}{Dimensions (columns) to use from query}
\item{reference}{Reference dataset}
\item{reference.dims}{Dimensions (columns) to use from reference}
\item{k.param}{Defines k for the k-nearest neighbor algorithm}
\item{nn.method}{Method for nearest neighbor finding. Options include: rann,
annoy}
\item{n.trees}{More trees gives higher precision when using annoy approximate
nearest neighbor search}
\item{annoy.metric}{Distance metric for annoy. Options include: euclidean,
cosine, manhattan, and hamming}
\item{l2.norm}{Take L2Norm of the data}
\item{cache.index}{Include cached index in returned Neighbor object
(only relevant if return.neighbor = TRUE)}
\item{index}{Precomputed index. Useful if querying new data against existing
index to avoid recomputing.}
\item{neighbor.name}{Name to store neighbor information in the query}
\item{reduction.model}{\code{DimReduc} object that contains the umap model}
\item{query.reduction}{Name of reduction to use from the query for neighbor
finding}
\item{reference.reduction}{Name of reduction to use from the reference for
neighbor finding}
\item{reduction.name}{Name of projected UMAP to store in the query}
\item{reduction.key}{Value for the projected UMAP key}
}
\description{
This function will take a query dataset and project it into the coordinates
of a provided reference UMAP. This is essentially a wrapper around two steps:
\itemize{
\item{FindNeighbors - Find the nearest reference cell neighbors and their
distances for each query cell.}
\item{RunUMAP - Perform umap projection by providing the neighbor set
calculated above and the umap model previously computed in the reference.}
}
}
\concept{dimensional_reduction}
Seurat/man/FindNeighbors.Rd 0000644 0001762 0000144 00000012470 14525500037 015305 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/clustering.R
\name{FindNeighbors}
\alias{FindNeighbors}
\alias{FindNeighbors.default}
\alias{FindNeighbors.Assay}
\alias{FindNeighbors.dist}
\alias{FindNeighbors.Seurat}
\title{(Shared) Nearest-neighbor graph construction}
\usage{
FindNeighbors(object, ...)
\method{FindNeighbors}{default}(
object,
query = NULL,
distance.matrix = FALSE,
k.param = 20,
return.neighbor = FALSE,
compute.SNN = !return.neighbor,
prune.SNN = 1/15,
nn.method = "annoy",
n.trees = 50,
annoy.metric = "euclidean",
nn.eps = 0,
verbose = TRUE,
l2.norm = FALSE,
cache.index = FALSE,
index = NULL,
...
)
\method{FindNeighbors}{Assay}(
object,
features = NULL,
k.param = 20,
return.neighbor = FALSE,
compute.SNN = !return.neighbor,
prune.SNN = 1/15,
nn.method = "annoy",
n.trees = 50,
annoy.metric = "euclidean",
nn.eps = 0,
verbose = TRUE,
l2.norm = FALSE,
cache.index = FALSE,
...
)
\method{FindNeighbors}{dist}(
object,
k.param = 20,
return.neighbor = FALSE,
compute.SNN = !return.neighbor,
prune.SNN = 1/15,
nn.method = "annoy",
n.trees = 50,
annoy.metric = "euclidean",
nn.eps = 0,
verbose = TRUE,
l2.norm = FALSE,
cache.index = FALSE,
...
)
\method{FindNeighbors}{Seurat}(
object,
reduction = "pca",
dims = 1:10,
assay = NULL,
features = NULL,
k.param = 20,
return.neighbor = FALSE,
compute.SNN = !return.neighbor,
prune.SNN = 1/15,
nn.method = "annoy",
n.trees = 50,
annoy.metric = "euclidean",
nn.eps = 0,
verbose = TRUE,
do.plot = FALSE,
graph.name = NULL,
l2.norm = FALSE,
cache.index = FALSE,
...
)
}
\arguments{
\item{object}{An object}
\item{...}{Arguments passed to other methods}
\item{query}{Matrix of data to query against object. If missing, defaults to
object.}
\item{distance.matrix}{Boolean value of whether the provided matrix is a
distance matrix; note, for objects of class \code{dist}, this parameter will
be set automatically}
\item{k.param}{Defines k for the k-nearest neighbor algorithm}
\item{return.neighbor}{Return result as \code{\link{Neighbor}} object. Not
used with distance matrix input.}
\item{compute.SNN}{also compute the shared nearest neighbor graph}
\item{prune.SNN}{Sets the cutoff for acceptable Jaccard index when
computing the neighborhood overlap for the SNN construction. Any edges with
values less than or equal to this will be set to 0 and removed from the SNN
graph. Essentially sets the stringency of pruning (0 --- no pruning, 1 ---
prune everything).}
\item{nn.method}{Method for nearest neighbor finding. Options include: rann,
annoy}
\item{n.trees}{More trees gives higher precision when using annoy approximate
nearest neighbor search}
\item{annoy.metric}{Distance metric for annoy. Options include: euclidean,
cosine, manhattan, and hamming}
\item{nn.eps}{Error bound when performing nearest neighbor seach using RANN;
default of 0.0 implies exact nearest neighbor search}
\item{verbose}{Whether or not to print output to the console}
\item{l2.norm}{Take L2Norm of the data}
\item{cache.index}{Include cached index in returned Neighbor object
(only relevant if return.neighbor = TRUE)}
\item{index}{Precomputed index. Useful if querying new data against existing
index to avoid recomputing.}
\item{features}{Features to use as input for building the (S)NN; used only when
\code{dims} is \code{NULL}}
\item{reduction}{Reduction to use as input for building the (S)NN}
\item{dims}{Dimensions of reduction to use as input}
\item{assay}{Assay to use in construction of (S)NN; used only when \code{dims}
is \code{NULL}}
\item{do.plot}{Plot SNN graph on tSNE coordinates}
\item{graph.name}{Optional naming parameter for stored (S)NN graph
(or Neighbor object, if return.neighbor = TRUE). Default is assay.name_(s)nn.
To store both the neighbor graph and the shared nearest neighbor (SNN) graph,
you must supply a vector containing two names to the \code{graph.name}
parameter. The first element in the vector will be used to store the nearest
neighbor (NN) graph, and the second element used to store the SNN graph. If
only one name is supplied, only the NN graph is stored.}
}
\value{
This function can either return a \code{\link{Neighbor}} object
with the KNN information or a list of \code{\link{Graph}} objects with
the KNN and SNN depending on the settings of \code{return.neighbor} and
\code{compute.SNN}. When running on a \code{\link{Seurat}} object, this
returns the \code{\link{Seurat}} object with the Graphs or Neighbor objects
stored in their respective slots. Names of the Graph or Neighbor object can
be found with \code{\link{Graphs}} or \code{\link{Neighbors}}.
}
\description{
Computes the \code{k.param} nearest neighbors for a given dataset. Can also
optionally (via \code{compute.SNN}), construct a shared nearest neighbor
graph by calculating the neighborhood overlap (Jaccard index) between every
cell and its \code{k.param} nearest neighbors.
}
\examples{
data("pbmc_small")
pbmc_small
# Compute an SNN on the gene expression level
pbmc_small <- FindNeighbors(pbmc_small, features = VariableFeatures(object = pbmc_small))
# More commonly, we build the SNN on a dimensionally reduced form of the data
# such as the first 10 principal components.
pbmc_small <- FindNeighbors(pbmc_small, reduction = "pca", dims = 1:10)
}
\concept{clustering}
Seurat/man/Seurat-class.Rd 0000644 0001762 0000144 00000000664 14525500037 015134 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reexports.R
\docType{class}
\name{Seurat-class}
\alias{Seurat-class}
\title{The Seurat Class}
\description{
The Seurat object is a representation of single-cell expression data for R;
for more details, please see the documentation in
\code{\link[SeuratObject:Seurat]{SeuratObject}}
}
\seealso{
\code{\link[SeuratObject:Seurat]{SeuratObject::Seurat-class}}
}
Seurat/man/GroupCorrelationPlot.Rd 0000644 0001762 0000144 00000001522 14525500037 016715 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{GroupCorrelationPlot}
\alias{GroupCorrelationPlot}
\title{Boxplot of correlation of a variable (e.g. number of UMIs) with expression
data}
\usage{
GroupCorrelationPlot(
object,
assay = NULL,
feature.group = "feature.grp",
cor = "nCount_RNA_cor"
)
}
\arguments{
\item{object}{Seurat object}
\item{assay}{Assay where the feature grouping info and correlations are
stored}
\item{feature.group}{Name of the column in meta.features where the feature
grouping info is stored}
\item{cor}{Name of the column in meta.features where correlation info is
stored}
}
\value{
Returns a ggplot boxplot of correlations split by group
}
\description{
Boxplot of correlation of a variable (e.g. number of UMIs) with expression
data
}
\concept{visualization}
Seurat/man/BuildNicheAssay.Rd 0000644 0001762 0000144 00000001614 14525500037 015571 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{BuildNicheAssay}
\alias{BuildNicheAssay}
\title{Construct an assay for spatial niche analysis}
\usage{
BuildNicheAssay(
object,
fov,
group.by,
assay = "niche",
neighbors.k = 20,
niches.k = 4
)
}
\arguments{
\item{object}{A Seurat object}
\item{fov}{FOV object to gather cell positions from}
\item{group.by}{Cell classifications to count in spatial neighborhood}
\item{assay}{Name for spatial neighborhoods assay}
\item{neighbors.k}{Number of neighbors to consider for each cell}
\item{niches.k}{Number of clusters to return based on the niche assay}
}
\value{
Seurat object containing a new assay
}
\description{
This function will construct a new assay where each feature is a
cell label. The values represent the sum of a particular cell label
neighboring a given cell.
}
\concept{clustering}
Seurat/man/LoadSTARmap.Rd 0000644 0001762 0000144 00000002011 14525500037 014621 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{LoadSTARmap}
\alias{LoadSTARmap}
\title{Load STARmap data}
\usage{
LoadSTARmap(
data.dir,
counts.file = "cell_barcode_count.csv",
gene.file = "genes.csv",
qhull.file = "qhulls.tsv",
centroid.file = "centroids.tsv",
assay = "Spatial",
image = "image"
)
}
\arguments{
\item{data.dir}{location of data directory that contains the counts matrix,
gene name, qhull, and centroid files.}
\item{counts.file}{name of file containing the counts matrix (csv)}
\item{gene.file}{name of file containing the gene names (csv)}
\item{qhull.file}{name of file containing the hull coordinates (tsv)}
\item{centroid.file}{name of file containing the centroid positions (tsv)}
\item{assay}{Name of assay to associate spatial data to}
\item{image}{Name of "image" object storing spatial coordinates}
}
\value{
A \code{\link{Seurat}} object
}
\description{
Load STARmap data
}
\seealso{
\code{\link{STARmap}}
}
\concept{preprocessing}
Seurat/man/DoHeatmap.Rd 0000644 0001762 0000144 00000004740 14525500037 014427 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{DoHeatmap}
\alias{DoHeatmap}
\title{Feature expression heatmap}
\usage{
DoHeatmap(
object,
features = NULL,
cells = NULL,
group.by = "ident",
group.bar = TRUE,
group.colors = NULL,
disp.min = -2.5,
disp.max = NULL,
slot = "scale.data",
assay = NULL,
label = TRUE,
size = 5.5,
hjust = 0,
vjust = 0,
angle = 45,
raster = TRUE,
draw.lines = TRUE,
lines.width = NULL,
group.bar.height = 0.02,
combine = TRUE
)
}
\arguments{
\item{object}{Seurat object}
\item{features}{A vector of features to plot, defaults to \code{VariableFeatures(object = object)}}
\item{cells}{A vector of cells to plot}
\item{group.by}{A vector of variables to group cells by; pass 'ident' to group by cell identity classes}
\item{group.bar}{Add a color bar showing group status for cells}
\item{group.colors}{Colors to use for the color bar}
\item{disp.min}{Minimum display value (all values below are clipped)}
\item{disp.max}{Maximum display value (all values above are clipped); defaults to 2.5
if \code{slot} is 'scale.data', 6 otherwise}
\item{slot}{Data slot to use, choose from 'raw.data', 'data', or 'scale.data'}
\item{assay}{Assay to pull from}
\item{label}{Label the cell identities above the color bar}
\item{size}{Size of text above color bar}
\item{hjust}{Horizontal justification of text above color bar}
\item{vjust}{Vertical justification of text above color bar}
\item{angle}{Angle of text above color bar}
\item{raster}{If true, plot with geom_raster, else use geom_tile. geom_raster may look blurry on
some viewing applications such as Preview due to how the raster is interpolated. Set this to FALSE
if you are encountering that issue (note that plots may take longer to produce/render).}
\item{draw.lines}{Include white lines to separate the groups}
\item{lines.width}{Integer number to adjust the width of the separating white lines.
Corresponds to the number of "cells" between each group.}
\item{group.bar.height}{Scale the height of the color bar}
\item{combine}{Combine plots into a single \code{\link[patchwork]{patchwork}ed}
ggplot object. If \code{FALSE}, return a list of ggplot objects}
}
\value{
A \code{\link[patchwork]{patchwork}ed} ggplot object if
\code{combine = TRUE}; otherwise, a list of ggplot objects
}
\description{
Draws a heatmap of single cell feature expression.
}
\examples{
data("pbmc_small")
DoHeatmap(object = pbmc_small)
}
\concept{visualization}
Seurat/man/CalculateBarcodeInflections.Rd 0000644 0001762 0000144 00000004556 14525500037 020145 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{CalculateBarcodeInflections}
\alias{CalculateBarcodeInflections}
\title{Calculate the Barcode Distribution Inflection}
\usage{
CalculateBarcodeInflections(
object,
barcode.column = "nCount_RNA",
group.column = "orig.ident",
threshold.low = NULL,
threshold.high = NULL
)
}
\arguments{
\item{object}{Seurat object}
\item{barcode.column}{Column to use as proxy for barcodes ("nCount_RNA" by default)}
\item{group.column}{Column to group by ("orig.ident" by default)}
\item{threshold.low}{Ignore barcodes of rank below this threshold in inflection calculation}
\item{threshold.high}{Ignore barcodes of rank above this threshold in inflection calculation}
}
\value{
Returns Seurat object with a new list in the `tools` slot, `CalculateBarcodeInflections` with values:
* `barcode_distribution` - contains the full barcode distribution across the entire dataset
* `inflection_points` - the calculated inflection points within the thresholds
* `threshold_values` - the provided (or default) threshold values to search within for inflections
* `cells_pass` - the cells that pass the inflection point calculation
}
\description{
This function calculates an adaptive inflection point ("knee") of the barcode distribution
for each sample group. This is useful for determining a threshold for removing
low-quality samples.
}
\details{
The function operates by calculating the slope of the barcode number vs. rank
distribution, and then finding the point at which the distribution changes most
steeply (the "knee"). Of note, this calculation often must be restricted as to the
range at which it performs, so `threshold` parameters are provided to restrict the
range of the calculation based on the rank of the barcodes. [BarcodeInflectionsPlot()]
is provided as a convenience function to visualize and test different thresholds and
thus provide more sensible end results.
See [BarcodeInflectionsPlot()] to visualize the calculated inflection points and
[SubsetByBarcodeInflections()] to subsequently subset the Seurat object.
}
\examples{
data("pbmc_small")
CalculateBarcodeInflections(pbmc_small, group.column = 'groups')
}
\seealso{
\code{\link{BarcodeInflectionsPlot}} \code{\link{SubsetByBarcodeInflections}}
}
\author{
Robert A. Amezquita, \email{robert.amezquita@fredhutch.org}
}
\concept{preprocessing}
Seurat/man/FetchResiduals_reference.Rd 0000644 0001762 0000144 00000001366 14525500037 017511 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing5.R
\name{FetchResiduals_reference}
\alias{FetchResiduals_reference}
\title{temporal function to get residuals from reference}
\usage{
FetchResiduals_reference(
object,
reference.SCT.model = NULL,
features = NULL,
nCount_UMI = NULL,
verbose = FALSE
)
}
\arguments{
\item{object}{A seurat object}
\item{reference.SCT.model}{a reference SCT model that should be used
for calculating the residuals}
\item{features}{Names of features to compute}
\item{nCount_UMI}{UMI counts. If not specified, defaults to
column sums of object}
\item{verbose}{Whether to print messages and progress bars}
}
\description{
Temporary function to get residuals from a reference SCT model
}
Seurat/man/CollapseSpeciesExpressionMatrix.Rd 0000644 0001762 0000144 00000002752 14525500037 021111 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{CollapseSpeciesExpressionMatrix}
\alias{CollapseSpeciesExpressionMatrix}
\title{Slim down a multi-species expression matrix, when only one species is primarily of interest.}
\usage{
CollapseSpeciesExpressionMatrix(
object,
prefix = "HUMAN_",
controls = "MOUSE_",
ncontrols = 100
)
}
\arguments{
\item{object}{A UMI count matrix. Should contain rownames that start with
the values of the \code{prefix} or \code{controls} arguments}
\item{prefix}{The prefix denoting rownames for the species of interest.
Default is "HUMAN_". These rownames will have this prefix removed in the returned matrix.}
\item{controls}{The prefix denoting rownames for the species of 'negative
control' cells. Default is "MOUSE_".}
\item{ncontrols}{How many of the most highly expressed (average) negative
control features (by default, 100 mouse genes), should be kept? All other
rownames starting with \code{controls} are discarded.}
}
\value{
A UMI count matrix. Rownames that started with \code{prefix} have this
prefix discarded. For rownames starting with \code{controls}, only the
\code{ncontrols} most highly expressed features are kept, and the
prefix is kept. All other rows are retained.
}
\description{
Valuable for CITE-seq analyses, where we typically spike in rare populations of 'negative control' cells from a different species.
}
\examples{
\dontrun{
cbmc.rna.collapsed <- CollapseSpeciesExpressionMatrix(cbmc.rna)
}
}
\concept{utilities}
Seurat/man/PCASigGenes.Rd 0000644 0001762 0000144 00000002064 14525500037 014612 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dimensional_reduction.R
\name{PCASigGenes}
\alias{PCASigGenes}
\title{Significant genes from a PCA}
\usage{
PCASigGenes(
object,
pcs.use,
pval.cut = 0.1,
use.full = FALSE,
max.per.pc = NULL
)
}
\arguments{
\item{object}{Seurat object}
\item{pcs.use}{PCS to use.}
\item{pval.cut}{P-value cutoff}
\item{use.full}{Use the full list of genes (from the projected PCA). Assumes
that \code{ProjectDim} has been run. Currently, must be set to FALSE.}
\item{max.per.pc}{Maximum number of genes to return per PC. Used to avoid genes from one PC dominating the entire analysis.}
}
\value{
A vector of genes whose p-values are statistically significant for
at least one of the given PCs.
}
\description{
Returns a set of genes, based on the JackStraw analysis, that have
statistically significant associations with a set of PCs.
}
\examples{
data("pbmc_small")
PCASigGenes(pbmc_small, pcs.use = 1:2)
}
\seealso{
\code{\link{ProjectDim}} \code{\link{JackStraw}}
}
\concept{dimensional_reduction}
Seurat/man/RunCCA.Rd 0000644 0001762 0000144 00000005003 14525500037 013631 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/dimensional_reduction.R
\name{RunCCA}
\alias{RunCCA}
\alias{RunCCA.default}
\alias{RunCCA.Seurat}
\title{Perform Canonical Correlation Analysis}
\usage{
RunCCA(object1, object2, ...)
\method{RunCCA}{default}(
object1,
object2,
standardize = TRUE,
num.cc = 20,
seed.use = 42,
verbose = FALSE,
...
)
\method{RunCCA}{Seurat}(
object1,
object2,
assay1 = NULL,
assay2 = NULL,
num.cc = 20,
features = NULL,
renormalize = FALSE,
rescale = FALSE,
compute.gene.loadings = TRUE,
add.cell.id1 = NULL,
add.cell.id2 = NULL,
verbose = TRUE,
...
)
}
\arguments{
\item{object1}{First Seurat object}
\item{object2}{Second Seurat object.}
\item{...}{Extra parameters (passed onto MergeSeurat in case with two objects
passed, passed onto ScaleData in case with single object and rescale.groups
set to TRUE)}
\item{standardize}{Standardize matrices - scales columns to have unit variance
and mean 0}
\item{num.cc}{Number of canonical vectors to calculate}
\item{seed.use}{Random seed to set. If NULL, does not set a seed}
\item{verbose}{Show progress messages}
\item{assay1, assay2}{Assays to pull from in the first and second objects, respectively}
\item{features}{Set of genes to use in CCA. Default is the union of both
the variable features sets present in both objects.}
\item{renormalize}{Renormalize raw data after merging the objects. If FALSE,
merge the data matrices also.}
\item{rescale}{Rescale the datasets prior to CCA. If FALSE, uses existing data in the scale data slots.}
\item{compute.gene.loadings}{Also compute the gene loadings. NOTE - this will
scale every gene in the dataset which may impose a high memory cost.}
\item{add.cell.id1, add.cell.id2}{Prefixes to add to the cell names of
\code{object1} and \code{object2}, respectively, so that cell names remain
unique after the objects are merged}
}
\value{
Returns a combined Seurat object with the CCA results stored.
}
\description{
Runs a canonical correlation analysis using a diagonal implementation of CCA.
For details about stored CCA calculation parameters, see
\code{PrintCCAParams}.
}
\examples{
\dontrun{
data("pbmc_small")
pbmc_small
# As CCA requires two datasets, we will split our test object into two just for this example
pbmc1 <- subset(pbmc_small, cells = colnames(pbmc_small)[1:40])
pbmc2 <- subset(pbmc_small, cells = colnames(x = pbmc_small)[41:80])
pbmc1[["group"]] <- "group1"
pbmc2[["group"]] <- "group2"
pbmc_cca <- RunCCA(object1 = pbmc1, object2 = pbmc2)
# Print results
print(x = pbmc_cca[["cca"]])
}
}
\seealso{
\code{\link{merge.Seurat}}
}
\concept{dimensional_reduction}
Seurat/man/GetIntegrationData.Rd 0000644 0001762 0000144 00000001000 14525500037 016264 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{GetIntegrationData}
\alias{GetIntegrationData}
\title{Get integration data}
\usage{
GetIntegrationData(object, integration.name, slot)
}
\arguments{
\item{object}{Seurat object}
\item{integration.name}{Name of integration object}
\item{slot}{Which slot in integration object to get}
}
\value{
Returns data from the requested slot within the integrated object
}
\description{
Get integration data
}
\concept{objects}
Seurat/man/LoadCurioSeeker.Rd 0000644 0001762 0000144 00000001035 14525500037 015577 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{LoadCurioSeeker}
\alias{LoadCurioSeeker}
\title{Load Curio Seeker data}
\usage{
LoadCurioSeeker(data.dir, assay = "Spatial")
}
\arguments{
\item{data.dir}{location of data directory that contains the counts matrix,
gene names, barcodes/beads, and barcodes/bead location files.}
\item{assay}{Name of assay to associate spatial data to}
}
\value{
A \code{\link{Seurat}} object
}
\description{
Load Curio Seeker data
}
\concept{preprocessing}
Seurat/man/DiscretePalette.Rd 0000644 0001762 0000144 00000002073 14525500037 015643 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{DiscretePalette}
\alias{DiscretePalette}
\title{Discrete colour palettes from pals}
\usage{
DiscretePalette(n, palette = NULL, shuffle = FALSE)
}
\arguments{
\item{n}{Number of colours to be generated.}
\item{palette}{Options are
"alphabet", "alphabet2", "glasbey", "polychrome", "stepped", and "parade".
If omitted, a palette is chosen automatically based on the requested \code{n}.}
\item{shuffle}{Shuffle the colors in the selected palette.}
}
\value{
A vector of colors
}
\description{
These are included here because pals depends on a number of compiled
packages, and this can lead to increases in run time for Travis,
and generally should be avoided when possible.
}
\details{
These palettes are a much better default for data with many classes
than the default ggplot2 palette.
Many thanks to Kevin Wright for writing the pals package.
Taken from the pals package (Licence: GPL-3).
\url{https://cran.r-project.org/package=pals}
Credit: Kevin Wright
}
\concept{visualization}
Seurat/man/SetIntegrationData.Rd 0000644 0001762 0000144 00000001023 14525500037 016305 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{SetIntegrationData}
\alias{SetIntegrationData}
\title{Set integration data}
\usage{
SetIntegrationData(object, integration.name, slot, new.data)
}
\arguments{
\item{object}{Seurat object}
\item{integration.name}{Name of integration object}
\item{slot}{Which slot in integration object to set}
\item{new.data}{New data to insert}
}
\value{
Returns a \code{\link{Seurat}} object
}
\description{
Set integration data
}
\concept{objects}
Seurat/man/HarmonyIntegration.Rd 0000644 0001762 0000144 00000005621 14525500037 016405 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration5.R
\name{HarmonyIntegration}
\alias{HarmonyIntegration}
\title{Harmony Integration}
\usage{
HarmonyIntegration(
object,
orig,
features = NULL,
scale.layer = "scale.data",
new.reduction = "harmony",
layers = NULL,
npcs = 50L,
key = "harmony_",
theta = NULL,
lambda = NULL,
sigma = 0.1,
nclust = NULL,
tau = 0,
block.size = 0.05,
max.iter.harmony = 10L,
max.iter.cluster = 20L,
epsilon.cluster = 1e-05,
epsilon.harmony = 1e-04,
verbose = TRUE,
...
)
}
\arguments{
\item{object}{An \code{\link[SeuratObject]{Assay5}} object}
\item{orig}{A \link[SeuratObject:DimReduc]{dimensional reduction} to correct}
\item{features}{Ignored}
\item{scale.layer}{Ignored}
\item{new.reduction}{Name of new integrated dimensional reduction}
\item{layers}{Ignored}
\item{npcs}{If doing PCA on input matrix, number of PCs to compute}
\item{key}{Key for Harmony dimensional reduction}
\item{theta}{Diversity clustering penalty parameter}
\item{lambda}{Ridge regression penalty parameter}
\item{sigma}{Width of soft kmeans clusters}
\item{nclust}{Number of clusters in model}
\item{tau}{Protection against overclustering small datasets with large ones}
\item{block.size}{What proportion of cells to update during clustering}
\item{max.iter.harmony}{Maximum number of rounds to run Harmony}
\item{max.iter.cluster}{Maximum number of rounds to run clustering at each round of Harmony}
\item{epsilon.cluster}{Convergence tolerance for clustering round of Harmony}
\item{epsilon.harmony}{Convergence tolerance for Harmony}
\item{verbose}{Whether to print progress messages. TRUE to print, FALSE to suppress}
\item{...}{Ignored}
}
\value{
...
}
\description{
Harmony Integration
}
\note{
This function requires the
\href{https://cran.r-project.org/package=harmony}{\pkg{harmony}} package
to be installed
}
\examples{
\dontrun{
# Preprocessing
obj <- SeuratData::LoadData("pbmcsca")
obj[["RNA"]] <- split(obj[["RNA"]], f = obj$Method)
obj <- NormalizeData(obj)
obj <- FindVariableFeatures(obj)
obj <- ScaleData(obj)
obj <- RunPCA(obj)
# After preprocessing, we integrate layers with added parameters specific to Harmony:
obj <- IntegrateLayers(object = obj, method = HarmonyIntegration, orig.reduction = "pca",
new.reduction = 'harmony', verbose = FALSE)
# Modifying Parameters
# We can also add arguments specific to Harmony such as theta, to give more diverse clusters
obj <- IntegrateLayers(object = obj, method = HarmonyIntegration, orig.reduction = "pca",
new.reduction = 'harmony', verbose = FALSE, theta = 3)
# Integrating SCTransformed data
obj <- SCTransform(object = obj)
obj <- IntegrateLayers(object = obj, method = HarmonyIntegration,
orig.reduction = "pca", new.reduction = 'harmony',
assay = "SCT", verbose = FALSE)
}
}
\seealso{
\code{\link[harmony:HarmonyMatrix]{harmony::HarmonyMatrix}()}
}
\concept{integration}
Seurat/man/RelativeCounts.Rd 0000644 0001762 0000144 00000001432 14525500037 015527 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{RelativeCounts}
\alias{RelativeCounts}
\title{Normalize raw data to fractions}
\usage{
RelativeCounts(data, scale.factor = 1, verbose = TRUE)
}
\arguments{
\item{data}{Matrix with the raw count data}
\item{scale.factor}{Scale the result. Default is 1}
\item{verbose}{Print progress}
}
\value{
Returns a matrix with the relative counts
}
\description{
Normalize count data to relative counts per cell by dividing by the total
per cell. Optionally use a scale factor, e.g. for counts per million (CPM)
use \code{scale.factor = 1e6}.
}
\examples{
mat <- matrix(data = rbinom(n = 25, size = 5, prob = 0.2), nrow = 5)
mat
mat_norm <- RelativeCounts(data = mat)
mat_norm
}
\concept{preprocessing}
Seurat/man/TopFeatures.Rd 0000644 0001762 0000144 00000001726 14525500037 015027 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{TopFeatures}
\alias{TopFeatures}
\title{Find features with highest scores for a given dimensional reduction technique}
\usage{
TopFeatures(
object,
dim = 1,
nfeatures = 20,
projected = FALSE,
balanced = FALSE,
...
)
}
\arguments{
\item{object}{DimReduc object}
\item{dim}{Dimension to use}
\item{nfeatures}{Number of features to return}
\item{projected}{Use the projected feature loadings}
\item{balanced}{Return an equal number of features with both + and - scores.}
\item{...}{Extra parameters passed to \code{\link{Loadings}}}
}
\value{
Returns a vector of features
}
\description{
Return a list of features with the strongest contribution to a set of components
}
\examples{
data("pbmc_small")
pbmc_small
TopFeatures(object = pbmc_small[["pca"]], dim = 1)
# After projection:
TopFeatures(object = pbmc_small[["pca"]], dim = 1, projected = TRUE)
}
\concept{objects}
Seurat/man/RunSLSI.Rd 0000644 0001762 0000144 00000003360 14525500037 014021 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/dimensional_reduction.R
\name{RunSLSI}
\alias{RunSLSI}
\alias{RunSLSI.default}
\alias{RunSLSI.Assay}
\alias{RunSLSI.Seurat}
\title{Run Supervised Latent Semantic Indexing}
\usage{
RunSLSI(object, ...)
\method{RunSLSI}{default}(
object,
assay = NULL,
n = 50,
reduction.key = "SLSI_",
graph = NULL,
verbose = TRUE,
seed.use = 42,
...
)
\method{RunSLSI}{Assay}(
object,
assay = NULL,
features = NULL,
n = 50,
reduction.key = "SLSI_",
graph = NULL,
verbose = TRUE,
seed.use = 42,
...
)
\method{RunSLSI}{Seurat}(
object,
assay = NULL,
features = NULL,
n = 50,
reduction.name = "slsi",
reduction.key = "SLSI_",
graph = NULL,
verbose = TRUE,
seed.use = 42,
...
)
}
\arguments{
\item{object}{An object}
\item{...}{Arguments passed to IRLBA irlba}
\item{assay}{Name of Assay SLSI is being run on}
\item{n}{Total Number of SLSI components to compute and store}
\item{reduction.key}{dimensional reduction key, specifies the string before
the number for the dimension names}
\item{graph}{Graph used to supervise the SLSI dimensional reduction}
\item{verbose}{Display messages}
\item{seed.use}{Set a random seed. Setting NULL will not set a seed.}
\item{features}{Features to compute SLSI on. If NULL, SLSI will be run
using the variable features for the Assay.}
\item{reduction.name}{dimensional reduction name}
}
\value{
Returns Seurat object with the SLSI calculation stored in the
reductions slot
}
\description{
Run a supervised LSI (SLSI) dimensionality reduction supervised by a
cell-cell kernel. SLSI is used to capture a linear transformation of peaks
that maximizes its dependency to the given cell-cell kernel.
}
\concept{dimensional_reduction}
Seurat/man/IFeaturePlot.Rd 0000644 0001762 0000144 00000001445 14525500037 015127 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{IFeaturePlot}
\alias{IFeaturePlot}
\title{Visualize features in dimensional reduction space interactively}
\usage{
IFeaturePlot(object, feature, dims = c(1, 2), reduction = NULL, slot = "data")
}
\arguments{
\item{object}{Seurat object}
\item{feature}{Feature to plot}
\item{dims}{Dimensions to plot, must be a two-length numeric vector specifying x- and y-dimensions}
\item{reduction}{Which dimensionality reduction to use. If not specified, first searches for umap, then tsne, then pca}
\item{slot}{Which slot to pull expression data from?}
}
\value{
Returns the final plot as a ggplot object
}
\description{
Visualize features in dimensional reduction space interactively
}
\concept{visualization}
Seurat/man/ExpVar.Rd 0000644 0001762 0000144 00000000655 14525500037 013773 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{ExpVar}
\alias{ExpVar}
\title{Calculate the variance of logged values}
\usage{
ExpVar(x)
}
\arguments{
\item{x}{A vector of values}
}
\value{
Returns the variance in log-space
}
\description{
Calculate variance of logged values in non-log space (return answer in
log-space)
}
\examples{
ExpVar(x = c(1, 2, 3))
}
\concept{utilities}
Seurat/man/SelectIntegrationFeatures.Rd 0000644 0001762 0000144 00000003537 14525500037 017712 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{SelectIntegrationFeatures}
\alias{SelectIntegrationFeatures}
\title{Select integration features}
\usage{
SelectIntegrationFeatures(
object.list,
nfeatures = 2000,
assay = NULL,
verbose = TRUE,
fvf.nfeatures = 2000,
...
)
}
\arguments{
\item{object.list}{List of seurat objects}
\item{nfeatures}{Number of features to return}
\item{assay}{Name or vector of assay names (one for each object) from which
to pull the variable features.}
\item{verbose}{Print messages}
\item{fvf.nfeatures}{nfeatures for \code{\link{FindVariableFeatures}}. Used
if \code{VariableFeatures} have not been set for any object in
\code{object.list}.}
\item{...}{Additional parameters to \code{\link{FindVariableFeatures}}}
}
\value{
A vector of selected features
}
\description{
Choose the features to use when integrating multiple datasets. This function
ranks features by the number of datasets they are deemed variable in,
breaking ties by the median variable feature rank across datasets. It returns
the top scoring features by this ranking.
}
\details{
If for any assay in the list, \code{\link{FindVariableFeatures}} hasn't been
run, this method will try to run it using the \code{fvf.nfeatures} parameter
and any additional ones specified through the \dots.
}
\examples{
\dontrun{
# to install the SeuratData package see https://github.com/satijalab/seurat-data
library(SeuratData)
data("panc8")
# panc8 is a merged Seurat object containing 8 separate pancreas datasets
# split the object by dataset and take the first 2
pancreas.list <- SplitObject(panc8, split.by = "tech")[1:2]
# perform SCTransform normalization
pancreas.list <- lapply(X = pancreas.list, FUN = SCTransform)
# select integration features
features <- SelectIntegrationFeatures(pancreas.list)
}
}
\concept{integration}
Seurat/man/CCAIntegration.Rd 0000644 0001762 0000144 00000007610 14525500037 015356 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration5.R
\name{CCAIntegration}
\alias{CCAIntegration}
\title{Seurat-CCA Integration}
\usage{
CCAIntegration(
object = NULL,
assay = NULL,
layers = NULL,
orig = NULL,
new.reduction = "integrated.dr",
reference = NULL,
features = NULL,
normalization.method = c("LogNormalize", "SCT"),
dims = 1:30,
k.filter = NA,
scale.layer = "scale.data",
dims.to.integrate = NULL,
k.weight = 100,
weight.reduction = NULL,
sd.weight = 1,
sample.tree = NULL,
preserve.order = FALSE,
verbose = TRUE,
...
)
}
\arguments{
\item{object}{A \code{Seurat} object}
\item{assay}{Name of \code{Assay} in the \code{Seurat} object}
\item{layers}{Names of layers in \code{assay}}
\item{orig}{A \link[SeuratObject:DimReduc]{dimensional reduction} to correct}
\item{new.reduction}{Name of new integrated dimensional reduction}
\item{reference}{A reference \code{Seurat} object}
\item{features}{A vector of features to use for integration}
\item{normalization.method}{Name of normalization method used: LogNormalize
or SCT}
\item{dims}{Dimensions of dimensional reduction to use for integration}
\item{k.filter}{Number of anchors to filter}
\item{scale.layer}{Name of scaled layer in \code{Assay}}
\item{dims.to.integrate}{Number of dimensions to return integrated values for}
\item{k.weight}{Number of neighbors to consider when weighting anchors}
\item{weight.reduction}{Dimension reduction to use when calculating anchor
weights. This can be one of:
\itemize{
\item{A string, specifying the name of a dimension reduction present in
all objects to be integrated}
\item{A vector of strings, specifying the name of a dimension reduction to
use for each object to be integrated}
\item{A vector of \code{\link{DimReduc}} objects, specifying the object to
use for each object in the integration}
\item{NULL, in which case the full corrected space is used for computing
anchor weights.}
}}
\item{sd.weight}{Controls the bandwidth of the Gaussian kernel for weighting}
\item{sample.tree}{Specify the order of integration. Order of integration
should be encoded in a matrix, where each row represents one of the pairwise
integration steps. Negative numbers specify a dataset, positive numbers
specify the integration results from a given row (the format of the merge
matrix included in the \code{\link{hclust}} function output). For example:
\code{matrix(c(-2, 1, -3, -1), ncol = 2)} gives:
\if{html}{\out{}}\preformatted{ [,1] [,2]
[1,] -2 -3
[2,] 1 -1
}\if{html}{\out{
}}
Which would cause dataset 2 and 3 to be integrated first, then the resulting
object integrated with dataset 1.
If NULL, the sample tree will be computed automatically.}
\item{preserve.order}{Do not reorder objects based on size for each pairwise
integration.}
\item{verbose}{Print progress}
\item{...}{Arguments passed on to \code{FindIntegrationAnchors}}
}
\description{
Seurat-CCA Integration
}
\examples{
\dontrun{
# Preprocessing
obj <- SeuratData::LoadData("pbmcsca")
obj[["RNA"]] <- split(obj[["RNA"]], f = obj$Method)
obj <- NormalizeData(obj)
obj <- FindVariableFeatures(obj)
obj <- ScaleData(obj)
obj <- RunPCA(obj)
# After preprocessing, we integrate layers.
obj <- IntegrateLayers(object = obj, method = CCAIntegration,
orig.reduction = "pca", new.reduction = "integrated.cca",
verbose = FALSE)
# Modifying parameters
# We can also specify parameters such as `k.anchor` to increase the strength of integration
obj <- IntegrateLayers(object = obj, method = CCAIntegration,
orig.reduction = "pca", new.reduction = "integrated.cca",
k.anchor = 20, verbose = FALSE)
# Integrating SCTransformed data
obj <- SCTransform(object = obj)
obj <- IntegrateLayers(object = obj, method = CCAIntegration,
orig.reduction = "pca", new.reduction = "integrated.cca",
assay = "SCT", verbose = FALSE)
}
}
Seurat/man/CustomPalette.Rd 0000644 0001762 0000144 00000002235 14525500037 015353 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{BlackAndWhite}
\alias{BlackAndWhite}
\alias{BlueAndRed}
\alias{CustomPalette}
\alias{PurpleAndYellow}
\title{Create a custom color palette}
\usage{
BlackAndWhite(mid = NULL, k = 50)
BlueAndRed(k = 50)
CustomPalette(low = "white", high = "red", mid = NULL, k = 50)
PurpleAndYellow(k = 50)
}
\arguments{
\item{mid}{middle color. Optional.}
\item{k}{number of steps (colors levels) to include between low and high values}
\item{low}{low color}
\item{high}{high color}
}
\value{
A color palette for plotting
}
\description{
Creates a custom color palette based on low, middle, and high color values
}
\examples{
df <- data.frame(x = rnorm(n = 100, mean = 20, sd = 2), y = rbinom(n = 100, size = 100, prob = 0.2))
plot(df, col = BlackAndWhite())
df <- data.frame(x = rnorm(n = 100, mean = 20, sd = 2), y = rbinom(n = 100, size = 100, prob = 0.2))
plot(df, col = BlueAndRed())
myPalette <- CustomPalette()
myPalette
df <- data.frame(x = rnorm(n = 100, mean = 20, sd = 2), y = rbinom(n = 100, size = 100, prob = 0.2))
plot(df, col = PurpleAndYellow())
}
\concept{visualization}
Seurat/man/PrepareBridgeReference.Rd 0000644 0001762 0000144 00000006105 14525500037 017114 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{PrepareBridgeReference}
\alias{PrepareBridgeReference}
\title{Prepare the bridge and reference datasets}
\usage{
PrepareBridgeReference(
reference,
bridge,
reference.reduction = "pca",
reference.dims = 1:50,
normalization.method = c("SCT", "LogNormalize"),
reference.assay = NULL,
bridge.ref.assay = "RNA",
bridge.query.assay = "ATAC",
supervised.reduction = c("slsi", "spca", NULL),
bridge.query.reduction = NULL,
bridge.query.features = NULL,
laplacian.reduction.name = "lap",
laplacian.reduction.key = "lap_",
laplacian.reduction.dims = 1:50,
verbose = TRUE
)
}
\arguments{
\item{reference}{A reference Seurat object}
\item{bridge}{A multi-omic bridge Seurat object}
\item{reference.reduction}{Name of dimensional reduction of the reference object (default is 'pca')}
\item{reference.dims}{Number of dimensions used for the reference.reduction (default is 50)}
\item{normalization.method}{Name of normalization method used: LogNormalize
or SCT}
\item{reference.assay}{Assay name for reference (default is \code{\link{DefaultAssay}})}
\item{bridge.ref.assay}{Assay name for bridge used for reference mapping. RNA by default}
\item{bridge.query.assay}{Assay name for bridge used for query mapping. ATAC by default}
\item{supervised.reduction}{Type of supervised dimensional reduction to be performed
for integrating the bridge and query.
Options are:
\itemize{
\item{slsi: Perform supervised LSI as the dimensional reduction for
the bridge-query integration}
\item{spca: Perform supervised PCA as the dimensional reduction for
the bridge-query integration}
\item{NULL: no supervised dimensional reduction will be calculated.
bridge.query.reduction is used for the bridge-query integration}
}}
\item{bridge.query.reduction}{Name of dimensions used for the bridge-query harmonization.
'bridge.query.reduction' and 'supervised.reduction' cannot be NULL together.}
\item{bridge.query.features}{Features used for bridge query dimensional reduction
(default is NULL which uses VariableFeatures from the bridge object)}
\item{laplacian.reduction.name}{Name of dimensional reduction name of graph laplacian eigenspace (default is 'lap')}
\item{laplacian.reduction.key}{Dimensional reduction key (default is 'lap_')}
\item{laplacian.reduction.dims}{Number of dimensions used for graph laplacian eigenspace (default is 50)}
\item{verbose}{Print progress and message (default is TRUE)}
}
\value{
Returns a \code{BridgeReferenceSet} that can be used as input to
\code{\link{FindBridgeTransferAnchors}}.
The parameters used are stored in the \code{BridgeReferenceSet} as well
}
\description{
Preprocess the multi-omic bridge and unimodal reference datasets into
an extended reference.
This function performs the following three steps:
1. Performs within-modality harmonization between bridge and reference
2. Performs dimensional reduction on the SNN graph of bridge datasets via
Laplacian Eigendecomposition
3. Constructs a bridge dictionary representation for unimodal reference cells
}
Seurat/man/FindIntegrationAnchors.Rd 0000644 0001762 0000144 00000013502 14525500037 017163 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{FindIntegrationAnchors}
\alias{FindIntegrationAnchors}
\title{Find integration anchors}
\usage{
FindIntegrationAnchors(
object.list = NULL,
assay = NULL,
reference = NULL,
anchor.features = 2000,
scale = TRUE,
normalization.method = c("LogNormalize", "SCT"),
sct.clip.range = NULL,
reduction = c("cca", "rpca", "jpca", "rlsi"),
l2.norm = TRUE,
dims = 1:30,
k.anchor = 5,
k.filter = 200,
k.score = 30,
max.features = 200,
nn.method = "annoy",
n.trees = 50,
eps = 0,
verbose = TRUE
)
}
\arguments{
\item{object.list}{A list of \code{\link{Seurat}} objects between which to
find anchors for downstream integration.}
\item{assay}{A vector of assay names specifying which assay to use when
constructing anchors. If NULL, the current default assay for each object is
used.}
\item{reference}{A vector specifying the object/s to be used as a reference
during integration. If NULL (default), all pairwise anchors are found (no
reference/s). If not NULL, the corresponding objects in \code{object.list}
will be used as references. When using a set of specified references, anchors
are first found between each query and each reference. The references are
then integrated through pairwise integration. Each query is then mapped to
the integrated reference.}
\item{anchor.features}{Can be either:
\itemize{
\item{A numeric value. This will call \code{\link{SelectIntegrationFeatures}}
to select the provided number of features to be used in anchor finding}
\item{A vector of features to be used as input to the anchor finding process}
}}
\item{scale}{Whether or not to scale the features provided. Only set to FALSE
if you have previously scaled the features you want to use for each object in
the object.list}
\item{normalization.method}{Name of normalization method used: LogNormalize
or SCT}
\item{sct.clip.range}{Numeric of length two specifying the min and max values
the Pearson residual will be clipped to}
\item{reduction}{Dimensional reduction to perform when finding anchors. Can
be one of:
\itemize{
\item{cca: Canonical correlation analysis}
\item{rpca: Reciprocal PCA}
\item{jpca: Joint PCA}
\item{rlsi: Reciprocal LSI}
}}
\item{l2.norm}{Perform L2 normalization on the CCA cell embeddings after
dimensional reduction}
\item{dims}{Which dimensions to use from the CCA to specify the neighbor
search space}
\item{k.anchor}{How many neighbors (k) to use when picking anchors}
\item{k.filter}{How many neighbors (k) to use when filtering anchors}
\item{k.score}{How many neighbors (k) to use when scoring anchors}
\item{max.features}{The maximum number of features to use when specifying the
neighborhood search space in the anchor filtering}
\item{nn.method}{Method for nearest neighbor finding. Options include: rann,
annoy}
\item{n.trees}{More trees gives higher precision when using annoy approximate
nearest neighbor search}
\item{eps}{Error bound on the neighbor finding algorithm (from RANN/Annoy)}
\item{verbose}{Print progress bars and output}
}
\value{
Returns an \code{\link{AnchorSet}} object that can be used as input to
\code{\link{IntegrateData}}.
}
\description{
Find a set of anchors between a list of \code{\link{Seurat}} objects.
These anchors can later be used to integrate the objects using the
\code{\link{IntegrateData}} function.
}
\details{
The main steps of this procedure are outlined below. For a more detailed
description of the methodology, please see Stuart, Butler, et al Cell 2019:
\doi{10.1016/j.cell.2019.05.031}; \doi{10.1101/460147}
First, determine anchor.features if not explicitly specified using
\code{\link{SelectIntegrationFeatures}}. Then for all pairwise combinations
of reference and query datasets:
\itemize{
\item{Perform dimensional reduction on the dataset pair as specified via
the \code{reduction} parameter. If \code{l2.norm} is set to \code{TRUE},
perform L2 normalization of the embedding vectors.}
\item{Identify anchors - pairs of cells from each dataset
that are contained within each other's neighborhoods (also known as mutual
nearest neighbors).}
\item{Filter low confidence anchors to ensure anchors in the low dimension
space are in broad agreement with the high dimensional measurements. This
is done by looking at the neighbors of each query cell in the reference
dataset using \code{max.features} to define this space. If the reference
cell isn't found within the first \code{k.filter} neighbors, remove the
anchor.}
\item{Assign each remaining anchor a score. For each anchor cell, determine
the nearest \code{k.score} anchors within its own dataset and within its
pair's dataset. Based on these neighborhoods, construct an overall neighbor
graph and then compute the shared neighbor overlap between anchor and query
cells (analogous to an SNN graph). We use the 0.01 and 0.90 quantiles on
these scores to dampen outlier effects and rescale to range between 0-1.}
}
}
\examples{
\dontrun{
# to install the SeuratData package see https://github.com/satijalab/seurat-data
library(SeuratData)
data("panc8")
# panc8 is a merged Seurat object containing 8 separate pancreas datasets
# split the object by dataset
pancreas.list <- SplitObject(panc8, split.by = "tech")
# perform standard preprocessing on each object
for (i in 1:length(pancreas.list)) {
pancreas.list[[i]] <- NormalizeData(pancreas.list[[i]], verbose = FALSE)
pancreas.list[[i]] <- FindVariableFeatures(
pancreas.list[[i]], selection.method = "vst",
nfeatures = 2000, verbose = FALSE
)
}
# find anchors
anchors <- FindIntegrationAnchors(object.list = pancreas.list)
# integrate data
integrated <- IntegrateData(anchorset = anchors)
}
}
\references{
Stuart T, Butler A, et al. Comprehensive Integration of
Single-Cell Data. Cell. 2019;177:1888-1902 \doi{10.1016/j.cell.2019.05.031}
}
\concept{integration}
Seurat/man/CellScatter.Rd 0000644 0001762 0000144 00000002457 14525500037 014775 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{CellScatter}
\alias{CellScatter}
\alias{CellPlot}
\title{Cell-cell scatter plot}
\usage{
CellScatter(
object,
cell1,
cell2,
features = NULL,
highlight = NULL,
cols = NULL,
pt.size = 1,
smooth = FALSE,
raster = NULL,
raster.dpi = c(512, 512)
)
}
\arguments{
\item{object}{Seurat object}
\item{cell1}{Cell 1 name}
\item{cell2}{Cell 2 name}
\item{features}{Features to plot (default, all features)}
\item{highlight}{Features to highlight}
\item{cols}{Colors to use for identity class plotting.}
\item{pt.size}{Size of the points on the plot}
\item{smooth}{Smooth the graph (similar to smoothScatter)}
\item{raster}{Convert points to raster format, default is \code{NULL}
which will automatically use raster if the number of points plotted is greater than
100,000}
\item{raster.dpi}{Pixel resolution for rasterized plots, passed to geom_scattermore().
Default is c(512, 512).}
}
\value{
A ggplot object
}
\description{
Creates a scatter plot of features across two single cells. Pearson
correlation between the two cells is displayed above the plot.
}
\examples{
data("pbmc_small")
CellScatter(object = pbmc_small, cell1 = 'ATAGGAGAAACAGA', cell2 = 'CATCAGGATGCACA')
}
\concept{visualization}
Seurat/man/LocalStruct.Rd 0000644 0001762 0000144 00000002422 14525500037 015017 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{LocalStruct}
\alias{LocalStruct}
\title{Calculate the local structure preservation metric}
\usage{
LocalStruct(
object,
grouping.var,
idents = NULL,
neighbors = 100,
reduction = "pca",
reduced.dims = 1:10,
orig.dims = 1:10,
verbose = TRUE
)
}
\arguments{
\item{object}{Seurat object}
\item{grouping.var}{Grouping variable}
\item{idents}{Optionally specify a set of idents to compute metric for}
\item{neighbors}{Number of neighbors to compute in pca/corrected pca space}
\item{reduction}{Dimensional reduction to use for corrected space}
\item{reduced.dims}{Number of reduced dimensions to use}
\item{orig.dims}{Number of PCs to use in original space}
\item{verbose}{Display progress bar}
}
\value{
Returns the average preservation metric
}
\description{
Calculates a metric that describes how well the local structure of each group
prior to integration is preserved after integration. This procedure works as
follows: For each group, compute a PCA, compute the top num.neighbors in pca
space, compute the top num.neighbors in corrected pca space, compute the
size of the intersection of those two sets of neighbors.
Return the average over all groups.
}
\concept{integration}
Seurat/man/GetResidual.Rd 0000644 0001762 0000144 00000003140 14525500037 014766 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{GetResidual}
\alias{GetResidual}
\title{Calculate pearson residuals of features not in the scale.data}
\usage{
GetResidual(
object,
features,
assay = NULL,
umi.assay = "RNA",
clip.range = NULL,
replace.value = FALSE,
na.rm = TRUE,
verbose = TRUE
)
}
\arguments{
\item{object}{A seurat object}
\item{features}{Name of features to add into the scale.data}
\item{assay}{Name of the assay of the seurat object generated by SCTransform}
\item{umi.assay}{Name of the assay of the seurat object containing UMI matrix
and the default is RNA}
\item{clip.range}{Numeric of length two specifying the min and max values the
Pearson residual will be clipped to}
\item{replace.value}{Recalculate residuals for all features, even if they are
already present. Useful if you want to change the clip.range.}
\item{na.rm}{For features where there is no feature model stored, return NA
for residual value in scale.data when na.rm = FALSE. When na.rm is TRUE, only
return residuals for features with a model stored for all cells.}
\item{verbose}{Whether to print messages and progress bars}
}
\value{
Returns a Seurat object containing Pearson residuals of added
features in its scale.data
}
\description{
This function calls sctransform::get_residuals.
}
\examples{
\dontrun{
data("pbmc_small")
pbmc_small <- SCTransform(object = pbmc_small, variable.features.n = 20)
pbmc_small <- GetResidual(object = pbmc_small, features = c('MS4A1', 'TCL1A'))
}
}
\seealso{
\code{\link[sctransform]{get_residuals}}
}
\concept{preprocessing}
Seurat/man/PrepSCTIntegration.Rd 0000644 0001762 0000144 00000006226 14525500037 016252 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{PrepSCTIntegration}
\alias{PrepSCTIntegration}
\title{Prepare an object list normalized with sctransform for integration.}
\usage{
PrepSCTIntegration(
object.list,
assay = NULL,
anchor.features = 2000,
sct.clip.range = NULL,
verbose = TRUE
)
}
\arguments{
\item{object.list}{A list of \code{\link{Seurat}} objects to prepare for integration}
\item{assay}{The name of the \code{\link{Assay}} to use for integration. This can be a
single name if all the assays to be integrated have the same name, or a character vector
containing the name of each \code{\link{Assay}} in each object to be integrated. The
specified assays must have been normalized using \code{\link{SCTransform}}.
If NULL (default), the current default assay for each object is used.}
\item{anchor.features}{Can be either:
\itemize{
\item{A numeric value. This will call \code{\link{SelectIntegrationFeatures}}
to select the provided number of features to be used in anchor finding}
\item{A vector of features to be used as input to the anchor finding
process}
}}
\item{sct.clip.range}{Numeric of length two specifying the min and max values
the Pearson residual will be clipped to}
\item{verbose}{Display output/messages}
}
\value{
A list of \code{\link{Seurat}} objects with the appropriate \code{scale.data} slots
containing only the required \code{anchor.features}.
}
\description{
This function takes in a list of objects that have been normalized with the
\code{\link{SCTransform}} method and performs the following steps:
\itemize{
\item{If anchor.features is a numeric value, calls \code{\link{SelectIntegrationFeatures}}
to determine the features to use in the downstream integration procedure.}
\item{Ensures that the sctransform residuals for the features specified
to anchor.features are present in each object in the list. This is
necessary because the default behavior of \code{\link{SCTransform}} is to
only store the residuals for the features determined to be variable.
Residuals are recomputed for missing features using the stored model
parameters via the \code{\link{GetResidual}} function.}
\item{Subsets the \code{scale.data} slot to only contain the residuals for
anchor.features for efficiency in downstream processing. }
}
}
\examples{
\dontrun{
# to install the SeuratData package see https://github.com/satijalab/seurat-data
library(SeuratData)
data("panc8")
# panc8 is a merged Seurat object containing 8 separate pancreas datasets
# split the object by dataset and take the first 2 to integrate
pancreas.list <- SplitObject(panc8, split.by = "tech")[1:2]
# perform SCTransform normalization
pancreas.list <- lapply(X = pancreas.list, FUN = SCTransform)
# select integration features and prep step
features <- SelectIntegrationFeatures(pancreas.list)
pancreas.list <- PrepSCTIntegration(
pancreas.list,
anchor.features = features
)
# downstream integration steps
anchors <- FindIntegrationAnchors(
pancreas.list,
normalization.method = "SCT",
anchor.features = features
)
pancreas.integrated <- IntegrateData(anchors, normalization.method = "SCT")
}
}
\concept{integration}
Seurat/man/roxygen/ 0000755 0001762 0000144 00000000000 14525500037 013764 5 ustar ligges users Seurat/man/roxygen/templates/ 0000755 0001762 0000144 00000000000 14525500037 015762 5 ustar ligges users Seurat/man/roxygen/templates/section-progressr.R 0000644 0001762 0000144 00000001047 14525500037 021577 0 ustar ligges users #' @section Progress Updates with \pkg{progressr}:
#' This function uses
#' \href{https://cran.r-project.org/package=progressr}{\pkg{progressr}} to
#' render status updates and progress bars. To enable progress updates, wrap
#' the function call in \code{\link[progressr]{with_progress}} or run
#' \code{\link[progressr:handlers]{handlers(global = TRUE)}} before running
#' this function. For more details about \pkg{progressr}, please read
#' \href{https://progressr.futureverse.org/articles/progressr-intro.html}{\code{vignette("progressr-intro")}}
Seurat/man/roxygen/templates/seealso-methods.R 0000644 0001762 0000144 00000000110 14525500037 021171 0 ustar ligges users #' @seealso \code{<%= cls %>} methods: \code{\link{<%= cls %>-methods}}
Seurat/man/roxygen/templates/param-dotsm.R 0000644 0001762 0000144 00000000061 14525500037 020326 0 ustar ligges users #' @param ... Arguments passed to other methods
Seurat/man/roxygen/templates/note-reqdpkg.R 0000644 0001762 0000144 00000000211 14525500037 020477 0 ustar ligges users #' @note This function requires the
#' \href{https://cran.r-project.org/package=<%= pkg %>}{\pkg{<%= pkg %>}} package
#' to be installed
Seurat/man/roxygen/templates/section-future.R 0000644 0001762 0000144 00000001327 14525500037 021064 0 ustar ligges users #' @section Parallelization with \pkg{future}:
#' This function uses
#' \href{https://cran.r-project.org/package=future}{\pkg{future}} to enable
#' parallelization. Parallelization strategies can be set using
#' \code{\link[future]{plan}}. Common plans include \dQuote{\code{sequential}}
#' for non-parallelized processing or \dQuote{\code{multisession}} for parallel
#' evaluation using multiple \R sessions; for other plans, see the
#' \dQuote{Implemented evaluation strategies} section of
#' \code{\link[future:plan]{?future::plan}}. For a more thorough introduction
#' to \pkg{future}, see
#' \href{https://future.futureverse.org/articles/future-1-overview.html}{\code{vignette("future-1-overview")}}
#'
#' @concept future
Seurat/man/roxygen/templates/param-dotsi.R 0000644 0001762 0000144 00000000026 14525500037 020323 0 ustar ligges users #' @param ... Ignored
Seurat/man/BGTextColor.Rd 0000644 0001762 0000144 00000002235 14525500037 014716 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{BGTextColor}
\alias{BGTextColor}
\title{Determine text color based on background color}
\source{
\url{https://stackoverflow.com/questions/3942878/how-to-decide-font-color-in-white-or-black-depending-on-background-color}
}
\usage{
BGTextColor(
background,
threshold = 186,
w3c = FALSE,
dark = "black",
light = "white"
)
}
\arguments{
\item{background}{A vector of background colors; supports R color names and
hexadecimal codes}
\item{threshold}{Intensity threshold for light/dark cutoff; intensities
greater than \code{threshold} yield \code{dark}, others yield \code{light}}
\item{w3c}{Use \href{https://www.w3.org/TR/WCAG20/}{W3C} formula for calculating
background text color; ignores \code{threshold}}
\item{dark}{Color for dark text}
\item{light}{Color for light text}
}
\value{
A named vector of either \code{dark} or \code{light}, depending on
\code{background}; names of vector are \code{background}
}
\description{
Determine text color based on background color
}
\examples{
BGTextColor(background = c('black', 'white', '#E76BF3'))
}
\concept{visualization}
Seurat/man/NormalizeData.Rd 0000644 0001762 0000144 00000004070 14525500037 015313 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/preprocessing.R
\name{NormalizeData}
\alias{NormalizeData}
\alias{NormalizeData.V3Matrix}
\alias{NormalizeData.Assay}
\alias{NormalizeData.Seurat}
\title{Normalize Data}
\usage{
NormalizeData(object, ...)
\method{NormalizeData}{V3Matrix}(
object,
normalization.method = "LogNormalize",
scale.factor = 10000,
margin = 1,
block.size = NULL,
verbose = TRUE,
...
)
\method{NormalizeData}{Assay}(
object,
normalization.method = "LogNormalize",
scale.factor = 10000,
margin = 1,
verbose = TRUE,
...
)
\method{NormalizeData}{Seurat}(
object,
assay = NULL,
normalization.method = "LogNormalize",
scale.factor = 10000,
margin = 1,
verbose = TRUE,
...
)
}
\arguments{
\item{object}{An object}
\item{...}{Arguments passed to other methods}
\item{normalization.method}{Method for normalization.
\itemize{
\item \dQuote{\code{LogNormalize}}: Feature counts for each cell are
divided by the total counts for that cell and multiplied by the
\code{scale.factor}. This is then natural-log transformed using \code{log1p}
\item \dQuote{\code{CLR}}: Applies a centered log ratio transformation
\item \dQuote{\code{RC}}: Relative counts. Feature counts for each cell
are divided by the total counts for that cell and multiplied by the
\code{scale.factor}. No log-transformation is applied. For counts per
million (CPM) set \code{scale.factor = 1e6}
}}
\item{scale.factor}{Sets the scale factor for cell-level normalization}
\item{margin}{If performing CLR normalization, normalize across features (1) or cells (2)}
\item{block.size}{How many cells should be run in each chunk, will try to split evenly across threads}
\item{verbose}{display progress bar for normalization procedure}
\item{assay}{Name of assay to use}
}
\value{
Returns object after normalization
}
\description{
Normalize the count data present in a given assay.
}
\examples{
\dontrun{
data("pbmc_small")
pbmc_small
pbmc_small <- NormalizeData(object = pbmc_small)
}
}
\concept{preprocessing}
Seurat/man/FindConservedMarkers.Rd 0000644 0001762 0000144 00000003760 14525500037 016644 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/differential_expression.R
\name{FindConservedMarkers}
\alias{FindConservedMarkers}
\title{Finds markers that are conserved between the groups}
\usage{
FindConservedMarkers(
object,
ident.1,
ident.2 = NULL,
grouping.var,
assay = "RNA",
slot = "data",
min.cells.group = 3,
meta.method = metap::minimump,
verbose = TRUE,
...
)
}
\arguments{
\item{object}{An object}
\item{ident.1}{Identity class to define markers for}
\item{ident.2}{A second identity class for comparison. If NULL (default) -
use all other cells for comparison.}
\item{grouping.var}{grouping variable}
\item{assay}{of assay to fetch data for (default is RNA)}
\item{slot}{Slot to pull data from; note that if \code{test.use} is "negbinom", "poisson", or "DESeq2",
\code{slot} will be set to "counts"}
\item{min.cells.group}{Minimum number of cells in one of the groups}
\item{meta.method}{method for combining p-values. Should be a function from
the metap package (NOTE: pass the function, not a string)}
\item{verbose}{Print a progress bar once expression testing begins}
\item{\dots}{parameters to pass to FindMarkers}
}
\value{
data.frame containing a ranked list of putative conserved markers, and
associated statistics (p-values within each group and a combined p-value
(such as Fishers combined p-value or others from the metap package),
percentage of cells expressing the marker, average differences). Name of group is appended to each
associated output column (e.g. CTRL_p_val). If only one group is tested in the grouping.var, max
and combined p-values are not returned.
}
\description{
Finds markers that are conserved between the groups
}
\examples{
\dontrun{
data("pbmc_small")
pbmc_small
# Create a simulated grouping variable
pbmc_small[['groups']] <- sample(x = c('g1', 'g2'), size = ncol(x = pbmc_small), replace = TRUE)
FindConservedMarkers(pbmc_small, ident.1 = 0, ident.2 = 1, grouping.var = "groups")
}
}
\concept{differential_expression}
Seurat/man/AddAzimuthResults.Rd 0000644 0001762 0000144 00000001242 14525500037 016173 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{AddAzimuthResults}
\alias{AddAzimuthResults}
\title{Add Azimuth Results}
\usage{
AddAzimuthResults(object = NULL, filename)
}
\arguments{
\item{object}{A \code{\link[SeuratObject]{Seurat}} object}
\item{filename}{Path to Azimuth mapping scores file}
}
\value{
\code{object} with Azimuth results added
}
\description{
Add mapping and prediction scores, UMAP embeddings, and imputed assay (if
available)
from Azimuth to an existing or new \code{\link[SeuratObject]{Seurat}} object
}
\examples{
\dontrun{
object <- AddAzimuthResults(object, filename = "azimuth_results.Rds")
}
}
Seurat/man/PercentAbove.Rd 0000644 0001762 0000144 00000001076 14525500037 015141 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{PercentAbove}
\alias{PercentAbove}
\title{Calculate the percentage of a vector above some threshold}
\usage{
PercentAbove(x, threshold)
}
\arguments{
\item{x}{Vector of values}
\item{threshold}{Threshold to use when calculating percentage}
}
\value{
Returns the percentage of \code{x} values above the given threshold
}
\description{
Calculate the percentage of a vector above some threshold
}
\examples{
set.seed(42)
PercentAbove(sample(1:100, 10), 75)
}
\concept{utilities}
Seurat/man/IntegrateLayers.Rd 0000644 0001762 0000144 00000002212 14525500037 015657 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration5.R
\name{IntegrateLayers}
\alias{IntegrateLayers}
\title{Integrate Layers}
\usage{
IntegrateLayers(
object,
method,
orig.reduction = "pca",
assay = NULL,
features = NULL,
layers = NULL,
scale.layer = "scale.data",
...
)
}
\arguments{
\item{object}{A \code{\link[SeuratObject]{Seurat}} object}
\item{method}{Integration method function}
\item{orig.reduction}{Name of dimensional reduction for correction}
\item{assay}{Name of assay for integration}
\item{features}{A vector of features to use for integration}
\item{layers}{Names of normalized layers in \code{assay}}
\item{scale.layer}{Name(s) of scaled layer(s) in \code{assay}}
\item{...}{Arguments passed on to \code{method}}
}
\value{
\code{object} with integration data added to it
}
\description{
Integrate Layers
}
\section{Integration Method Functions}{
The following integration method functions are available:
\Sexpr[stage=render,results=rd]{Seurat:::.rd_methods("integration")}
}
\seealso{
\link[Seurat:writing-integration]{Writing integration method functions}
}
\concept{integration}
Seurat/man/AnchorSet-class.Rd 0000644 0001762 0000144 00000002763 14525500037 015561 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\docType{class}
\name{AnchorSet-class}
\alias{AnchorSet-class}
\alias{AnchorSet}
\title{The AnchorSet Class}
\description{
The AnchorSet class is an intermediate data storage class that stores the anchors and other
related information needed for performing downstream analyses - namely data integration
(\code{\link{IntegrateData}}) and data transfer (\code{\link{TransferData}}).
}
\section{Slots}{
\describe{
\item{\code{object.list}}{List of objects used to create anchors}
\item{\code{reference.cells}}{List of cell names in the reference dataset - needed when performing data
transfer.}
\item{\code{reference.objects}}{Position of reference object/s in object.list}
\item{\code{query.cells}}{List of cell names in the query dataset - needed when performing data transfer}
\item{\code{anchors}}{The anchor matrix. This contains the cell indices of both anchor pair cells, the
anchor score, and the index of the original dataset in the object.list for cell1 and cell2 of
the anchor.}
\item{\code{offsets}}{The offsets used to enable cell look up in downstream functions}
\item{\code{weight.reduction}}{The weight dimensional reduction used to calculate weight matrix}
\item{\code{anchor.features}}{The features used when performing anchor finding.}
\item{\code{neighbors}}{List containing Neighbor objects for reuse later (e.g. mapping)}
\item{\code{command}}{Store log of parameters that were used}
}}
\concept{objects}
Seurat/man/UpdateSymbolList.Rd 0000644 0001762 0000144 00000004255 14525500037 016032 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{UpdateSymbolList}
\alias{UpdateSymbolList}
\alias{GeneSymbolThesarus}
\title{Get updated synonyms for gene symbols}
\source{
\url{https://www.genenames.org/} \url{https://www.genenames.org/help/rest/}
}
\usage{
GeneSymbolThesarus(
symbols,
timeout = 10,
several.ok = FALSE,
search.types = c("alias_symbol", "prev_symbol"),
verbose = TRUE,
...
)
UpdateSymbolList(
symbols,
timeout = 10,
several.ok = FALSE,
verbose = TRUE,
...
)
}
\arguments{
\item{symbols}{A vector of gene symbols}
\item{timeout}{Time to wait before canceling query in seconds}
\item{several.ok}{Allow several current gene symbols for each
provided symbol}
\item{search.types}{Type of query to perform:
\describe{
\item{\dQuote{\code{alias_symbol}}}{Find alternate symbols for the genes
described by \code{symbols}}
\item{\dQuote{\code{prev_symbol}}}{Find new symbols for the genes
described by \code{symbols}}
}
This parameter accepts multiple options and short-hand options
(eg. \dQuote{\code{prev}} for \dQuote{\code{prev_symbol}})}
\item{verbose}{Show a progress bar depicting search progress}
\item{...}{Extra parameters passed to \code{\link[httr]{GET}}}
}
\value{
\code{GeneSymbolThesarus}:, if \code{several.ok}, a named list
where each entry is the current symbol found for each symbol provided and
the names are the provided symbols. Otherwise, a named vector with the
same information.
\code{UpdateSymbolList}: \code{symbols} with updated symbols from
HGNC's gene names database
}
\description{
Find current gene symbols based on old or alias symbols using the gene
names database from the HUGO Gene Nomenclature Committee (HGNC)
}
\details{
For each symbol passed, we query the HGNC gene names database for
current symbols that have the provided symbol as either an alias
(\code{alias_symbol}) or old (\code{prev_symbol}) symbol. All other queries
are \strong{not} supported.
}
\note{
This function requires internet access
}
\examples{
\dontrun{
GeneSymbolThesarus(symbols = c("FAM64A"))
}
\dontrun{
UpdateSymbolList(symbols = cc.genes$s.genes)
}
}
\seealso{
\code{\link[httr]{GET}}
}
\concept{utilities}
Seurat/man/MappingScore.Rd 0000644 0001762 0000144 00000005404 14525500037 015152 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/integration.R
\name{MappingScore}
\alias{MappingScore}
\alias{MappingScore.default}
\alias{MappingScore.AnchorSet}
\title{Metric for evaluating mapping success}
\usage{
MappingScore(anchors, ...)
\method{MappingScore}{default}(
anchors,
combined.object,
query.neighbors,
ref.embeddings,
query.embeddings,
kanchors = 50,
ndim = 50,
ksmooth = 100,
ksnn = 20,
snn.prune = 0,
subtract.first.nn = TRUE,
nn.method = "annoy",
n.trees = 50,
query.weights = NULL,
verbose = TRUE,
...
)
\method{MappingScore}{AnchorSet}(
anchors,
kanchors = 50,
ndim = 50,
ksmooth = 100,
ksnn = 20,
snn.prune = 0,
subtract.first.nn = TRUE,
nn.method = "annoy",
n.trees = 50,
query.weights = NULL,
verbose = TRUE,
...
)
}
\arguments{
\item{anchors}{AnchorSet object or just anchor matrix from the
Anchorset object returned from FindTransferAnchors}
\item{...}{Reserved for internal use}
\item{combined.object}{Combined object (ref + query) from the
Anchorset object returned}
\item{query.neighbors}{Neighbors object computed on query cells}
\item{ref.embeddings}{Reference embeddings matrix}
\item{query.embeddings}{Query embeddings matrix}
\item{kanchors}{Number of anchors to use in projection steps when computing
weights}
\item{ndim}{Number of dimensions to use when working with low dimensional
projections of the data}
\item{ksmooth}{Number of cells to average over when computing transition
probabilities}
\item{ksnn}{Number of cells to average over when determining the kernel
bandwidth from the SNN graph}
\item{snn.prune}{Amount of pruning to apply to edges in SNN graph}
\item{subtract.first.nn}{Option to the scoring function when computing
distances to subtract the distance to the first nearest neighbor}
\item{nn.method}{Nearest neighbor method to use (annoy or RANN)}
\item{n.trees}{More trees gives higher precision when using annoy approximate
nearest neighbor search}
\item{query.weights}{Query weights matrix for reuse}
\item{verbose}{Display messages/progress}
}
\value{
Returns a vector of cell scores
}
\description{
This metric was designed to help identify query cells that aren't well
represented in the reference dataset. The intuition for the score is that we
are going to project the query cells into a reference-defined space and then
project them back onto the query. By comparing the neighborhoods before and
after projection, we identify cells whose local neighborhoods are the most
affected by this transformation. This could be because there is a population
of query cells that aren't present in the reference or the state of the cells
in the query is significantly different from the equivalent cell type in the
reference.
}
\concept{integration}
Seurat/man/LogVMR.Rd 0000644 0001762 0000144 00000000751 14525500037 013671 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{LogVMR}
\alias{LogVMR}
\title{Calculate the variance to mean ratio of logged values}
\usage{
LogVMR(x, ...)
}
\arguments{
\item{x}{A vector of values}
\item{...}{Other arguments (not used)}
}
\value{
Returns the VMR in log-space
}
\description{
Calculate the variance to mean ratio (VMR) in non-logspace (return answer in
log-space)
}
\examples{
LogVMR(x = c(1, 2, 3))
}
\concept{utilities}
Seurat/man/RidgePlot.Rd 0000644 0001762 0000144 00000003741 14525500037 014456 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{RidgePlot}
\alias{RidgePlot}
\title{Single cell ridge plot}
\usage{
RidgePlot(
object,
features,
cols = NULL,
idents = NULL,
sort = FALSE,
assay = NULL,
group.by = NULL,
y.max = NULL,
same.y.lims = FALSE,
log = FALSE,
ncol = NULL,
slot = deprecated(),
layer = "data",
stack = FALSE,
combine = TRUE,
fill.by = "feature"
)
}
\arguments{
\item{object}{Seurat object}
\item{features}{Features to plot (gene expression, metrics, PC scores,
anything that can be retrieved by FetchData)}
\item{cols}{Colors to use for plotting}
\item{idents}{Which classes to include in the plot (default is all)}
\item{sort}{Sort identity classes (on the x-axis) by the average
expression of the attribute being plotted, can also pass 'increasing' or 'decreasing' to change sort direction}
\item{assay}{Name of assay to use, defaults to the active assay}
\item{group.by}{Group (color) cells in different ways (for example, orig.ident)}
\item{y.max}{Maximum y axis value}
\item{same.y.lims}{Set all the y-axis limits to the same values}
\item{log}{plot the feature axis on log scale}
\item{ncol}{Number of columns if multiple plots are displayed}
\item{slot}{Slot to pull expression data from (e.g. "counts" or "data")}
\item{layer}{Layer to pull expression data from (e.g. "counts" or "data")}
\item{stack}{Horizontally stack plots for each feature}
\item{combine}{Combine plots into a single \code{\link[patchwork]{patchwork}ed}
ggplot object. If \code{FALSE}, return a list of ggplot}
\item{fill.by}{Color violins/ridges based on either 'feature' or 'ident'}
}
\value{
A \code{\link[patchwork]{patchwork}ed} ggplot object if
\code{combine = TRUE}; otherwise, a list of ggplot objects
}
\description{
Draws a ridge plot of single cell data (gene expression, metrics, PC
scores, etc.)
}
\examples{
data("pbmc_small")
RidgePlot(object = pbmc_small, features = 'PC_1')
}
\concept{visualization}
Seurat/man/FindBridgeTransferAnchors.Rd 0000644 0001762 0000144 00000004526 14525500037 017607 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{FindBridgeTransferAnchors}
\alias{FindBridgeTransferAnchors}
\title{Find bridge anchors between query and extended bridge-reference}
\usage{
FindBridgeTransferAnchors(
extended.reference,
query,
query.assay = NULL,
dims = 1:30,
scale = FALSE,
reduction = c("lsiproject", "pcaproject"),
bridge.reduction = c("direct", "cca"),
verbose = TRUE
)
}
\arguments{
\item{extended.reference}{BridgeReferenceSet object generated from
\code{\link{PrepareBridgeReference}}}
\item{query}{A query Seurat object}
\item{query.assay}{Assay name for query-bridge integration}
\item{dims}{Number of dimensions for query-bridge integration}
\item{scale}{Determine if scale the query data for projection}
\item{reduction}{Dimensional reduction to perform when finding anchors.
Options are:
\itemize{
\item{pcaproject: Project the PCA from the bridge onto the query. We
recommend using PCA when bridge and query datasets are from scRNA-seq}
\item{lsiproject: Project the LSI from the bridge onto the query. We
recommend using LSI when bridge and query datasets are from scATAC-seq or scCUT&TAG data.
This requires that LSI or supervised LSI has been computed for the bridge dataset, and the
same features (eg, peaks or genome bins) are present in both the bridge
and query.
}
}}
\item{bridge.reduction}{Dimensional reduction to perform when finding anchors. Can
be one of:
\itemize{
\item{cca: Canonical correlation analysis}
\item{direct: Use assay data as a dimensional reduction}
}}
\item{verbose}{Print messages and progress}
}
\value{
Returns an \code{AnchorSet} object that can be used as input to
\code{\link{TransferData}}, \code{\link{IntegrateEmbeddings}} and
\code{\link{MapQuery}}.
}
\description{
Find a set of anchors between unimodal query and the other unimodal reference
using a pre-computed \code{\link{BridgeReferenceSet}}.
This function performs three steps:
1. Harmonize the bridge and query cells in the bridge query reduction space
2. Construct the bridge dictionary representations for query cells
3. Find a set of anchors between query and reference in the bridge graph laplacian eigenspace
These anchors can later be used to integrate embeddings or transfer data from the reference to
query object using the \code{\link{MapQuery}} function.
}
Seurat/man/SpatialImage-class.Rd 0000644 0001762 0000144 00000000614 14525500037 016224 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reexports.R
\docType{class}
\name{SpatialImage-class}
\alias{SpatialImage-class}
\title{The SpatialImage Class}
\description{
For more details, please see the documentation in
\code{\link[SeuratObject:SpatialImage]{SeuratObject}}
}
\seealso{
\code{\link[SeuratObject:SpatialImage]{SeuratObject::SpatialImage-class}}
}
Seurat/man/ElbowPlot.Rd 0000644 0001762 0000144 00000001440 14525500037 014466 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{ElbowPlot}
\alias{ElbowPlot}
\title{Quickly Pick Relevant Dimensions}
\usage{
ElbowPlot(object, ndims = 20, reduction = "pca")
}
\arguments{
\item{object}{Seurat object}
\item{ndims}{Number of dimensions to plot standard deviation for}
\item{reduction}{Reduction technique to plot standard deviation for}
}
\value{
A ggplot object
}
\description{
Plots the standard deviations (or approximate singular values if running PCAFast)
of the principal components for easy identification of an elbow in the graph.
This elbow often corresponds well with the significant dims and is much faster to run than
Jackstraw
}
\examples{
data("pbmc_small")
ElbowPlot(object = pbmc_small)
}
\concept{visualization}
Seurat/man/RenameCells.Rd 0000644 0001762 0000144 00000001431 14525500037 014751 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{RenameCells.SCTAssay}
\alias{RenameCells.SCTAssay}
\alias{RenameCells.SlideSeq}
\alias{RenameCells.STARmap}
\alias{RenameCells.VisiumV1}
\title{Rename Cells in an Object}
\usage{
\method{RenameCells}{SCTAssay}(object, new.names = NULL, ...)
\method{RenameCells}{SlideSeq}(object, new.names = NULL, ...)
\method{RenameCells}{STARmap}(object, new.names = NULL, ...)
\method{RenameCells}{VisiumV1}(object, new.names = NULL, ...)
}
\arguments{
\item{object}{An object}
\item{new.names}{vector of new cell names}
\item{...}{Arguments passed to other methods}
}
\description{
Rename Cells in an Object
}
\seealso{
\code{\link[SeuratObject:RenameCells]{SeuratObject::RenameCells}}
}
\concept{objects}
Seurat/man/UpdateSCTAssays.Rd 0000644 0001762 0000144 00000000741 14525500037 015542 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{UpdateSCTAssays}
\alias{UpdateSCTAssays}
\title{Update pre-V4 Assays generated with SCTransform in the Seurat to the new
SCTAssay class}
\usage{
UpdateSCTAssays(object)
}
\arguments{
\item{object}{A Seurat object}
}
\value{
A Seurat object with updated SCTAssays
}
\description{
Update pre-V4 Assays generated with SCTransform in the Seurat to the new
SCTAssay class
}
\concept{objects}
Seurat/man/SCTransform.Rd 0000644 0001762 0000144 00000013534 14525500037 014767 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/preprocessing.R,
% R/preprocessing5.R
\name{SCTransform}
\alias{SCTransform}
\alias{SCTransform.default}
\alias{SCTransform.Assay}
\alias{SCTransform.Seurat}
\alias{SCTransform.IterableMatrix}
\title{Perform sctransform-based normalization}
\usage{
SCTransform(object, ...)
\method{SCTransform}{default}(
object,
cell.attr,
reference.SCT.model = NULL,
do.correct.umi = TRUE,
ncells = 5000,
residual.features = NULL,
variable.features.n = 3000,
variable.features.rv.th = 1.3,
vars.to.regress = NULL,
do.scale = FALSE,
do.center = TRUE,
clip.range = c(-sqrt(x = ncol(x = umi)/30), sqrt(x = ncol(x = umi)/30)),
vst.flavor = "v2",
conserve.memory = FALSE,
return.only.var.genes = TRUE,
seed.use = 1448145,
verbose = TRUE,
...
)
\method{SCTransform}{Assay}(
object,
cell.attr,
reference.SCT.model = NULL,
do.correct.umi = TRUE,
ncells = 5000,
residual.features = NULL,
variable.features.n = 3000,
variable.features.rv.th = 1.3,
vars.to.regress = NULL,
do.scale = FALSE,
do.center = TRUE,
clip.range = c(-sqrt(x = ncol(x = object)/30), sqrt(x = ncol(x = object)/30)),
vst.flavor = "v2",
conserve.memory = FALSE,
return.only.var.genes = TRUE,
seed.use = 1448145,
verbose = TRUE,
...
)
\method{SCTransform}{Seurat}(
object,
assay = "RNA",
new.assay.name = "SCT",
reference.SCT.model = NULL,
do.correct.umi = TRUE,
ncells = 5000,
residual.features = NULL,
variable.features.n = 3000,
variable.features.rv.th = 1.3,
vars.to.regress = NULL,
do.scale = FALSE,
do.center = TRUE,
clip.range = c(-sqrt(x = ncol(x = object[[assay]])/30), sqrt(x = ncol(x =
object[[assay]])/30)),
vst.flavor = "v2",
conserve.memory = FALSE,
return.only.var.genes = TRUE,
seed.use = 1448145,
verbose = TRUE,
...
)
\method{SCTransform}{IterableMatrix}(
object,
cell.attr,
reference.SCT.model = NULL,
do.correct.umi = TRUE,
ncells = 5000,
residual.features = NULL,
variable.features.n = 3000,
variable.features.rv.th = 1.3,
vars.to.regress = NULL,
do.scale = FALSE,
do.center = TRUE,
clip.range = c(-sqrt(x = ncol(x = object)/30), sqrt(x = ncol(x = object)/30)),
vst.flavor = "v2",
conserve.memory = FALSE,
return.only.var.genes = TRUE,
seed.use = 1448145,
verbose = TRUE,
...
)
}
\arguments{
\item{object}{UMI counts matrix}
\item{...}{Additional parameters passed to \code{sctransform::vst}}
\item{cell.attr}{A metadata with cell attributes}
\item{reference.SCT.model}{If not NULL, compute residuals for the object
using the provided SCT model; supports only log_umi as the latent variable.
If residual.features are not specified, compute for the top variable.features.n
specified in the model which are also present in the object. If
residual.features are specified, the variable features of the resulting SCT
assay are set to the top variable.features.n in the model.}
\item{do.correct.umi}{Place corrected UMI matrix in assay counts slot; default is TRUE}
\item{ncells}{Number of subsampling cells used to build NB regression; default is 5000}
\item{residual.features}{Genes to calculate residual features for; default is NULL (all genes).
If specified, will be set to VariableFeatures of the returned object.}
\item{variable.features.n}{Use this many features as variable features after
ranking by residual variance; default is 3000. Only applied if residual.features is not set.}
\item{variable.features.rv.th}{Instead of setting a fixed number of variable features,
use this residual variance cutoff; this is only used when \code{variable.features.n}
is set to NULL; default is 1.3. Only applied if residual.features is not set.}
\item{vars.to.regress}{Variables to regress out in a second non-regularized linear
regression. For example, percent.mito. Default is NULL}
\item{do.scale}{Whether to scale residuals to have unit variance; default is FALSE}
\item{do.center}{Whether to center residuals to have mean zero; default is TRUE}
\item{clip.range}{Range to clip the residuals to; default is \code{c(-sqrt(n/30), sqrt(n/30))},
where n is the number of cells}
\item{vst.flavor}{When set to 'v2' sets method = glmGamPoi_offset, n_cells=2000,
and exclude_poisson = TRUE which causes the model to learn theta and intercept
only besides excluding poisson genes from learning and regularization}
\item{conserve.memory}{If set to TRUE the residual matrix for all genes is never
created in full; useful for large data sets, but will take longer to run;
this will also set return.only.var.genes to TRUE; default is FALSE}
\item{return.only.var.genes}{If set to TRUE the scale.data matrices in output assay are
subset to contain only the variable genes; default is TRUE}
\item{seed.use}{Set a random seed. By default, sets the seed to 1448145. Setting
NULL will not set a seed.}
\item{verbose}{Whether to print messages and progress bars}
\item{assay}{Name of assay to pull the count data from; default is 'RNA'}
\item{new.assay.name}{Name for the new assay containing the normalized data; default is 'SCT'}
}
\value{
Returns a Seurat object with a new assay (named SCT by default) with
counts being (corrected) counts, data being log1p(counts), scale.data being
pearson residuals; sctransform::vst intermediate results are saved in misc
slot of the new assay.
}
\description{
This function calls sctransform::vst. The sctransform package is available at
https://github.com/satijalab/sctransform.
Use this function as an alternative to the NormalizeData,
FindVariableFeatures, ScaleData workflow. Results are saved in a new assay
(named SCT by default) with counts being (corrected) counts, data being log1p(counts),
scale.data being pearson residuals; sctransform::vst intermediate results are saved
in misc slot of new assay.
}
\seealso{
\code{\link[sctransform]{correct_counts}} \code{\link[sctransform]{get_residuals}}
}
\concept{preprocessing}
Seurat/man/FastRPCAIntegration.Rd 0000644 0001762 0000144 00000004610 14525500037 016330 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{FastRPCAIntegration}
\alias{FastRPCAIntegration}
\title{Perform integration on the joint PCA cell embeddings.}
\usage{
FastRPCAIntegration(
object.list,
reference = NULL,
anchor.features = 2000,
k.anchor = 20,
dims = 1:30,
scale = TRUE,
normalization.method = c("LogNormalize", "SCT"),
new.reduction.name = "integrated_dr",
npcs = 50,
findintegrationanchors.args = list(),
verbose = TRUE
)
}
\arguments{
\item{object.list}{A list of \code{\link{Seurat}} objects between which to
find anchors for downstream integration.}
\item{reference}{A vector specifying the object/s to be used as a reference
during integration. If NULL (default), all pairwise anchors are found (no
reference/s). If not NULL, the corresponding objects in \code{object.list}
will be used as references. When using a set of specified references, anchors
are first found between each query and each reference. The references are
then integrated through pairwise integration. Each query is then mapped to
the integrated reference.}
\item{anchor.features}{Can be either:
\itemize{
\item{A numeric value. This will call \code{\link{SelectIntegrationFeatures}}
to select the provided number of features to be used in anchor finding}
\item{A vector of features to be used as input to the anchor finding process}
}}
\item{k.anchor}{How many neighbors (k) to use when picking anchors}
\item{dims}{Which dimensions to use from the CCA to specify the neighbor
search space}
\item{scale}{Whether or not to scale the features provided. Only set to FALSE
if you have previously scaled the features you want to use for each object in
the object.list}
\item{normalization.method}{Name of normalization method used: LogNormalize
or SCT}
\item{new.reduction.name}{Name of integrated dimensional reduction}
\item{npcs}{Total Number of PCs to compute and store (50 by default)}
\item{findintegrationanchors.args}{A named list of additional arguments to
\code{\link{FindIntegrationAnchors}}}
\item{verbose}{Print messages and progress}
}
\value{
Returns a Seurat object with integrated dimensional reduction
}
\description{
This is a convenience wrapper function around the following three functions
that are often run together when performing integration:
\code{\link{FindIntegrationAnchors}}, \code{\link{RunPCA}},
\code{\link{IntegrateEmbeddings}}.
}
Seurat/man/LogNormalize.Rd 0000644 0001762 0000144 00000002204 14525500037 015160 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/preprocessing.R,
% R/preprocessing5.R
\name{LogNormalize}
\alias{LogNormalize}
\alias{LogNormalize.data.frame}
\alias{LogNormalize.V3Matrix}
\alias{LogNormalize.default}
\title{Normalize Raw Data}
\usage{
LogNormalize(data, scale.factor = 10000, margin = 2L, verbose = TRUE, ...)
\method{LogNormalize}{data.frame}(data, scale.factor = 10000, margin = 2L, verbose = TRUE, ...)
\method{LogNormalize}{V3Matrix}(data, scale.factor = 10000, margin = 2L, verbose = TRUE, ...)
\method{LogNormalize}{default}(data, scale.factor = 10000, margin = 2L, verbose = TRUE, ...)
}
\arguments{
\item{data}{Matrix with the raw count data}
\item{scale.factor}{Scale the data; default is \code{1e4}}
\item{margin}{Margin to normalize over}
\item{verbose}{Print progress}
\item{...}{Arguments passed to other methods}
}
\value{
A matrix with the normalized and log-transformed data
}
\description{
Normalize Raw Data
}
\examples{
mat <- matrix(data = rbinom(n = 25, size = 5, prob = 0.2), nrow = 5)
mat
mat_norm <- LogNormalize(data = mat)
mat_norm
}
\concept{preprocessing}
Seurat/man/Read10X_Image.Rd 0000644 0001762 0000144 00000001405 14525500037 015026 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{Read10X_Image}
\alias{Read10X_Image}
\title{Load a 10X Genomics Visium Image}
\usage{
Read10X_Image(image.dir, filter.matrix = TRUE, ...)
}
\arguments{
\item{image.dir}{Path to directory with 10X Genomics visium image data;
should include files \code{tissue_lowres_image.png},
\code{scalefactors_json.json} and \code{tissue_positions_list.csv}}
\item{filter.matrix}{Filter spot/feature matrix to only include spots that
have been determined to be over tissue.}
\item{...}{Ignored for now}
}
\value{
A \code{\link{VisiumV1}} object
}
\description{
Load a 10X Genomics Visium Image
}
\seealso{
\code{\link{VisiumV1}} \code{\link{Load10X_Spatial}}
}
\concept{preprocessing}
Seurat/man/Seurat-package.Rd 0000644 0001762 0000144 00000011047 14525500037 015417 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zzz.R
\docType{package}
\name{Seurat-package}
\alias{Seurat}
\alias{Seurat-package}
\title{Seurat: Tools for Single Cell Genomics}
\description{
A toolkit for quality control, analysis, and exploration of single cell RNA sequencing data. 'Seurat' aims to enable users to identify and interpret sources of heterogeneity from single cell transcriptomic measurements, and to integrate diverse types of single cell data. See Satija R, Farrell J, Gennert D, et al (2015) \doi{10.1038/nbt.3192}, Macosko E, Basu A, Satija R, et al (2015) \doi{10.1016/j.cell.2015.05.002}, Stuart T, Butler A, et al (2019) \doi{10.1016/j.cell.2019.05.031}, and Hao, Hao, et al (2020) \doi{10.1101/2020.10.12.335331} for more details.
}
\section{Package options}{
Seurat uses the following [options()] to configure behaviour:
\describe{
\item{\code{Seurat.memsafe}}{global option to call gc() after many operations.
This can be helpful in cleaning up the memory status of the R session and
prevent use of swap space. However, it does add to the computational overhead
and setting to FALSE can speed things up if you're working in an environment
where RAM availability is not a concern.}
\item{\code{Seurat.warn.umap.uwot}}{Show warning about the default backend
for \code{\link{RunUMAP}} changing from Python UMAP via reticulate to UWOT}
\item{\code{Seurat.checkdots}}{For functions that have ... as a parameter,
this controls the behavior when an item isn't used. Can be one of warn,
stop, or silent.}
\item{\code{Seurat.limma.wilcox.msg}}{Show message about more efficient
Wilcoxon Rank Sum test available via the limma package}
\item{\code{Seurat.Rfast2.msg}}{Show message about more efficient
Moran's I function available via the Rfast2 package}
\item{\code{Seurat.warn.vlnplot.split}}{Show message about changes to
default behavior of split/multi violin plots}
}
}
\seealso{
Useful links:
\itemize{
\item \url{https://satijalab.org/seurat}
\item \url{https://github.com/satijalab/seurat}
\item Report bugs at \url{https://github.com/satijalab/seurat/issues}
}
}
\author{
\strong{Maintainer}: Rahul Satija \email{seurat@nygenome.org} (\href{https://orcid.org/0000-0001-9448-8833}{ORCID})
Other contributors:
\itemize{
\item Andrew Butler \email{abutler@nygenome.org} (\href{https://orcid.org/0000-0003-3608-0463}{ORCID}) [contributor]
\item Saket Choudhary \email{schoudhary@nygenome.org} (\href{https://orcid.org/0000-0001-5202-7633}{ORCID}) [contributor]
\item Charlotte Darby \email{cdarby@nygenome.org} (\href{https://orcid.org/0000-0003-2195-5300}{ORCID}) [contributor]
\item Jeff Farrell \email{jfarrell@g.harvard.edu} [contributor]
\item Isabella Grabski \email{igrabski@nygenome.org} (\href{https://orcid.org/0000-0002-0616-5469}{ORCID}) [contributor]
\item Christoph Hafemeister \email{chafemeister@nygenome.org} (\href{https://orcid.org/0000-0001-6365-8254}{ORCID}) [contributor]
\item Yuhan Hao \email{yhao@nygenome.org} (\href{https://orcid.org/0000-0002-1810-0822}{ORCID}) [contributor]
\item Austin Hartman \email{ahartman@nygenome.org} (\href{https://orcid.org/0000-0001-7278-1852}{ORCID}) [contributor]
\item Paul Hoffman \email{hoff0792@umn.edu} (\href{https://orcid.org/0000-0002-7693-8957}{ORCID}) [contributor]
\item Jaison Jain \email{jjain@nygenome.org} (\href{https://orcid.org/0000-0002-9478-5018}{ORCID}) [contributor]
\item Longda Jiang \email{ljiang@nygenome.org} (\href{https://orcid.org/0000-0003-4964-6497}{ORCID}) [contributor]
\item Madeline Kowalski \email{mkowalski@nygenome.org} (\href{https://orcid.org/0000-0002-5655-7620}{ORCID}) [contributor]
\item Skylar Li \email{sli@nygenome.org} [contributor]
\item Gesmira Molla \email{gmolla@nygenome.org} (\href{https://orcid.org/0000-0002-8628-5056}{ORCID}) [contributor]
\item Efthymia Papalexi \email{epapalexi@nygenome.org} (\href{https://orcid.org/0000-0001-5898-694X}{ORCID}) [contributor]
\item Patrick Roelli \email{proelli@nygenome.org} [contributor]
\item Karthik Shekhar \email{kshekhar@berkeley.edu} [contributor]
\item Avi Srivastava \email{asrivastava@nygenome.org} (\href{https://orcid.org/0000-0001-9798-2079}{ORCID}) [contributor]
\item Tim Stuart \email{tstuart@nygenome.org} (\href{https://orcid.org/0000-0002-3044-0897}{ORCID}) [contributor]
\item Kristof Torkenczy (\href{https://orcid.org/0000-0002-4869-7957}{ORCID}) [contributor]
\item Shiwei Zheng \email{szheng@nygenome.org} (\href{https://orcid.org/0000-0001-6682-6743}{ORCID}) [contributor]
\item Satija Lab and Collaborators [funder]
}
}
Seurat/man/HVFInfo.SCTAssay.Rd 0000644 0001762 0000144 00000001365 14525500037 015455 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{HVFInfo.SCTAssay}
\alias{HVFInfo.SCTAssay}
\title{Get Variable Feature Information}
\usage{
\method{HVFInfo}{SCTAssay}(object, method, status = FALSE, ...)
}
\arguments{
\item{object}{An object}
\item{method}{method to determine variable features}
\item{status}{Add variable status to the resulting data frame}
\item{...}{Arguments passed to other methods}
}
\description{
Get variable feature information from \code{\link{SCTAssay}} objects
}
\examples{
\dontrun{
# Get the HVF info directly from an SCTAssay object
pbmc_small <- SCTransform(pbmc_small)
HVFInfo(pbmc_small[["SCT"]], method = 'sct')[1:5, ]
}
}
\seealso{
\code{\link[SeuratObject]{HVFInfo}}
}
Seurat/man/ReadXenium.Rd 0000644 0001762 0000144 00000003403 14525500037 014621 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convenience.R, R/preprocessing.R
\name{LoadXenium}
\alias{LoadXenium}
\alias{ReadXenium}
\title{Read and Load 10x Genomics Xenium in-situ data}
\usage{
LoadXenium(data.dir, fov = "fov", assay = "Xenium")
ReadXenium(
data.dir,
outs = c("matrix", "microns"),
type = "centroids",
mols.qv.threshold = 20
)
}
\arguments{
\item{data.dir}{Directory containing all Xenium output files with
default filenames}
\item{fov}{FOV name}
\item{assay}{Assay name}
\item{outs}{Types of molecular outputs to read; choose one or more of:
\itemize{
\item \dQuote{matrix}: the counts matrix
\item \dQuote{microns}: molecule coordinates
}}
\item{type}{Type of cell spatial coordinate matrices to read; choose one
or more of:
\itemize{
\item \dQuote{centroids}: cell centroids in pixel coordinate space
\item \dQuote{segmentations}: cell segmentations in pixel coordinate space
}}
\item{mols.qv.threshold}{Remove transcript molecules with
a QV less than this threshold. QV >= 20 is the standard threshold
used to construct the cell x gene count matrix.}
}
\value{
\code{LoadXenium}: A \code{\link[SeuratObject]{Seurat}} object
\code{ReadXenium}: A list with some combination of the
following values:
\itemize{
\item \dQuote{\code{matrix}}: a
\link[Matrix:dgCMatrix-class]{sparse matrix} with expression data; cells
are columns and features are rows
\item \dQuote{\code{centroids}}: a data frame with cell centroid
coordinates in three columns: \dQuote{x}, \dQuote{y}, and \dQuote{cell}
\item \dQuote{\code{pixels}}: a data frame with molecule pixel coordinates
in three columns: \dQuote{x}, \dQuote{y}, and \dQuote{gene}
}
}
\description{
Read and Load 10x Genomics Xenium in-situ data
}
\concept{preprocessing}
Seurat/man/RunLDA.Rd 0000644 0001762 0000144 00000003125 14525500037 013646 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/mixscape.R
\name{RunLDA}
\alias{RunLDA}
\alias{RunLDA.default}
\alias{RunLDA.Assay}
\alias{RunLDA.Seurat}
\title{Run Linear Discriminant Analysis}
\usage{
RunLDA(object, ...)
\method{RunLDA}{default}(
object,
labels,
assay = NULL,
verbose = TRUE,
ndims.print = 1:5,
nfeatures.print = 30,
reduction.key = "LDA_",
seed = 42,
...
)
\method{RunLDA}{Assay}(
object,
assay = NULL,
labels,
features = NULL,
verbose = TRUE,
ndims.print = 1:5,
nfeatures.print = 30,
reduction.key = "LDA_",
seed = 42,
...
)
\method{RunLDA}{Seurat}(
object,
assay = NULL,
labels,
features = NULL,
reduction.name = "lda",
reduction.key = "LDA_",
seed = 42,
verbose = TRUE,
ndims.print = 1:5,
nfeatures.print = 30,
...
)
}
\arguments{
\item{object}{An object of class Seurat.}
\item{...}{Arguments passed to other methods}
\item{labels}{Meta data column with target gene class labels.}
\item{assay}{Assay to use for performing Linear Discriminant Analysis (LDA).}
\item{verbose}{Print the top genes associated with high/low loadings for
the PCs}
\item{ndims.print}{Number of LDA dimensions to print.}
\item{nfeatures.print}{Number of features to print for each LDA component.}
\item{reduction.key}{Reduction key name.}
\item{seed}{Value for random seed}
\item{features}{Features to compute LDA on}
\item{reduction.name}{dimensional reduction name, lda by default}
}
\description{
Function to perform Linear Discriminant Analysis (LDA).
}
\concept{mixscape}
Seurat/man/ScoreJackStraw.Rd 0000644 0001762 0000144 00000003050 14525500037 015443 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/dimensional_reduction.R
\name{ScoreJackStraw}
\alias{ScoreJackStraw}
\alias{ScoreJackStraw.JackStrawData}
\alias{ScoreJackStraw.DimReduc}
\alias{ScoreJackStraw.Seurat}
\title{Compute Jackstraw scores significance.}
\usage{
ScoreJackStraw(object, ...)
\method{ScoreJackStraw}{JackStrawData}(object, dims = 1:5, score.thresh = 1e-05, ...)
\method{ScoreJackStraw}{DimReduc}(object, dims = 1:5, score.thresh = 1e-05, ...)
\method{ScoreJackStraw}{Seurat}(
object,
reduction = "pca",
dims = 1:5,
score.thresh = 1e-05,
do.plot = FALSE,
...
)
}
\arguments{
\item{object}{An object}
\item{...}{Arguments passed to other methods}
\item{dims}{Which dimensions to examine}
\item{score.thresh}{Threshold to use for the proportion test of PC
significance (see Details)}
\item{reduction}{Reduction associated with JackStraw to score}
\item{do.plot}{Show plot. To return ggplot object, use \code{JackStrawPlot} after
running ScoreJackStraw.}
}
\value{
Returns a Seurat object
}
\description{
Significant PCs should show a p-value distribution that is
strongly skewed to the left compared to the null distribution.
The p-value for each PC is based on a proportion test comparing the number
of features with a p-value below a particular threshold (score.thresh), compared with the
proportion of features expected under a uniform distribution of p-values.
}
\seealso{
\code{\link{JackStrawPlot}}
}
\author{
Omri Wurtzel
}
\concept{dimensional_reduction}
Seurat/man/ReadSlideSeq.Rd 0000644 0001762 0000144 00000000774 14525500037 015075 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{ReadSlideSeq}
\alias{ReadSlideSeq}
\title{Load Slide-seq spatial data}
\usage{
ReadSlideSeq(coord.file, assay = "Spatial")
}
\arguments{
\item{coord.file}{Path to csv file containing bead coordinate positions}
\item{assay}{Name of assay to associate image to}
}
\value{
A \code{\link{SlideSeq}} object
}
\description{
Load Slide-seq spatial data
}
\seealso{
\code{\link{SlideSeq}}
}
\concept{preprocessing}
Seurat/man/SeuratCommand-class.Rd 0000644 0001762 0000144 00000000622 14525500037 016425 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reexports.R
\docType{class}
\name{SeuratCommand-class}
\alias{SeuratCommand-class}
\title{The SeuratCommand Class}
\description{
For more details, please see the documentation in
\code{\link[SeuratObject:SeuratCommand]{SeuratObject}}
}
\seealso{
\code{\link[SeuratObject:SeuratCommand]{SeuratObject::SeuratCommand-class}}
}
Seurat/man/JointPCAIntegration.Rd 0000644 0001762 0000144 00000006050 14525500037 016374 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration5.R
\name{JointPCAIntegration}
\alias{JointPCAIntegration}
\title{Seurat-Joint PCA Integration}
\usage{
JointPCAIntegration(
object = NULL,
assay = NULL,
layers = NULL,
orig = NULL,
new.reduction = "integrated.dr",
reference = NULL,
features = NULL,
normalization.method = c("LogNormalize", "SCT"),
dims = 1:30,
k.anchor = 20,
scale.layer = "scale.data",
dims.to.integrate = NULL,
k.weight = 100,
weight.reduction = NULL,
sd.weight = 1,
sample.tree = NULL,
preserve.order = FALSE,
verbose = TRUE,
...
)
}
\arguments{
\item{object}{A \code{Seurat} object}
\item{assay}{Name of \code{Assay} in the \code{Seurat} object}
\item{layers}{Names of layers in \code{assay}}
\item{orig}{A \link[SeuratObject:DimReduc]{dimensional reduction} to correct}
\item{new.reduction}{Name of new integrated dimensional reduction}
\item{reference}{A reference \code{Seurat} object}
\item{features}{A vector of features to use for integration}
\item{normalization.method}{Name of normalization method used: LogNormalize
or SCT}
\item{dims}{Dimensions of dimensional reduction to use for integration}
\item{k.anchor}{How many neighbors (k) to use when picking anchors}
\item{scale.layer}{Name of scaled layer in \code{Assay}}
\item{dims.to.integrate}{Number of dimensions to return integrated values for}
\item{k.weight}{Number of neighbors to consider when weighting anchors}
\item{weight.reduction}{Dimension reduction to use when calculating anchor
weights. This can be one of:
\itemize{
\item{A string, specifying the name of a dimension reduction present in
all objects to be integrated}
\item{A vector of strings, specifying the name of a dimension reduction to
use for each object to be integrated}
\item{A vector of \code{\link{DimReduc}} objects, specifying the object to
use for each object in the integration}
\item{NULL, in which case the full corrected space is used for computing
anchor weights.}
}}
\item{sd.weight}{Controls the bandwidth of the Gaussian kernel for weighting}
\item{sample.tree}{Specify the order of integration. Order of integration
should be encoded in a matrix, where each row represents one of the pairwise
integration steps. Negative numbers specify a dataset, positive numbers
specify the integration results from a given row (the format of the merge
matrix included in the \code{\link{hclust}} function output). For example:
\code{matrix(c(-2, 1, -3, -1), ncol = 2)} gives:
\if{html}{\out{<div class="sourceCode">}}\preformatted{ [,1] [,2]
[1,] -2 -3
[2,] 1 -1
}\if{html}{\out{</div>}}
Which would cause dataset 2 and 3 to be integrated first, then the resulting
object integrated with dataset 1.
If NULL, the sample tree will be computed automatically.}
\item{preserve.order}{Do not reorder objects based on size for each pairwise
integration.}
\item{verbose}{Print progress}
\item{...}{Arguments passed on to \code{FindIntegrationAnchors}}
}
\description{
Seurat-Joint PCA Integration
}
Seurat/man/TransferData.Rd 0000644 0001762 0000144 00000015104 14525500037 015137 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{TransferData}
\alias{TransferData}
\title{Transfer data}
\usage{
TransferData(
anchorset,
refdata,
reference = NULL,
query = NULL,
query.assay = NULL,
weight.reduction = "pcaproject",
l2.norm = FALSE,
dims = NULL,
k.weight = 50,
sd.weight = 1,
eps = 0,
n.trees = 50,
verbose = TRUE,
slot = "data",
prediction.assay = FALSE,
only.weights = FALSE,
store.weights = TRUE
)
}
\arguments{
\item{anchorset}{An \code{\link{AnchorSet}} object generated by
\code{\link{FindTransferAnchors}}}
\item{refdata}{Data to transfer. This can be specified in one of two ways:
\itemize{
\item{The reference data itself as either a vector where the names
correspond to the reference cells, or a matrix, where the column names
correspond to the reference cells.}
\item{The name of the metadata field or assay from the reference object
provided. This requires the reference parameter to be specified. If pulling
assay data in this manner, it will pull the data from the data slot. To
transfer data from other slots, please pull the data explicitly with
\code{\link{GetAssayData}} and provide that matrix here.}
}}
\item{reference}{Reference object from which to pull data to transfer}
\item{query}{Query object into which the data will be transferred.}
\item{query.assay}{Name of the Assay to use from query}
\item{weight.reduction}{Dimensional reduction to use for the weighting
anchors. Options are:
\itemize{
\item{pcaproject: Use the projected PCA used for anchor building}
\item{lsiproject: Use the projected LSI used for anchor building}
\item{pca: Use an internal PCA on the query only}
\item{cca: Use the CCA used for anchor building}
\item{custom DimReduc: User provided \code{\link{DimReduc}} object
computed on the query cells}
}}
\item{l2.norm}{Perform L2 normalization on the cell embeddings after
dimensional reduction}
\item{dims}{Set of dimensions to use in the anchor weighting procedure. If
NULL, the same dimensions that were used to find anchors will be used for
weighting.}
\item{k.weight}{Number of neighbors to consider when weighting anchors}
\item{sd.weight}{Controls the bandwidth of the Gaussian kernel for weighting}
\item{eps}{Error bound on the neighbor finding algorithm (from
\code{\link{RANN}})}
\item{n.trees}{More trees gives higher precision when using annoy approximate
nearest neighbor search}
\item{verbose}{Print progress bars and output}
\item{slot}{Slot to store the imputed data. Must be either "data" (default)
or "counts"}
\item{prediction.assay}{Return an \code{Assay} object with the prediction
scores for each class stored in the \code{data} slot.}
\item{only.weights}{Only return weights matrix}
\item{store.weights}{Optionally store the weights matrix used for predictions
in the returned query object.}
}
\value{
If \code{query} is not provided, for the categorical data in \code{refdata},
returns a data.frame with label predictions. If \code{refdata} is a matrix,
returns an Assay object where the imputed data has been stored in the
provided slot.
If \code{query} is provided, a modified query object is returned. For
the categorical data in refdata, prediction scores are stored as Assays
(prediction.score.NAME) and two additional metadata fields: predicted.NAME
and predicted.NAME.score which contain the class prediction and the score for
that predicted class. For continuous data, an Assay called NAME is returned.
NAME here corresponds to the name of the element in the refdata list.
}
\description{
Transfer categorical or continuous data across single-cell datasets. For
transferring categorical information, pass a vector from the reference
dataset (e.g. \code{refdata = reference$celltype}). For transferring
continuous information, pass a matrix from the reference dataset (e.g.
\code{refdata = GetAssayData(reference[['RNA']])}).
}
\details{
The main steps of this procedure are outlined below. For a more detailed
description of the methodology, please see Stuart, Butler, et al Cell 2019.
\doi{10.1016/j.cell.2019.05.031}; \doi{10.1101/460147}
For both transferring discrete labels and also feature imputation, we first
compute the weights matrix.
\itemize{
\item{Construct a weights matrix that defines the association between each
query cell and each anchor. These weights are computed as 1 - the distance
between the query cell and the anchor divided by the distance of the query
cell to the \code{k.weight}th anchor multiplied by the anchor score
computed in \code{\link{FindIntegrationAnchors}}. We then apply a Gaussian
kernel with a bandwidth defined by \code{sd.weight} and normalize across
all \code{k.weight} anchors.}
}
The main difference between label transfer (classification) and feature
imputation is what gets multiplied by the weights matrix. For label transfer,
we perform the following steps:
\itemize{
\item{Create a binary classification matrix, the rows corresponding to each
possible class and the columns corresponding to the anchors. If the
reference cell in the anchor pair is a member of a certain class, that
matrix entry is filled with a 1, otherwise 0.}
\item{Multiply this classification matrix by the transpose of weights
matrix to compute a prediction score for each class for each cell in the
query dataset.}
}
For feature imputation, we perform the following step:
\itemize{
\item{Multiply the expression matrix for the reference anchor cells by the
weights matrix. This returns a predicted expression matrix for the
specified features for each cell in the query dataset.}
}
}
\examples{
\dontrun{
# to install the SeuratData package see https://github.com/satijalab/seurat-data
library(SeuratData)
data("pbmc3k")
# for demonstration, split the object into reference and query
pbmc.reference <- pbmc3k[, 1:1350]
pbmc.query <- pbmc3k[, 1351:2700]
# perform standard preprocessing on each object
pbmc.reference <- NormalizeData(pbmc.reference)
pbmc.reference <- FindVariableFeatures(pbmc.reference)
pbmc.reference <- ScaleData(pbmc.reference)
pbmc.query <- NormalizeData(pbmc.query)
pbmc.query <- FindVariableFeatures(pbmc.query)
pbmc.query <- ScaleData(pbmc.query)
# find anchors
anchors <- FindTransferAnchors(reference = pbmc.reference, query = pbmc.query)
# transfer labels
predictions <- TransferData(anchorset = anchors, refdata = pbmc.reference$seurat_annotations)
pbmc.query <- AddMetaData(object = pbmc.query, metadata = predictions)
}
}
\references{
Stuart T, Butler A, et al. Comprehensive Integration of
Single-Cell Data. Cell. 2019;177:1888-1902 \doi{10.1016/j.cell.2019.05.031}
}
\concept{integration}
Seurat/man/FetchResidualSCTModel.Rd 0000644 0001762 0000144 00000003702 14525500037 016637 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing5.R
\name{FetchResidualSCTModel}
\alias{FetchResidualSCTModel}
\title{Calculate pearson residuals of features not in the scale.data
This function is the secondary function under FetchResiduals}
\usage{
FetchResidualSCTModel(
object,
assay = "SCT",
umi.assay = "RNA",
layer = "counts",
chunk_size = 2000,
layer.cells = NULL,
SCTModel = NULL,
reference.SCT.model = NULL,
new_features = NULL,
clip.range = NULL,
replace.value = FALSE,
verbose = FALSE
)
}
\arguments{
\item{object}{A seurat object}
\item{assay}{Name of the assay of the seurat object generated by
SCTransform. Default is "SCT"}
\item{umi.assay}{Name of the assay of the seurat object to fetch
UMIs from. Default is "RNA"}
\item{layer}{Name of the layer under `umi.assay` to fetch UMIs from.
Default is "counts"}
\item{chunk_size}{Number of cells to load in memory for calculating
residuals}
\item{layer.cells}{Vector of cells to calculate the residual for.
Default is NULL which uses all cells in the layer}
\item{SCTModel}{Which SCTmodel to use from the object for calculating
the residual. Will be ignored if reference.SCT.model is set}
\item{reference.SCT.model}{If a reference SCT model should be used
for calculating the residuals. When set to a non-NULL value, ignores the `SCTModel`
parameter.}
\item{new_features}{A vector of features to calculate the residuals for}
\item{clip.range}{Numeric of length two specifying the min and max values
the Pearson residual will be clipped to. Useful if you want to change the
clip.range.}
\item{replace.value}{Whether to replace the value of residuals if it
already exists}
\item{verbose}{Whether to print messages and progress bars}
}
\value{
Returns a matrix containing centered pearson residuals of
added features
}
\description{
Calculate pearson residuals of features not in the scale.data
This function is the secondary function under FetchResiduals
}
Seurat/man/ProjectData.Rd 0000644 0001762 0000144 00000003633 14525500037 014765 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sketching.R
\name{ProjectData}
\alias{ProjectData}
\title{Project full data to the sketch assay}
\usage{
ProjectData(
object,
assay = "RNA",
sketched.assay = "sketch",
sketched.reduction,
full.reduction,
dims,
normalization.method = c("LogNormalize", "SCT"),
refdata = NULL,
k.weight = 50,
umap.model = NULL,
recompute.neighbors = FALSE,
recompute.weights = FALSE,
verbose = TRUE
)
}
\arguments{
\item{object}{A Seurat object.}
\item{assay}{Assay name for the full data. Default is 'RNA'.}
\item{sketched.assay}{Sketched assay name to project onto. Default is 'sketch'.}
\item{sketched.reduction}{Dimensional reduction results of the sketched assay to project onto.}
\item{full.reduction}{Dimensional reduction name for the projected full dataset.}
\item{dims}{Dimensions to include in the projection.}
\item{normalization.method}{Normalization method to use. Can be 'LogNormalize' or 'SCT'.
Default is 'LogNormalize'.}
\item{refdata}{An optional list for label transfer from sketch to full data. Default is NULL.
Similar to refdata in `MapQuery`}
\item{k.weight}{Number of neighbors to consider when weighting labels for transfer. Default is 50.}
\item{umap.model}{An optional pre-computed UMAP model. Default is NULL.}
\item{recompute.neighbors}{Whether to recompute the neighbors for label transfer. Default is FALSE.}
\item{recompute.weights}{Whether to recompute the weights for label transfer. Default is FALSE.}
\item{verbose}{Print progress and diagnostic messages.}
}
\value{
A Seurat object with the full data projected onto the sketched dimensional reduction results.
The projected data are stored in the specified full reduction.
}
\description{
This function allows projection of high-dimensional single-cell RNA expression data from a full dataset
onto the lower-dimensional embedding of the sketch of the dataset.
}
Seurat/man/ReadMtx.Rd 0000644 0001762 0000144 00000003611 14525500037 014125 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{ReadMtx}
\alias{ReadMtx}
\title{Load in data from remote or local mtx files}
\usage{
ReadMtx(
mtx,
cells,
features,
cell.column = 1,
feature.column = 2,
cell.sep = "\\t",
feature.sep = "\\t",
skip.cell = 0,
skip.feature = 0,
mtx.transpose = FALSE,
unique.features = TRUE,
strip.suffix = FALSE
)
}
\arguments{
\item{mtx}{Name or remote URL of the mtx file}
\item{cells}{Name or remote URL of the cells/barcodes file}
\item{features}{Name or remote URL of the features/genes file}
\item{cell.column}{Specify which column of cells file to use for cell names; default is 1}
\item{feature.column}{Specify which column of features files to use for feature/gene names; default is 2}
\item{cell.sep}{Specify the delimiter in the cell name file}
\item{feature.sep}{Specify the delimiter in the feature name file}
\item{skip.cell}{Number of lines to skip in the cells file before beginning to read cell names}
\item{skip.feature}{Number of lines to skip in the features file before beginning to read gene names}
\item{mtx.transpose}{Transpose the matrix after reading in}
\item{unique.features}{Make feature names unique (default TRUE)}
\item{strip.suffix}{Remove trailing "-1" if present in all cell barcodes.}
}
\value{
A sparse matrix containing the expression data.
}
\description{
Enables easy loading of sparse data matrices
}
\examples{
\dontrun{
# For local files:
expression_matrix <- ReadMtx(
mtx = "count_matrix.mtx.gz", features = "features.tsv.gz",
cells = "barcodes.tsv.gz"
)
seurat_object <- CreateSeuratObject(counts = expression_matrix)
# For remote files:
expression_matrix <- ReadMtx(mtx = "http://localhost/matrix.mtx",
cells = "http://localhost/barcodes.tsv",
features = "http://localhost/genes.tsv")
seurat_object <- CreateSeuratObject(counts = expression_matrix)
}
}
\concept{preprocessing}
Seurat/man/AddModuleScore.Rd 0000644 0001762 0000144 00000004732 14525500037 015420 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{AddModuleScore}
\alias{AddModuleScore}
\title{Calculate module scores for feature expression programs in single cells}
\usage{
AddModuleScore(
object,
features,
pool = NULL,
nbin = 24,
ctrl = 100,
k = FALSE,
assay = NULL,
name = "Cluster",
seed = 1,
search = FALSE,
slot = "data",
...
)
}
\arguments{
\item{object}{Seurat object}
\item{features}{A list of vectors of features for expression programs; each
entry should be a vector of feature names}
\item{pool}{List of features to check expression levels against, defaults to
\code{rownames(x = object)}}
\item{nbin}{Number of bins of aggregate expression levels for all
analyzed features}
\item{ctrl}{Number of control features selected from the same bin per
analyzed feature}
\item{k}{Use feature clusters returned from DoKMeans}
\item{assay}{Name of assay to use}
\item{name}{Name for the expression programs; will append a number to the
end for each entry in \code{features} (eg. if \code{features} has three
programs, the results will be stored as \code{name1}, \code{name2},
\code{name3}, respectively)}
\item{seed}{Set a random seed. If NULL, seed is not set.}
\item{search}{Search for symbol synonyms for features in \code{features} that
don't match features in \code{object}? Searches the HGNC's gene names
database; see \code{\link{UpdateSymbolList}} for more details}
\item{slot}{Slot to calculate score values off of. Defaults to data slot (i.e. log-normalized counts)}
\item{...}{Extra parameters passed to \code{\link{UpdateSymbolList}}}
}
\value{
Returns a Seurat object with module scores added to object meta data;
each module is stored as \code{name#} for each module program present in
\code{features}
}
\description{
Calculate the average expression levels of each program (cluster) on single
cell level, subtracted by the aggregated expression of control feature sets.
All analyzed features are binned based on averaged expression, and the
control features are randomly selected from each bin.
}
\examples{
\dontrun{
data("pbmc_small")
cd_features <- list(c(
'CD79B',
'CD79A',
'CD19',
'CD180',
'CD200',
'CD3D',
'CD2',
'CD3E',
'CD7',
'CD8A',
'CD14',
'CD1C',
'CD68',
'CD9',
'CD247'
))
pbmc_small <- AddModuleScore(
object = pbmc_small,
features = cd_features,
ctrl = 5,
name = 'CD_Features'
)
head(x = pbmc_small[])
}
}
\references{
Tirosh et al, Science (2016)
}
\concept{utilities}
Seurat/man/Graph-class.Rd 0000644 0001762 0000144 00000000542 14525500037 014725 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reexports.R
\docType{class}
\name{Graph-class}
\alias{Graph-class}
\title{The Graph Class}
\description{
For more details, please see the documentation in
\code{\link[SeuratObject:Graph]{SeuratObject}}
}
\seealso{
\code{\link[SeuratObject:Graph]{SeuratObject::Graph-class}}
}
Seurat/man/MixingMetric.Rd 0000644 0001762 0000144 00000002275 14525500037 015165 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{MixingMetric}
\alias{MixingMetric}
\title{Calculates a mixing metric}
\usage{
MixingMetric(
object,
grouping.var,
reduction = "pca",
dims = 1:2,
k = 5,
max.k = 300,
eps = 0,
verbose = TRUE
)
}
\arguments{
\item{object}{Seurat object}
\item{grouping.var}{Grouping variable for dataset}
\item{reduction}{Which dimensionally reduced space to use}
\item{dims}{Dimensions to use}
\item{k}{Neighbor number to examine per group}
\item{max.k}{Maximum size of local neighborhood to compute}
\item{eps}{Error bound on the neighbor finding algorithm (from RANN)}
\item{verbose}{Displays progress bar}
}
\value{
Returns a vector of values of the mixing metric for each cell
}
\description{
Here we compute a measure of how well mixed a composite dataset is. To
compute, we first examine the local neighborhood for each cell (looking at
max.k neighbors) and determine for each group (could be the dataset after
integration) the k nearest neighbor and what rank that neighbor was in the
overall neighborhood. We then take the median across all groups as the mixing
metric per cell.
}
\concept{integration}
Seurat/man/FeatureScatter.Rd 0000644 0001762 0000144 00000005152 14525500037 015504 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{FeatureScatter}
\alias{FeatureScatter}
\alias{GenePlot}
\title{Scatter plot of single cell data}
\usage{
FeatureScatter(
object,
feature1,
feature2,
cells = NULL,
shuffle = FALSE,
seed = 1,
group.by = NULL,
split.by = NULL,
cols = NULL,
pt.size = 1,
shape.by = NULL,
span = NULL,
smooth = FALSE,
combine = TRUE,
slot = "data",
plot.cor = TRUE,
ncol = NULL,
raster = NULL,
raster.dpi = c(512, 512),
jitter = FALSE
)
}
\arguments{
\item{object}{Seurat object}
\item{feature1}{First feature to plot. Typically feature expression but can also
be metrics, PC scores, etc. - anything that can be retrieved with FetchData}
\item{feature2}{Second feature to plot.}
\item{cells}{Cells to include on the scatter plot.}
\item{shuffle}{Whether to randomly shuffle the order of points. This can be
useful for crowded plots if points of interest are being buried. (default is FALSE)}
\item{seed}{Sets the seed if randomly shuffling the order of points.}
\item{group.by}{Name of one or more metadata columns to group (color) cells by
(for example, orig.ident); pass 'ident' to group by identity class}
\item{split.by}{A factor in object metadata to split the feature plot by, pass 'ident'
to split by cell identity}
\item{cols}{Colors to use for identity class plotting.}
\item{pt.size}{Size of the points on the plot}
\item{shape.by}{Ignored for now}
\item{span}{Spline span in loess function call, if \code{NULL}, no spline added}
\item{smooth}{Smooth the graph (similar to smoothScatter)}
\item{combine}{Combine plots into a single \code{\link[patchwork]{patchwork}ed}}
\item{slot}{Slot to pull data from, should be one of 'counts', 'data', or 'scale.data'}
\item{plot.cor}{Display correlation in plot title}
\item{ncol}{Number of columns if plotting multiple plots}
\item{raster}{Convert points to raster format, default is \code{NULL}
which will automatically use raster if the number of points plotted is greater than
100,000}
\item{raster.dpi}{Pixel resolution for rasterized plots, passed to geom_scattermore().
Default is c(512, 512).}
\item{jitter}{Jitter for easier visualization of crowded points (default is FALSE)}
}
\value{
A ggplot object
}
\description{
Creates a scatter plot of two features (typically feature expression), across a
set of single cells. Cells are colored by their identity class. Pearson
correlation between the two features is displayed above the plot.
}
\examples{
data("pbmc_small")
FeatureScatter(object = pbmc_small, feature1 = 'CD9', feature2 = 'CD3E')
}
\concept{visualization}
Seurat/man/CalcPerturbSig.Rd 0000644 0001762 0000144 00000003611 14525500037 015432 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mixscape.R
\name{CalcPerturbSig}
\alias{CalcPerturbSig}
\title{Calculate a perturbation Signature}
\usage{
CalcPerturbSig(
object,
assay = NULL,
features = NULL,
slot = "data",
gd.class = "guide_ID",
nt.cell.class = "NT",
split.by = NULL,
num.neighbors = NULL,
reduction = "pca",
ndims = 15,
new.assay.name = "PRTB",
verbose = TRUE
)
}
\arguments{
\item{object}{An object of class Seurat.}
\item{assay}{Name of Assay PRTB signature is being calculated on.}
\item{features}{Features to compute PRTB signature for. Defaults to the
variable features set in the assay specified.}
\item{slot}{Data slot to use for PRTB signature calculation.}
\item{gd.class}{Metadata column containing target gene classification.}
\item{nt.cell.class}{Non-targeting gRNA cell classification identity.}
\item{split.by}{Provide metadata column if multiple biological replicates
exist to calculate PRTB signature for every replicate separately.}
\item{num.neighbors}{Number of nearest neighbors to consider.}
\item{reduction}{Reduction method used to calculate nearest neighbors.}
\item{ndims}{Number of dimensions to use from dimensionality reduction method.}
\item{new.assay.name}{Name for the new assay.}
\item{verbose}{Display progress + messages}
}
\value{
Returns a Seurat object with a new assay added containing the
perturbation signature for all cells in the data slot.
}
\description{
Function to calculate perturbation signature for pooled CRISPR screen datasets.
For each target cell (expressing one target gRNA), we identified 20 cells
from the control pool (non-targeting cells) with the most similar mRNA
expression profiles. The perturbation signature is calculated by subtracting the
averaged mRNA expression profile of the non-targeting neighbors from the mRNA
expression profile of the target cell.
}
\concept{mixscape}
Seurat/man/as.SingleCellExperiment.Rd 0000644 0001762 0000144 00000001142 14525500037 017242 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/objects.R
\name{as.SingleCellExperiment}
\alias{as.SingleCellExperiment}
\alias{as.SingleCellExperiment.Seurat}
\title{Convert objects to SingleCellExperiment objects}
\usage{
as.SingleCellExperiment(x, ...)
\method{as.SingleCellExperiment}{Seurat}(x, assay = NULL, ...)
}
\arguments{
\item{x}{An object to convert to class \code{SingleCellExperiment}}
\item{...}{Arguments passed to other methods}
\item{assay}{Assays to convert}
}
\description{
Convert objects to SingleCellExperiment objects
}
\concept{objects}
Seurat/man/SelectSCTIntegrationFeatures.Rd 0000644 0001762 0000144 00000001160 14525500037 020252 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{SelectSCTIntegrationFeatures}
\alias{SelectSCTIntegrationFeatures}
\title{Select SCT integration features}
\usage{
SelectSCTIntegrationFeatures(
object,
nfeatures = 3000,
assay = NULL,
verbose = TRUE,
...
)
}
\arguments{
\item{object}{Seurat object}
\item{nfeatures}{Number of features to return for integration}
\item{assay}{Name of assay to use for integration feature selection}
\item{verbose}{Print messages}
\item{...}{Arguments passed on to \code{method}}
}
\description{
Select SCT integration features
}
Seurat/man/RunTSNE.Rd 0000644 0001762 0000144 00000005525 14525500037 014025 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/dimensional_reduction.R
\name{RunTSNE}
\alias{RunTSNE}
\alias{RunTSNE.matrix}
\alias{RunTSNE.DimReduc}
\alias{RunTSNE.dist}
\alias{RunTSNE.Seurat}
\title{Run t-distributed Stochastic Neighbor Embedding}
\usage{
RunTSNE(object, ...)
\method{RunTSNE}{matrix}(
object,
assay = NULL,
seed.use = 1,
tsne.method = "Rtsne",
dim.embed = 2,
reduction.key = "tSNE_",
...
)
\method{RunTSNE}{DimReduc}(
object,
cells = NULL,
dims = 1:5,
seed.use = 1,
tsne.method = "Rtsne",
dim.embed = 2,
reduction.key = "tSNE_",
...
)
\method{RunTSNE}{dist}(
object,
assay = NULL,
seed.use = 1,
tsne.method = "Rtsne",
dim.embed = 2,
reduction.key = "tSNE_",
...
)
\method{RunTSNE}{Seurat}(
object,
reduction = "pca",
cells = NULL,
dims = 1:5,
features = NULL,
seed.use = 1,
tsne.method = "Rtsne",
dim.embed = 2,
distance.matrix = NULL,
reduction.name = "tsne",
reduction.key = "tSNE_",
...
)
}
\arguments{
\item{object}{Seurat object}
\item{...}{Arguments passed to other methods and to t-SNE call (most commonly used is perplexity)}
\item{assay}{Name of assay that that t-SNE is being run on}
\item{seed.use}{Random seed for the t-SNE. If NULL, does not set the seed}
\item{tsne.method}{Select the method to use to compute the tSNE. Available
methods are:
\itemize{
\item \dQuote{\code{Rtsne}}: Use the Rtsne package Barnes-Hut
implementation of tSNE (default)
\item \dQuote{\code{FIt-SNE}}: Use the FFT-accelerated Interpolation-based
t-SNE. Based on Kluger Lab code found here:
\url{https://github.com/KlugerLab/FIt-SNE}
}}
\item{dim.embed}{The dimensional space of the resulting tSNE embedding
(default is 2). For example, set to 3 for a 3d tSNE}
\item{reduction.key}{dimensional reduction key, specifies the string before
the number for the dimension names. \dQuote{\code{tSNE_}} by default}
\item{cells}{Which cells to analyze (default, all cells)}
\item{dims}{Which dimensions to use as input features}
\item{reduction}{Which dimensional reduction (e.g. PCA, ICA) to use for
the tSNE. Default is PCA}
\item{features}{If set, run the tSNE on this subset of features
(instead of running on a set of reduced dimensions). Not set (NULL) by default;
\code{dims} must be NULL to run on features}
\item{distance.matrix}{If set, runs tSNE on the given distance matrix
instead of data matrix (experimental)}
\item{reduction.name}{dimensional reduction name, specifies the position in the object$dr list. tsne by default}
}
\description{
Run t-SNE dimensionality reduction on selected features. Has the option of
running in a reduced dimensional space (i.e. spectral tSNE, recommended),
or running based on a set of genes. For details about stored TSNE calculation
parameters, see \code{PrintTSNEParams}.
}
\concept{dimensional_reduction}
Seurat/man/CountSketch.Rd 0000644 0001762 0000144 00000001375 14525500037 015020 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sketching.R
\name{CountSketch}
\alias{CountSketch}
\title{Generate CountSketch random matrix}
\usage{
CountSketch(nsketch, ncells, seed = NA_integer_, ...)
}
\arguments{
\item{nsketch}{Number of sketching random cells}
\item{ncells}{Number of cells in the original data}
\item{seed}{a single value, interpreted as an integer, or \code{NULL}
(see \sQuote{Details}).}
\item{...}{Ignored}
}
\value{
...
}
\description{
Generate CountSketch random matrix
}
\references{
Clarkson, KL. & Woodruff, DP.
Low-rank approximation and regression in input sparsity time.
Journal of the ACM (JACM). 2017 Jan 30;63(6):1-45.
\url{https://dl.acm.org/doi/abs/10.1145/3019134};
}
\keyword{internal}
Seurat/man/PrepLDA.Rd 0000644 0001762 0000144 00000002301 14525500037 014003 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mixscape.R
\name{PrepLDA}
\alias{PrepLDA}
\title{Function to prepare data for Linear Discriminant Analysis.}
\usage{
PrepLDA(
object,
de.assay = "RNA",
pc.assay = "PRTB",
labels = "gene",
nt.label = "NT",
npcs = 10,
verbose = TRUE,
logfc.threshold = 0.25
)
}
\arguments{
\item{object}{An object of class Seurat.}
\item{de.assay}{Assay to use for selection of DE genes.}
\item{pc.assay}{Assay to use for running principal component analysis.}
\item{labels}{Meta data column with target gene class labels.}
\item{nt.label}{Name of non-targeting cell class.}
\item{npcs}{Number of principal components to use.}
\item{verbose}{Print progress bar.}
\item{logfc.threshold}{Limit testing to genes which show, on average, at least
X-fold difference (log-scale) between the two groups of cells. Default is 0.25.
Increasing logfc.threshold speeds up the function, but can miss weaker signals.}
}
\value{
Returns a list of the first 10 PCs from each projection.
}
\description{
This function performs unsupervised PCA on each mixscape class separately and projects each subspace onto all
cells in the data.
}
\concept{mixscape}
Seurat/man/MetaFeature.Rd 0000644 0001762 0000144 00000001760 14525500037 014766 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{MetaFeature}
\alias{MetaFeature}
\title{Aggregate expression of multiple features into a single feature}
\usage{
MetaFeature(
object,
features,
meta.name = "metafeature",
cells = NULL,
assay = NULL,
slot = "data"
)
}
\arguments{
\item{object}{A Seurat object}
\item{features}{List of features to aggregate}
\item{meta.name}{Name of column in metadata to store metafeature}
\item{cells}{List of cells to use (default all cells)}
\item{assay}{Which assay to use}
\item{slot}{Which slot to take data from (default data)}
}
\value{
Returns a \code{Seurat} object with metafeature stored in object metadata
}
\description{
Calculates relative contribution of each feature to each cell
for given set of features.
}
\examples{
data("pbmc_small")
pbmc_small <- MetaFeature(
object = pbmc_small,
features = c("LTB", "EAF2"),
meta.name = 'var.aggregate'
)
head(pbmc_small[[]])
}
\concept{utilities}
Seurat/man/VariableFeaturePlot.Rd 0000644 0001762 0000144 00000002474 14525500037 016467 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{VariableFeaturePlot}
\alias{VariableFeaturePlot}
\alias{VariableGenePlot}
\alias{MeanVarPlot}
\title{View variable features}
\usage{
VariableFeaturePlot(
object,
cols = c("black", "red"),
pt.size = 1,
log = NULL,
selection.method = NULL,
assay = NULL,
raster = NULL,
raster.dpi = c(512, 512)
)
}
\arguments{
\item{object}{Seurat object}
\item{cols}{Colors to specify non-variable/variable status}
\item{pt.size}{Size of the points on the plot}
\item{log}{Plot the x-axis in log scale}
\item{selection.method}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}}
\item{assay}{Assay to pull variable features from}
\item{raster}{Convert points to raster format, default is \code{NULL}
which will automatically use raster if the number of points plotted is greater than
100,000}
\item{raster.dpi}{Pixel resolution for rasterized plots, passed to geom_scattermore().
Default is c(512, 512).}
}
\value{
A ggplot object
}
\description{
View variable features
}
\examples{
data("pbmc_small")
VariableFeaturePlot(object = pbmc_small)
}
\seealso{
\code{\link{FindVariableFeatures}}
}
\concept{visualization}
Seurat/man/CombinePlots.Rd 0000644 0001762 0000144 00000002123 14525500037 015154 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{CombinePlots}
\alias{CombinePlots}
\title{Combine ggplot2-based plots into a single plot}
\usage{
CombinePlots(plots, ncol = NULL, legend = NULL, ...)
}
\arguments{
\item{plots}{A list of gg objects}
\item{ncol}{Number of columns}
\item{legend}{Combine legends into a single legend
choose from 'right' or 'bottom'; pass 'none' to remove legends, or \code{NULL}
to leave legends as they are}
\item{...}{Extra parameters passed to plot_grid}
}
\value{
A combined plot
}
\description{
Combine ggplot2-based plots into a single plot
}
\examples{
data("pbmc_small")
pbmc_small[['group']] <- sample(
x = c('g1', 'g2'),
size = ncol(x = pbmc_small),
replace = TRUE
)
plot1 <- FeaturePlot(
object = pbmc_small,
features = 'MS4A1',
split.by = 'group'
)
plot2 <- FeaturePlot(
object = pbmc_small,
features = 'FCN1',
split.by = 'group'
)
CombinePlots(
plots = list(plot1, plot2),
legend = 'none',
nrow = length(x = unique(x = pbmc_small[['group', drop = TRUE]]))
)
}
\concept{visualization}
Seurat/man/LabelPoints.Rd 0000644 0001762 0000144 00000002450 14525500037 014775 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{LabelPoints}
\alias{LabelPoints}
\alias{Labeler}
\title{Add text labels to a ggplot2 plot}
\usage{
LabelPoints(
plot,
points,
labels = NULL,
repel = FALSE,
xnudge = 0.3,
ynudge = 0.05,
...
)
}
\arguments{
\item{plot}{A ggplot2 plot with a GeomPoint layer}
\item{points}{A vector of points to label; if \code{NULL}, will use all points in the plot}
\item{labels}{A vector of labels for the points; if \code{NULL}, will use
rownames of the data provided to the plot at the points selected}
\item{repel}{Use \code{geom_text_repel} to create a nicely-repelled labels; this
is slow when a lot of points are being plotted. If using \code{repel}, set \code{xnudge}
and \code{ynudge} to 0}
\item{xnudge, ynudge}{Amount to nudge X and Y coordinates of labels by}
\item{...}{Extra parameters passed to \code{geom_text}}
}
\value{
A ggplot object
}
\description{
Add text labels to a ggplot2 plot
}
\examples{
data("pbmc_small")
ff <- TopFeatures(object = pbmc_small[['pca']])
cc <- TopCells(object = pbmc_small[['pca']])
plot <- FeatureScatter(object = pbmc_small, feature1 = ff[1], feature2 = ff[2])
LabelPoints(plot = plot, points = cc)
}
\seealso{
\code{\link[ggplot2]{geom_text}}
}
\concept{visualization}
Seurat/man/ReadVizgen.Rd 0000644 0001762 0000144 00000012055 14525500037 014621 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R, R/convenience.R
\name{ReadVizgen}
\alias{ReadVizgen}
\alias{LoadVizgen}
\title{Read and Load MERFISH Input from Vizgen}
\usage{
ReadVizgen(
data.dir,
transcripts = NULL,
spatial = NULL,
molecules = NULL,
type = "segmentations",
mol.type = "microns",
metadata = NULL,
filter = NA_character_,
z = 3L
)
LoadVizgen(data.dir, fov, assay = "Vizgen", z = 3L)
}
\arguments{
\item{data.dir}{Path to the directory with Vizgen MERFISH files; requires at
least one of the following files present:
\itemize{
\item \dQuote{\code{cell_by_gene.csv}}: used for reading count matrix
\item \dQuote{\code{cell_metadata.csv}}: used for reading cell spatial
coordinate matrices
\item \dQuote{\code{detected_transcripts.csv}}: used for reading molecule
spatial coordinate matrices
}}
\item{transcripts}{Optional file path for counts matrix; pass \code{NA} to
suppress reading counts matrix}
\item{spatial}{Optional file path for spatial metadata; pass \code{NA} to
suppress reading spatial coordinates. If \code{spatial} is provided and
\code{type} is \dQuote{segmentations}, uses \code{dirname(spatial)} instead of
\code{data.dir} to find HDF5 files}
\item{molecules}{Optional file path for molecule coordinates file; pass
\code{NA} to suppress reading spatial molecule information}
\item{type}{Type of cell spatial coordinate matrices to read; choose one
or more of:
\itemize{
\item \dQuote{segmentations}: cell segmentation vertices; requires
\href{https://cran.r-project.org/package=hdf5r}{\pkg{hdf5r}} to be
installed and requires a directory \dQuote{\code{cell_boundaries}} within
\code{data.dir}. Within \dQuote{\code{cell_boundaries}}, there must be
one or more HDF5 files named \dQuote{\code{feature_data_##.hdf5}}
\item \dQuote{centroids}: cell centroids in micron coordinate space
\item \dQuote{boxes}: cell box outlines in micron coordinate space
}}
\item{mol.type}{Type of molecule spatial coordinate matrices to read;
choose one or more of:
\itemize{
\item \dQuote{pixels}: molecule coordinates in pixel space
\item \dQuote{microns}: molecule coordinates in micron space
}}
\item{metadata}{Type of available metadata to read;
choose zero or more of:
\itemize{
\item \dQuote{volume}: estimated cell volume
\item \dQuote{fov}: cell's fov
}}
\item{filter}{A character to filter molecules by, pass \code{NA} to skip
molecule filtering}
\item{z}{Z-index to load; must be between 0 and 6, inclusive}
\item{fov}{Name to store FOV as}
\item{assay}{Name to store expression matrix as}
}
\value{
\code{ReadVizgen}: A list with some combination of the
following values:
\itemize{
\item \dQuote{\code{transcripts}}: a
\link[Matrix:dgCMatrix-class]{sparse matrix} with expression data; cells
are columns and features are rows
\item \dQuote{\code{segmentations}}: a data frame with cell polygon outlines in
three columns: \dQuote{x}, \dQuote{y}, and \dQuote{cell}
\item \dQuote{\code{centroids}}: a data frame with cell centroid
coordinates in three columns: \dQuote{x}, \dQuote{y}, and \dQuote{cell}
\item \dQuote{\code{boxes}}: a data frame with cell box outlines in three
columns: \dQuote{x}, \dQuote{y}, and \dQuote{cell}
\item \dQuote{\code{microns}}: a data frame with molecule micron
coordinates in three columns: \dQuote{x}, \dQuote{y}, and \dQuote{gene}
\item \dQuote{\code{pixels}}: a data frame with molecule pixel coordinates
in three columns: \dQuote{x}, \dQuote{y}, and \dQuote{gene}
\item \dQuote{\code{metadata}}: a data frame with the cell-level metadata
requested by \code{metadata}
}
\code{LoadVizgen}: A \code{\link[SeuratObject]{Seurat}} object
}
\description{
Read and load in MERFISH data from Vizgen-formatted files
}
\note{
This function requires the
\href{https://cran.r-project.org/package=data.table}{\pkg{data.table}} package
to be installed
}
\section{Progress Updates with \pkg{progressr}}{
This function uses
\href{https://cran.r-project.org/package=progressr}{\pkg{progressr}} to
render status updates and progress bars. To enable progress updates, wrap
the function call in \code{\link[progressr]{with_progress}} or run
\code{\link[progressr:handlers]{handlers(global = TRUE)}} before running
this function. For more details about \pkg{progressr}, please read
\href{https://progressr.futureverse.org/articles/progressr-intro.html}{\code{vignette("progressr-intro")}}
}
\section{Parallelization with \pkg{future}}{
This function uses
\href{https://cran.r-project.org/package=future}{\pkg{future}} to enable
parallelization. Parallelization strategies can be set using
\code{\link[future]{plan}}. Common plans include \dQuote{\code{sequential}}
for non-parallelized processing or \dQuote{\code{multisession}} for parallel
evaluation using multiple \R sessions; for other plans, see the
\dQuote{Implemented evaluation strategies} section of
\code{\link[future:plan]{?future::plan}}. For a more thorough introduction
to \pkg{future}, see
\href{https://future.futureverse.org/articles/future-1-overview.html}{\code{vignette("future-1-overview")}}
}
\concept{future}
\concept{preprocessing}
Seurat/man/BarcodeInflectionsPlot.Rd 0000644 0001762 0000144 00000002033 14525500037 017152 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{BarcodeInflectionsPlot}
\alias{BarcodeInflectionsPlot}
\title{Plot the Barcode Distribution and Calculated Inflection Points}
\usage{
BarcodeInflectionsPlot(object)
}
\arguments{
\item{object}{Seurat object}
}
\value{
Returns a `ggplot2` object showing the by-group inflection points and provided
(or default) rank threshold values in grey.
}
\description{
This function plots the calculated inflection points derived from the barcode-rank
distribution.
}
\details{
See [CalculateBarcodeInflections()] to calculate inflection points and
[SubsetByBarcodeInflections()] to subsequently subset the Seurat object.
}
\examples{
data("pbmc_small")
pbmc_small <- CalculateBarcodeInflections(pbmc_small, group.column = 'groups')
BarcodeInflectionsPlot(pbmc_small)
}
\seealso{
\code{\link{CalculateBarcodeInflections}} \code{\link{SubsetByBarcodeInflections}}
}
\author{
Robert A. Amezquita, \email{robert.amezquita@fredhutch.org}
}
\concept{visualization}
Seurat/man/ProjectIntegration.Rd 0000644 0001762 0000144 00000004322 14525500037 016373 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{ProjectIntegration}
\alias{ProjectIntegration}
\title{Integrate embeddings from the integrated sketched.assay}
\usage{
ProjectIntegration(
object,
sketched.assay = "sketch",
assay = "RNA",
reduction = "integrated_dr",
features = NULL,
layers = "data",
reduction.name = NULL,
reduction.key = NULL,
method = c("sketch", "data"),
ratio = 0.8,
sketched.layers = NULL,
seed = 123,
verbose = TRUE
)
}
\arguments{
\item{object}{A Seurat object with all cells for one dataset}
\item{sketched.assay}{Assay name for sketched-cell expression (default is 'sketch')}
\item{assay}{Assay name for original expression (default is 'RNA')}
\item{reduction}{Dimensional reduction name for batch-corrected embeddings
in the sketched object (default is 'integrated_dr')}
\item{features}{Features used for atomic sketch integration}
\item{layers}{Names of layers for correction.}
\item{reduction.name}{Name to save new reduction as; defaults to
\code{paste0(reduction, '.orig')}}
\item{reduction.key}{Key for new dimensional reduction; defaults to creating
one from \code{reduction.name}}
\item{method}{Methods to construct sketch-cell representation
for all cells (default is 'sketch'). Can be one of:
\itemize{
\item \dQuote{\code{sketch}}: Use random sketched data slot
\item \dQuote{\code{data}}: Use data slot
}}
\item{ratio}{Sketch ratio of data slot when \code{method} is set
to \dQuote{\code{sketch}}; defaults to 0.8}
\item{sketched.layers}{Names of sketched layers, defaults to all
layers of \dQuote{\code{object[[assay]]}}}
\item{seed}{A positive integer. The seed for the random number generator, defaults to 123.}
\item{verbose}{Print progress and message}
}
\value{
Returns a Seurat object with an integrated dimensional reduction
}
\description{
The main steps of this procedure are outlined below. For a more detailed
description of the methodology, please see Hao, et al Biorxiv 2022:
\doi{10.1101/2022.02.24.481684}
}
\details{
First learn an atom dictionary representation to reconstruct each cell.
Then, using this dictionary representation,
reconstruct the embeddings of each cell from the integrated atoms.
}
Seurat/man/TransferAnchorSet-class.Rd 0000644 0001762 0000144 00000000612 14525500037 017255 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\docType{class}
\name{TransferAnchorSet-class}
\alias{TransferAnchorSet-class}
\alias{TransferAnchorSet}
\title{The TransferAnchorSet Class}
\description{
Inherits from the \code{AnchorSet} class. Implemented mainly for method dispatch
purposes. See \code{\link{AnchorSet}} for slot details.
}
\concept{objects}
Seurat/man/SlideSeq-class.Rd 0000644 0001762 0000144 00000001457 14525500037 015403 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\docType{class}
\name{SlideSeq-class}
\alias{SlideSeq-class}
\alias{SlideSeq}
\title{The SlideSeq class}
\description{
The SlideSeq class represents spatial information from the Slide-seq platform
}
\section{Slots}{
\describe{
\item{\code{coordinates}}{...}
}}
\section{Slots}{
\describe{
\item{\code{assay}}{Name of assay to associate image data with; will give this image
priority for visualization when the assay is set as the active/default assay
in a \code{Seurat} object}
\item{\code{key}}{A one-length character vector with the object's key; keys must
be one or more alphanumeric characters followed by an underscore
\dQuote{\code{_}} (regex pattern
\dQuote{\code{^[a-zA-Z][a-zA-Z0-9]*_$}})}
}
}
\concept{spatial}
Seurat/man/JackStraw.Rd 0000644 0001762 0000144 00000003302 14525500037 014447 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dimensional_reduction.R
\name{JackStraw}
\alias{JackStraw}
\title{Determine statistical significance of PCA scores.}
\usage{
JackStraw(
object,
reduction = "pca",
assay = NULL,
dims = 20,
num.replicate = 100,
prop.freq = 0.01,
verbose = TRUE,
maxit = 1000
)
}
\arguments{
\item{object}{Seurat object}
\item{reduction}{DimReduc to use. ONLY PCA CURRENTLY SUPPORTED.}
\item{assay}{Assay used to calculate reduction.}
\item{dims}{Number of PCs to compute significance for}
\item{num.replicate}{Number of replicate samplings to perform}
\item{prop.freq}{Proportion of the data to randomly permute for each
replicate}
\item{verbose}{Print progress bar showing the number of replicates
that have been processed.}
\item{maxit}{maximum number of iterations to be performed by the irlba function of RunPCA}
}
\value{
Returns a Seurat object where JS(object = object[['pca']], slot = 'empirical')
represents p-values for each gene in the PCA analysis. If ProjectPCA is
subsequently run, JS(object = object[['pca']], slot = 'full') then
represents p-values for all genes.
}
\description{
Randomly permutes a subset of data, and calculates projected PCA scores for
these 'random' genes. Then compares the PCA scores for the 'random' genes
with the observed PCA scores to determine statistical significance. End result
is a p-value for each gene's association with each principal component.
}
\examples{
\dontrun{
data("pbmc_small")
pbmc_small = suppressWarnings(JackStraw(pbmc_small))
head(JS(object = pbmc_small[['pca']], slot = 'empirical'))
}
}
\references{
Inspired by Chung et al, Bioinformatics (2014)
}
\concept{dimensional_reduction}
Seurat/man/SingleImagePlot.Rd 0000644 0001762 0000144 00000005651 14525500037 015612 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{SingleImagePlot}
\alias{SingleImagePlot}
\title{Single Spatial Plot}
\usage{
SingleImagePlot(
data,
col.by = NA,
col.factor = TRUE,
cols = NULL,
shuffle.cols = FALSE,
size = 0.1,
molecules = NULL,
mols.size = 0.1,
mols.cols = NULL,
mols.alpha = 1,
alpha = molecules \%iff\% 0.3 \%||\% 0.6,
border.color = "white",
border.size = NULL,
na.value = "grey50",
dark.background = TRUE,
...
)
}
\arguments{
\item{data}{A data frame with at least the following columns:
\itemize{
\item \dQuote{\code{x}}: Spatially-resolved \emph{x} coordinates, will be
plotted on the \emph{y}-axis
\item \dQuote{\code{y}}: Spatially-resolved \emph{y} coordinates, will be
plotted on the \emph{x}-axis
\item \dQuote{\code{cell}}: Cell name
\item \dQuote{\code{boundary}}: Segmentation boundary label; when plotting
multiple segmentation layers, the order of boundary transparency is set by
factor levels for this column
}
Can pass \code{NA} to \code{data} to suppress segmentation visualization}
\item{col.by}{Name of column in \code{data} to color cell segmentations by;
pass \code{NA} to suppress coloring}
\item{col.factor}{Should the coloring values be treated as discrete
(factor) rather than continuous?}
\item{cols}{Colors for cell segmentations; can be one of the
following:
\itemize{
\item \code{NULL} for default ggplot2 colors
\item A numeric value or name of a
\link[RColorBrewer:RColorBrewer]{color brewer palette}
\item Name of a palette for \code{\link{DiscretePalette}}
\item A vector of colors equal to the length of unique levels of
\code{data$col.by}
}}
\item{shuffle.cols}{Randomly shuffle colors when a palette or
vector of colors is provided to \code{cols}}
\item{size}{Point size for cells when plotting centroids}
\item{molecules}{A data frame with spatially-resolved molecule coordinates;
should have the following columns:
\itemize{
\item \dQuote{\code{x}}: Spatially-resolved \emph{x} coordinates, will be
plotted on the \emph{y}-axis
\item \dQuote{\code{y}}: Spatially-resolved \emph{y} coordinates, will be
plotted on the \emph{x}-axis
\item \dQuote{\code{molecule}}: Molecule name
}}
\item{mols.size}{Point size for molecules}
\item{mols.cols}{A vector of colors for molecules. The "Set1" palette from
RColorBrewer is used by default.}
\item{mols.alpha}{Alpha value for molecules, should be between 0 and 1}
\item{alpha}{Alpha value, should be between 0 and 1; when plotting multiple
boundaries, \code{alpha} is equivalent to max alpha}
\item{border.color}{Color of cell segmentation border; pass \code{NA}
to suppress borders for segmentation-based plots}
\item{border.size}{Thickness of cell segmentation borders; pass \code{NA}
to suppress borders for centroid-based plots}
\item{na.value}{Color value for \code{NA} segmentations when
using custom scale}
\item{...}{Ignored}
}
\value{
A ggplot object
}
\description{
Single Spatial Plot
}
\keyword{internal}
Seurat/man/SelectIntegrationFeatures5.Rd 0000644 0001762 0000144 00000001751 14525500037 017773 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{SelectIntegrationFeatures5}
\alias{SelectIntegrationFeatures5}
\title{Select integration features}
\usage{
SelectIntegrationFeatures5(
object,
nfeatures = 2000,
assay = NULL,
method = NULL,
layers = NULL,
verbose = TRUE,
...
)
}
\arguments{
\item{object}{Seurat object}
\item{nfeatures}{Number of features to return for integration}
\item{assay}{Name of assay to use for integration feature selection}
\item{method}{Which method to pull. For \code{HVFInfo} and
\code{VariableFeatures}, choose one from one of the
following:
\itemize{
\item \dQuote{vst}
\item \dQuote{sctransform} or \dQuote{sct}
\item \dQuote{mean.var.plot}, \dQuote{dispersion}, \dQuote{mvp}, or
\dQuote{disp}
}}
\item{layers}{Name of layers to use for integration feature selection}
\item{verbose}{Print messages}
\item{...}{Arguments passed on to \code{method}}
}
\description{
Select integration features
}
Seurat/man/LoadAnnoyIndex.Rd 0000644 0001762 0000144 00000000643 14525500037 015437 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{LoadAnnoyIndex}
\alias{LoadAnnoyIndex}
\title{Load the Annoy index file}
\usage{
LoadAnnoyIndex(object, file)
}
\arguments{
\item{object}{Neighbor object}
\item{file}{Path to file with annoy index}
}
\value{
Returns the Neighbor object with the index stored
}
\description{
Load the Annoy index file
}
\concept{utilities}
Seurat/man/SplitObject.Rd 0000644 0001762 0000144 00000002157 14525500037 015007 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{SplitObject}
\alias{SplitObject}
\title{Splits object into a list of subsetted objects.}
\usage{
SplitObject(object, split.by = "ident")
}
\arguments{
\item{object}{Seurat object}
\item{split.by}{Attribute for splitting. Default is "ident". Currently
only supported for class-level (i.e. non-quantitative) attributes.}
}
\value{
A named list of Seurat objects, each containing a subset of cells
from the original object.
}
\description{
Splits object based on a single attribute into a list of subsetted objects,
one for each level of the attribute. For example, useful for taking an object
that contains cells from many patients, and subdividing it into
patient-specific objects.
}
\examples{
data("pbmc_small")
# Assign the test object a three level attribute
groups <- sample(c("group1", "group2", "group3"), size = 80, replace = TRUE)
names(groups) <- colnames(pbmc_small)
pbmc_small <- AddMetaData(object = pbmc_small, metadata = groups, col.name = "group")
obj.list <- SplitObject(pbmc_small, split.by = "group")
}
\concept{objects}
Seurat/man/LabelClusters.Rd 0000644 0001762 0000144 00000003050 14525500037 015322 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{LabelClusters}
\alias{LabelClusters}
\title{Label clusters on a ggplot2-based scatter plot}
\usage{
LabelClusters(
plot,
id,
clusters = NULL,
labels = NULL,
split.by = NULL,
repel = TRUE,
box = FALSE,
geom = "GeomPoint",
position = "median",
...
)
}
\arguments{
\item{plot}{A ggplot2-based scatter plot}
\item{id}{Name of variable used for coloring scatter plot}
\item{clusters}{Vector of cluster ids to label}
\item{labels}{Custom labels for the clusters}
\item{split.by}{Split labels by some grouping label, useful when using
\code{\link[ggplot2]{facet_wrap}} or \code{\link[ggplot2]{facet_grid}}}
\item{repel}{Use \code{geom_text_repel} to create nicely-repelled labels}
\item{box}{Use geom_label/geom_label_repel (includes a box around the text
labels)}
\item{geom}{Name of geom to get X/Y aesthetic names for}
\item{position}{How to place the label if repel = FALSE. If "median", place
the label at the median position. If "nearest" place the label at the
position of the nearest data point to the median.}
\item{...}{Extra parameters to \code{\link[ggrepel]{geom_text_repel}}, such as \code{size}}
}
\value{
A ggplot2-based scatter plot with cluster labels
}
\description{
Label clusters on a ggplot2-based scatter plot
}
\examples{
data("pbmc_small")
plot <- DimPlot(object = pbmc_small)
LabelClusters(plot = plot, id = 'ident')
}
\seealso{
\code{\link[ggrepel]{geom_text_repel}} \code{\link[ggplot2]{geom_text}}
}
\concept{visualization}
Seurat/man/FindClusters.Rd 0000644 0001762 0000144 00000006325 14525500037 015173 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/clustering.R
\name{FindClusters}
\alias{FindClusters}
\alias{FindClusters.default}
\alias{FindClusters.Seurat}
\title{Cluster Determination}
\usage{
FindClusters(object, ...)
\method{FindClusters}{default}(
object,
modularity.fxn = 1,
initial.membership = NULL,
node.sizes = NULL,
resolution = 0.8,
method = "matrix",
algorithm = 1,
n.start = 10,
n.iter = 10,
random.seed = 0,
group.singletons = TRUE,
temp.file.location = NULL,
edge.file.name = NULL,
verbose = TRUE,
...
)
\method{FindClusters}{Seurat}(
object,
graph.name = NULL,
cluster.name = NULL,
modularity.fxn = 1,
initial.membership = NULL,
node.sizes = NULL,
resolution = 0.8,
method = "matrix",
algorithm = 1,
n.start = 10,
n.iter = 10,
random.seed = 0,
group.singletons = TRUE,
temp.file.location = NULL,
edge.file.name = NULL,
verbose = TRUE,
...
)
}
\arguments{
\item{object}{An object}
\item{...}{Arguments passed to other methods}
\item{modularity.fxn}{Modularity function (1 = standard; 2 = alternative).}
\item{initial.membership, node.sizes}{Parameters to pass to the Python leidenalg function.}
\item{resolution}{Value of the resolution parameter, use a value above
(below) 1.0 if you want to obtain a larger (smaller) number of communities.}
\item{method}{Method for running leiden (defaults to matrix which is fast for small datasets).
Enable method = "igraph" to avoid casting large data to a dense matrix.}
\item{algorithm}{Algorithm for modularity optimization (1 = original Louvain
algorithm; 2 = Louvain algorithm with multilevel refinement; 3 = SLM
algorithm; 4 = Leiden algorithm). Leiden requires the leidenalg python package.}
\item{n.start}{Number of random starts.}
\item{n.iter}{Maximal number of iterations per random start.}
\item{random.seed}{Seed of the random number generator.}
\item{group.singletons}{Group singletons into nearest cluster. If FALSE, assign all singletons to
a "singleton" group}
\item{temp.file.location}{Directory where intermediate files will be written.
Specify the ABSOLUTE path.}
\item{edge.file.name}{Edge file to use as input for modularity optimizer jar.}
\item{verbose}{Print output}
\item{graph.name}{Name of graph to use for the clustering algorithm}
\item{cluster.name}{Name of output clusters}
}
\value{
Returns a Seurat object where the idents have been updated with new cluster info;
latest clustering results will be stored in object metadata under 'seurat_clusters'.
Note that 'seurat_clusters' will be overwritten every time FindClusters is run
}
\description{
Identify clusters of cells by a shared nearest neighbor (SNN) modularity
optimization based clustering algorithm. First calculate k-nearest neighbors
and construct the SNN graph. Then optimize the modularity function to
determine clusters. For a full description of the algorithms, see Waltman and
van Eck (2013) \emph{The European Physical Journal B}. Thanks to Nigel
Delaney (evolvedmicrobe@github) for the rewrite of the Java modularity
optimizer code in Rcpp!
}
\details{
To run Leiden algorithm, you must first install the leidenalg python
package (e.g. via pip install leidenalg), see Traag et al (2018).
}
\concept{clustering}
Seurat/man/PseudobulkExpression.Rd 0000644 0001762 0000144 00000000641 14525500037 016756 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R
\name{PseudobulkExpression}
\alias{PseudobulkExpression}
\title{Pseudobulk Expression}
\usage{
PseudobulkExpression(object, ...)
}
\arguments{
\item{object}{An assay}
\item{...}{Arguments passed to other methods}
}
\value{
Returns object after normalization
}
\description{
Normalize the count data present in a given assay.
}
Seurat/man/PrepSCTFindMarkers.Rd 0000644 0001762 0000144 00000005462 14525500037 016175 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/differential_expression.R
\name{PrepSCTFindMarkers}
\alias{PrepSCTFindMarkers}
\title{Prepare object to run differential expression on SCT assay with multiple models}
\usage{
PrepSCTFindMarkers(object, assay = "SCT", verbose = TRUE)
}
\arguments{
\item{object}{Seurat object with SCT assays}
\item{assay}{Assay name where for SCT objects are stored; Default is 'SCT'}
\item{verbose}{Print messages and progress}
}
\value{
Returns a Seurat object with recorrected counts and data in the SCT assay.
}
\description{
Given a merged object with multiple SCT models, this function uses minimum
of the median UMI (calculated using the raw UMI counts) of individual objects
to reverse the individual SCT regression model using minimum of median UMI
as the sequencing depth covariate.
The counts slot of the SCT assay is replaced with recorrected counts and
the data slot is replaced with log1p of recorrected counts.
}
\section{Progress Updates with \pkg{progressr}}{
This function uses
\href{https://cran.r-project.org/package=progressr}{\pkg{progressr}} to
render status updates and progress bars. To enable progress updates, wrap
the function call in \code{\link[progressr]{with_progress}} or run
\code{\link[progressr:handlers]{handlers(global = TRUE)}} before running
this function. For more details about \pkg{progressr}, please read
\href{https://progressr.futureverse.org/articles/progressr-intro.html}{\code{vignette("progressr-intro")}}
}
\section{Parallelization with \pkg{future}}{
This function uses
\href{https://cran.r-project.org/package=future}{\pkg{future}} to enable
parallelization. Parallelization strategies can be set using
\code{\link[future]{plan}}. Common plans include \dQuote{\code{sequential}}
for non-parallelized processing or \dQuote{\code{multisession}} for parallel
evaluation using multiple \R sessions; for other plans, see the
\dQuote{Implemented evaluation strategies} section of
\code{\link[future:plan]{?future::plan}}. For a more thorough introduction
to \pkg{future}, see
\href{https://future.futureverse.org/articles/future-1-overview.html}{\code{vignette("future-1-overview")}}
}
\examples{
data("pbmc_small")
pbmc_small1 <- SCTransform(object = pbmc_small, variable.features.n = 20, vst.flavor="v1")
pbmc_small2 <- SCTransform(object = pbmc_small, variable.features.n = 20, vst.flavor="v1")
pbmc_merged <- merge(x = pbmc_small1, y = pbmc_small2)
pbmc_merged <- PrepSCTFindMarkers(object = pbmc_merged)
markers <- FindMarkers(
object = pbmc_merged,
ident.1 = "0",
ident.2 = "1",
assay = "SCT"
)
pbmc_subset <- subset(pbmc_merged, idents = c("0", "1"))
markers_subset <- FindMarkers(
object = pbmc_subset,
ident.1 = "0",
ident.2 = "1",
assay = "SCT",
recorrect_umi = FALSE
)
}
\concept{differential_expression}
\concept{future}
Seurat/man/reexports.Rd 0000644 0001762 0000144 00000010160 14525500056 014612 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reexports.R
\docType{import}
\name{reexports}
\alias{reexports}
\alias{components}
\alias{\%||\%}
\alias{\%iff\%}
\alias{AddMetaData}
\alias{as.Graph}
\alias{as.Neighbor}
\alias{as.Seurat}
\alias{as.sparse}
\alias{Assays}
\alias{Cells}
\alias{CellsByIdentities}
\alias{Command}
\alias{CreateAssayObject}
\alias{CreateDimReducObject}
\alias{CreateSeuratObject}
\alias{DefaultAssay}
\alias{DefaultAssay<-}
\alias{Distances}
\alias{Embeddings}
\alias{FetchData}
\alias{GetAssayData}
\alias{GetImage}
\alias{GetTissueCoordinates}
\alias{HVFInfo}
\alias{Idents}
\alias{Idents<-}
\alias{Images}
\alias{Index}
\alias{Index<-}
\alias{Indices}
\alias{IsGlobal}
\alias{JS}
\alias{JS<-}
\alias{Key}
\alias{Key<-}
\alias{Loadings}
\alias{Loadings<-}
\alias{LogSeuratCommand}
\alias{Misc}
\alias{Misc<-}
\alias{Neighbors}
\alias{Project}
\alias{Project<-}
\alias{Radius}
\alias{Reductions}
\alias{RenameCells}
\alias{RenameIdents}
\alias{ReorderIdent}
\alias{RowMergeSparseMatrices}
\alias{SetAssayData}
\alias{SetIdent}
\alias{SpatiallyVariableFeatures}
\alias{StashIdent}
\alias{Stdev}
\alias{SVFInfo}
\alias{Tool}
\alias{Tool<-}
\alias{UpdateSeuratObject}
\alias{VariableFeatures}
\alias{VariableFeatures<-}
\alias{WhichCells}
\title{Objects exported from other packages}
\keyword{internal}
\description{
These objects are imported from other packages. Follow the links
below to see their documentation.
\describe{
\item{generics}{\code{\link[generics]{components}}}
\item{SeuratObject}{\code{\link[SeuratObject:set-if-null]{\%||\%}}, \code{\link[SeuratObject:set-if-null]{\%iff\%}}, \code{\link[SeuratObject]{AddMetaData}}, \code{\link[SeuratObject]{as.Graph}}, \code{\link[SeuratObject]{as.Neighbor}}, \code{\link[SeuratObject]{as.Seurat}}, \code{\link[SeuratObject]{as.sparse}}, \code{\link[SeuratObject:ObjectAccess]{Assays}}, \code{\link[SeuratObject]{Cells}}, \code{\link[SeuratObject]{CellsByIdentities}}, \code{\link[SeuratObject]{Command}}, \code{\link[SeuratObject]{CreateAssayObject}}, \code{\link[SeuratObject]{CreateDimReducObject}}, \code{\link[SeuratObject]{CreateSeuratObject}}, \code{\link[SeuratObject]{DefaultAssay}}, \code{\link[SeuratObject:DefaultAssay]{DefaultAssay<-}}, \code{\link[SeuratObject]{Distances}}, \code{\link[SeuratObject]{Embeddings}}, \code{\link[SeuratObject]{FetchData}}, \code{\link[SeuratObject:AssayData]{GetAssayData}}, \code{\link[SeuratObject]{GetImage}}, \code{\link[SeuratObject]{GetTissueCoordinates}}, \code{\link[SeuratObject:VariableFeatures]{HVFInfo}}, \code{\link[SeuratObject]{Idents}}, \code{\link[SeuratObject:Idents]{Idents<-}}, \code{\link[SeuratObject]{Images}}, \code{\link[SeuratObject:NNIndex]{Index}}, \code{\link[SeuratObject:NNIndex]{Index<-}}, \code{\link[SeuratObject]{Indices}}, \code{\link[SeuratObject]{IsGlobal}}, \code{\link[SeuratObject]{JS}}, \code{\link[SeuratObject:JS]{JS<-}}, \code{\link[SeuratObject]{Key}}, \code{\link[SeuratObject:Key]{Key<-}}, \code{\link[SeuratObject]{Loadings}}, \code{\link[SeuratObject:Loadings]{Loadings<-}}, \code{\link[SeuratObject]{LogSeuratCommand}}, \code{\link[SeuratObject]{Misc}}, \code{\link[SeuratObject:Misc]{Misc<-}}, \code{\link[SeuratObject:ObjectAccess]{Neighbors}}, \code{\link[SeuratObject]{Project}}, \code{\link[SeuratObject:Project]{Project<-}}, \code{\link[SeuratObject]{Radius}}, \code{\link[SeuratObject:ObjectAccess]{Reductions}}, \code{\link[SeuratObject]{RenameCells}}, \code{\link[SeuratObject:Idents]{RenameIdents}}, 
\code{\link[SeuratObject:Idents]{ReorderIdent}}, \code{\link[SeuratObject]{RowMergeSparseMatrices}}, \code{\link[SeuratObject:AssayData]{SetAssayData}}, \code{\link[SeuratObject:Idents]{SetIdent}}, \code{\link[SeuratObject:VariableFeatures]{SpatiallyVariableFeatures}}, \code{\link[SeuratObject:Idents]{StashIdent}}, \code{\link[SeuratObject]{Stdev}}, \code{\link[SeuratObject:VariableFeatures]{SVFInfo}}, \code{\link[SeuratObject]{Tool}}, \code{\link[SeuratObject:Tool]{Tool<-}}, \code{\link[SeuratObject]{UpdateSeuratObject}}, \code{\link[SeuratObject]{VariableFeatures}}, \code{\link[SeuratObject:VariableFeatures]{VariableFeatures<-}}, \code{\link[SeuratObject]{WhichCells}}}
}}
Seurat/man/RunICA.Rd 0000644 0001762 0000144 00000004263 14525500037 013646 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/dimensional_reduction.R
\name{RunICA}
\alias{RunICA}
\alias{RunICA.default}
\alias{RunICA.Assay}
\alias{RunICA.Seurat}
\title{Run Independent Component Analysis on gene expression}
\usage{
RunICA(object, ...)
\method{RunICA}{default}(
object,
assay = NULL,
nics = 50,
rev.ica = FALSE,
ica.function = "icafast",
verbose = TRUE,
ndims.print = 1:5,
nfeatures.print = 30,
reduction.name = "ica",
reduction.key = "ica_",
seed.use = 42,
...
)
\method{RunICA}{Assay}(
object,
assay = NULL,
features = NULL,
nics = 50,
rev.ica = FALSE,
ica.function = "icafast",
verbose = TRUE,
ndims.print = 1:5,
nfeatures.print = 30,
reduction.name = "ica",
reduction.key = "ica_",
seed.use = 42,
...
)
\method{RunICA}{Seurat}(
object,
assay = NULL,
features = NULL,
nics = 50,
rev.ica = FALSE,
ica.function = "icafast",
verbose = TRUE,
ndims.print = 1:5,
nfeatures.print = 30,
reduction.name = "ica",
reduction.key = "IC_",
seed.use = 42,
...
)
}
\arguments{
\item{object}{Seurat object}
\item{\dots}{Additional arguments to be passed to fastica}
\item{assay}{Name of Assay ICA is being run on}
\item{nics}{Number of ICs to compute}
\item{rev.ica}{By default, computes the dimensional reduction on the cell x
feature matrix. Setting to true will compute it on the transpose (feature x cell
matrix).}
\item{ica.function}{ICA function from ica package to run (options: icafast,
icaimax, icajade)}
\item{verbose}{Print the top genes associated with high/low loadings for
the ICs}
\item{ndims.print}{ICs to print genes for}
\item{nfeatures.print}{Number of genes to print for each IC}
\item{reduction.name}{dimensional reduction name}
\item{reduction.key}{dimensional reduction key, specifies the string before
the number for the dimension names.}
\item{seed.use}{Set a random seed. Setting NULL will not set a seed.}
\item{features}{Features to compute ICA on}
}
\description{
Run fastica algorithm from the ica package for ICA dimensionality reduction.
For details about stored ICA calculation parameters, see
\code{PrintICAParams}.
}
\concept{dimensional_reduction}
Seurat/man/DietSeurat.Rd 0000644 0001762 0000144 00000002557 14525500037 014642 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{DietSeurat}
\alias{DietSeurat}
\title{Slim down a Seurat object}
\usage{
DietSeurat(
object,
layers = NULL,
features = NULL,
assays = NULL,
dimreducs = NULL,
graphs = NULL,
misc = TRUE,
counts = deprecated(),
data = deprecated(),
scale.data = deprecated(),
...
)
}
\arguments{
\item{object}{A \code{\link[SeuratObject]{Seurat}} object}
\item{layers}{A vector or named list of layers to keep}
\item{features}{Only keep a subset of features, defaults to all features}
\item{assays}{Only keep a subset of assays specified here}
\item{dimreducs}{Only keep a subset of DimReducs specified here (if
\code{NULL}, remove all DimReducs)}
\item{graphs}{Only keep a subset of Graphs specified here (if \code{NULL},
remove all Graphs)}
\item{misc}{Preserve the \code{misc} slot; default is \code{TRUE}}
\item{counts}{Preserve the count matrices for the assays specified}
\item{data}{Preserve the data matrices for the assays specified}
\item{scale.data}{Preserve the scale data matrices for the assays specified}
\item{...}{Ignored}
}
\value{
\code{object} with only the sub-object specified retained
}
\description{
Keep only certain aspects of the Seurat object. Can be useful in functions
that utilize merge as it reduces the amount of data in the merge
}
\concept{objects}
Seurat/man/PlotPerturbScore.Rd 0000644 0001762 0000144 00000002727 14525500037 016046 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mixscape.R
\name{PlotPerturbScore}
\alias{PlotPerturbScore}
\title{Function to plot perturbation score distributions.}
\usage{
PlotPerturbScore(
object,
target.gene.class = "gene",
target.gene.ident = NULL,
mixscape.class = "mixscape_class",
col = "orange2",
split.by = NULL,
before.mixscape = FALSE,
prtb.type = "KO"
)
}
\arguments{
\item{object}{An object of class Seurat.}
\item{target.gene.class}{meta data column specifying all target gene names in the experiment.}
\item{target.gene.ident}{Target gene name to visualize perturbation scores for.}
\item{mixscape.class}{meta data column specifying mixscape classifications.}
\item{col}{Specify color of target gene class or knockout cell class. For
control non-targeting and non-perturbed cells, colors are set to different
shades of grey.}
\item{split.by}{For datasets with more than one cell type. Set equal to TRUE to visualize perturbation scores for each cell type separately.}
\item{before.mixscape}{Option to split densities based on mixscape classification (default) or original target gene classification.
Default is set to FALSE and plots cells by original class ID.}
\item{prtb.type}{specify type of CRISPR perturbation expected for labeling mixscape classifications. Default is KO.}
}
\value{
A ggplot object.
}
\description{
Density plots to visualize perturbation scores calculated from RunMixscape
function.
}
\concept{mixscape}
Seurat/man/FastRowScale.Rd 0000644 0001762 0000144 00000001300 14525500037 015107 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{FastRowScale}
\alias{FastRowScale}
\title{Scale and/or center matrix rowwise}
\usage{
FastRowScale(mat, center = TRUE, scale = TRUE, scale_max = 10)
}
\arguments{
\item{mat}{A matrix}
\item{center}{a logical value indicating whether to center the rows}
\item{scale}{a logical value indicating whether to scale the rows}
\item{scale_max}{clip all values greater than scale_max to scale_max. Don't
clip if Inf.}
}
\value{
Returns the center/scaled matrix
}
\description{
Performs row scaling and/or centering. Equivalent to using t(scale(t(mat)))
in R except in the case of NA values.
}
\concept{utilities}
Seurat/man/ModalityWeights-class.Rd 0000644 0001762 0000144 00000001764 14525500037 017010 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\docType{class}
\name{ModalityWeights-class}
\alias{ModalityWeights-class}
\alias{ModalityWeights}
\title{The ModalityWeights Class}
\description{
The ModalityWeights class is an intermediate data storage class that stores the modality weight and other
related information needed for performing downstream analyses - namely data integration
(\code{FindModalityWeights}) and data transfer (\code{\link{FindMultiModalNeighbors}}).
}
\section{Slots}{
\describe{
\item{\code{modality.weight.list}}{A list of modality weights value from all modalities}
\item{\code{modality.assay}}{Names of assays for the list of dimensional reductions}
\item{\code{params}}{A list of parameters used in the FindModalityWeights}
\item{\code{score.matrix}}{a list of score matrices representing cross and within-modality prediction
score, and kernel value}
\item{\code{command}}{Store log of parameters that were used}
}}
\concept{objects}
Seurat/man/MixscapeHeatmap.Rd 0000644 0001762 0000144 00000012113 14525500037 015627 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mixscape.R
\name{MixscapeHeatmap}
\alias{MixscapeHeatmap}
\title{Differential expression heatmap for mixscape}
\usage{
MixscapeHeatmap(
object,
ident.1 = NULL,
ident.2 = NULL,
balanced = TRUE,
logfc.threshold = 0.25,
assay = "RNA",
max.genes = 100,
test.use = "wilcox",
max.cells.group = NULL,
order.by.prob = TRUE,
group.by = NULL,
mixscape.class = "mixscape_class",
prtb.type = "KO",
fc.name = "avg_log2FC",
pval.cutoff = 0.05,
...
)
}
\arguments{
\item{object}{An object}
\item{ident.1}{Identity class to define markers for; pass an object of class
\code{phylo} or 'clustertree' to find markers for a node in a cluster tree;
passing 'clustertree' requires \code{\link{BuildClusterTree}} to have been run}
\item{ident.2}{A second identity class for comparison; if \code{NULL},
use all other cells for comparison; if an object of class \code{phylo} or
'clustertree' is passed to \code{ident.1}, must pass a node to find markers for}
\item{balanced}{Plot an equal number of genes with both groups of cells.}
\item{logfc.threshold}{Limit testing to genes which show, on average, at least
X-fold difference (log-scale) between the two groups of cells. Default is 0.25.
Increasing logfc.threshold speeds up the function, but can miss weaker signals.}
\item{assay}{Assay to use in differential expression testing}
\item{max.genes}{Total number of DE genes to plot.}
\item{test.use}{Denotes which test to use. Available options are:
\itemize{
\item{"wilcox"} : Identifies differentially expressed genes between two
groups of cells using a Wilcoxon Rank Sum test (default); will use a fast
implementation by Presto if installed
\item{"wilcox_limma"} : Identifies differentially expressed genes between two
groups of cells using the limma implementation of the Wilcoxon Rank Sum test;
set this option to reproduce results from Seurat v4
\item{"bimod"} : Likelihood-ratio test for single cell gene expression,
(McDavid et al., Bioinformatics, 2013)
\item{"roc"} : Identifies 'markers' of gene expression using ROC analysis.
For each gene, evaluates (using AUC) a classifier built on that gene alone,
to classify between two groups of cells. An AUC value of 1 means that
expression values for this gene alone can perfectly classify the two
groupings (i.e. Each of the cells in cells.1 exhibit a higher level than
each of the cells in cells.2). An AUC value of 0 also means there is perfect
classification, but in the other direction. A value of 0.5 implies that
the gene has no predictive power to classify the two groups. Returns a
'predictive power' (abs(AUC-0.5) * 2) ranked matrix of putative differentially
expressed genes.
\item{"t"} : Identify differentially expressed genes between two groups of
cells using the Student's t-test.
\item{"negbinom"} : Identifies differentially expressed genes between two
groups of cells using a negative binomial generalized linear model.
Use only for UMI-based datasets
\item{"poisson"} : Identifies differentially expressed genes between two
groups of cells using a poisson generalized linear model.
Use only for UMI-based datasets
\item{"LR"} : Uses a logistic regression framework to determine differentially
expressed genes. Constructs a logistic regression model predicting group
membership based on each feature individually and compares this to a null
model with a likelihood ratio test.
\item{"MAST"} : Identifies differentially expressed genes between two groups
of cells using a hurdle model tailored to scRNA-seq data. Utilizes the MAST
package to run the DE testing.
\item{"DESeq2"} : Identifies differentially expressed genes between two groups
of cells based on a model using DESeq2 which uses a negative binomial
distribution (Love et al, Genome Biology, 2014). This test does not support
pre-filtering of genes based on average difference (or percent detection rate)
between cell groups. However, genes may be pre-filtered based on their
minimum detection rate (min.pct) across both cell groups. To use this method,
please install DESeq2, using the instructions at
https://bioconductor.org/packages/release/bioc/html/DESeq2.html
}}
\item{max.cells.group}{Number of cells per identity to plot.}
\item{order.by.prob}{Order cells on heatmap based on their mixscape knockout
probability from highest to lowest score.}
\item{group.by}{(Deprecated) Option to split densities based on mixscape
classification. Please use mixscape.class instead}
\item{mixscape.class}{metadata column with mixscape classifications.}
\item{prtb.type}{specify type of CRISPR perturbation expected for labeling
mixscape classifications. Default is KO.}
\item{fc.name}{Name of the fold change, average difference, or custom
function column in the output data.frame. Default is avg_log2FC}
\item{pval.cutoff}{P-value cut-off for selection of significantly DE genes.}
\item{...}{Arguments passed to other methods and to specific DE methods}
}
\value{
A ggplot object.
}
\description{
Draws a heatmap of single cell feature expression with cells ordered by their
mixscape ko probabilities.
}
\concept{mixscape}
Seurat/man/DotPlot.Rd 0000644 0001762 0000144 00000005302 14525500037 014145 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{DotPlot}
\alias{DotPlot}
\alias{SplitDotPlotGG}
\title{Dot plot visualization}
\usage{
DotPlot(
object,
features,
assay = NULL,
cols = c("lightgrey", "blue"),
col.min = -2.5,
col.max = 2.5,
dot.min = 0,
dot.scale = 6,
idents = NULL,
group.by = NULL,
split.by = NULL,
cluster.idents = FALSE,
scale = TRUE,
scale.by = "radius",
scale.min = NA,
scale.max = NA
)
}
\arguments{
\item{object}{Seurat object}
\item{features}{Input vector of features, or named list of feature vectors
if feature-grouped panels are desired (replicates the functionality of the
old SplitDotPlotGG)}
\item{assay}{Name of assay to use, defaults to the active assay}
\item{cols}{Colors to plot: the name of a palette from
\code{RColorBrewer::brewer.pal.info}, a pair of colors defining a gradient,
or 3+ colors defining multiple gradients (if split.by is set)}
\item{col.min}{Minimum scaled average expression threshold (everything
smaller will be set to this)}
\item{col.max}{Maximum scaled average expression threshold (everything larger
will be set to this)}
\item{dot.min}{The fraction of cells at which to draw the smallest dot
(default is 0). All cell groups with less than this expressing the given
gene will have no dot drawn.}
\item{dot.scale}{Scale the size of the points, similar to cex}
\item{idents}{Identity classes to include in plot (default is all)}
\item{group.by}{Factor to group the cells by}
\item{split.by}{A factor in object metadata to split the plot by, pass 'ident'
to split by cell identity;
see \code{\link{FetchData}} for more details}
\item{cluster.idents}{Whether to order identities by hierarchical clusters
based on given features, default is FALSE}
\item{scale}{Determine whether the data is scaled, TRUE for default}
\item{scale.by}{Scale the size of the points by 'size' or by 'radius'}
\item{scale.min}{Set lower limit for scaling, use NA for default}
\item{scale.max}{Set upper limit for scaling, use NA for default}
}
\value{
A ggplot object
}
\description{
Intuitive way of visualizing how feature expression changes across different
identity classes (clusters). The size of the dot encodes the percentage of
cells within a class, while the color encodes the AverageExpression level
across all cells within a class (blue is high).
}
\examples{
data("pbmc_small")
cd_genes <- c("CD247", "CD3E", "CD9")
DotPlot(object = pbmc_small, features = cd_genes)
pbmc_small[['groups']] <- sample(x = c('g1', 'g2'), size = ncol(x = pbmc_small), replace = TRUE)
DotPlot(object = pbmc_small, features = cd_genes, split.by = 'groups')
}
\seealso{
\code{RColorBrewer::brewer.pal.info}
}
\concept{visualization}
Seurat/man/PlotClusterTree.Rd 0000644 0001762 0000144 00000001614 14525500037 015662 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{PlotClusterTree}
\alias{PlotClusterTree}
\title{Plot clusters as a tree}
\usage{
PlotClusterTree(object, direction = "downwards", ...)
}
\arguments{
\item{object}{Seurat object}
\item{direction}{A character string specifying the direction of the tree (default is downwards)
Possible options: "rightwards", "leftwards", "upwards", and "downwards".}
\item{\dots}{Additional arguments to
\code{\link[ape:plot.phylo]{ape::plot.phylo}}}
}
\value{
Plots dendrogram (must be precomputed using BuildClusterTree), returns no value
}
\description{
Plots previously computed tree (from BuildClusterTree)
}
\examples{
\dontrun{
if (requireNamespace("ape", quietly = TRUE)) {
data("pbmc_small")
pbmc_small <- BuildClusterTree(object = pbmc_small)
PlotClusterTree(object = pbmc_small)
}
}
}
\concept{visualization}
Seurat/man/RunSPCA.Rd 0000644 0001762 0000144 00000005136 14525500037 014000 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/dimensional_reduction.R
\name{RunSPCA}
\alias{RunSPCA}
\alias{RunSPCA.default}
\alias{RunSPCA.Assay}
\alias{RunSPCA.Assay5}
\alias{RunSPCA.Seurat}
\title{Run Supervised Principal Component Analysis}
\usage{
RunSPCA(object, ...)
\method{RunSPCA}{default}(
object,
assay = NULL,
npcs = 50,
reduction.key = "SPC_",
graph = NULL,
verbose = FALSE,
seed.use = 42,
...
)
\method{RunSPCA}{Assay}(
object,
assay = NULL,
features = NULL,
npcs = 50,
reduction.key = "SPC_",
graph = NULL,
verbose = TRUE,
seed.use = 42,
...
)
\method{RunSPCA}{Assay5}(
object,
assay = NULL,
features = NULL,
npcs = 50,
reduction.key = "SPC_",
graph = NULL,
verbose = TRUE,
seed.use = 42,
layer = "scale.data",
...
)
\method{RunSPCA}{Seurat}(
object,
assay = NULL,
features = NULL,
npcs = 50,
reduction.name = "spca",
reduction.key = "SPC_",
graph = NULL,
verbose = TRUE,
seed.use = 42,
...
)
}
\arguments{
\item{object}{An object}
\item{...}{Arguments passed to other methods and IRLBA}
\item{assay}{Name of Assay SPCA is being run on}
\item{npcs}{Total Number of SPCs to compute and store (50 by default)}
\item{reduction.key}{dimensional reduction key, specifies the string before
the number for the dimension names. SPC by default}
\item{graph}{Graph used supervised by SPCA}
\item{verbose}{Print the top genes associated with high/low loadings for
the SPCs}
\item{seed.use}{Set a random seed. By default, sets the seed to 42. Setting
NULL will not set a seed.}
\item{features}{Features to compute SPCA on. If features=NULL, SPCA will be run
using the variable features for the Assay.}
\item{layer}{Layer to run SPCA on}
\item{reduction.name}{dimensional reduction name, spca by default}
}
\value{
Returns Seurat object with the SPCA calculation stored in the reductions slot
}
\description{
Run a supervised PCA (SPCA) dimensionality reduction supervised by a cell-cell kernel.
SPCA is used to capture a linear transformation which maximizes its dependency to
the given cell-cell kernel. We use SNN graph as the kernel to supervise the linear
matrix factorization.
}
\references{
Barshan E, Ghodsi A, Azimifar Z, Jahromi MZ.
Supervised principal component analysis: Visualization, classification and
regression on subspaces and submanifolds.
Pattern Recognition. 2011 Jul 1;44(7):1357-71. \url{https://www.sciencedirect.com/science/article/pii/S0031320310005819?casa_token=AZMFg5OtPnAAAAAA:_Udu7GJ7G2ed1-XSmr-3IGSISUwcHfMpNtCj-qacXH5SBC4nwzVid36GXI3r8XG8dK5WOQui};
}
\concept{dimensional_reduction}
Seurat/man/FindMarkers.Rd 0000644 0001762 0000144 00000027613 14525500037 014776 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/differential_expression.R
\name{FindMarkers}
\alias{FindMarkers}
\alias{FindMarkersNode}
\alias{FindMarkers.default}
\alias{FindMarkers.Assay}
\alias{FindMarkers.SCTAssay}
\alias{FindMarkers.DimReduc}
\alias{FindMarkers.Seurat}
\title{Gene expression markers of identity classes}
\usage{
FindMarkers(object, ...)
\method{FindMarkers}{default}(
object,
slot = "data",
counts = numeric(),
cells.1 = NULL,
cells.2 = NULL,
features = NULL,
logfc.threshold = 0.1,
test.use = "wilcox",
min.pct = 0.01,
min.diff.pct = -Inf,
verbose = TRUE,
only.pos = FALSE,
max.cells.per.ident = Inf,
random.seed = 1,
latent.vars = NULL,
min.cells.feature = 3,
min.cells.group = 3,
pseudocount.use = 1,
fc.results = NULL,
densify = FALSE,
...
)
\method{FindMarkers}{Assay}(
object,
slot = "data",
cells.1 = NULL,
cells.2 = NULL,
features = NULL,
logfc.threshold = 0.1,
test.use = "wilcox",
min.pct = 0.01,
min.diff.pct = -Inf,
verbose = TRUE,
only.pos = FALSE,
max.cells.per.ident = Inf,
random.seed = 1,
latent.vars = NULL,
min.cells.feature = 3,
min.cells.group = 3,
pseudocount.use = 1,
mean.fxn = NULL,
fc.name = NULL,
base = 2,
densify = FALSE,
norm.method = NULL,
...
)
\method{FindMarkers}{SCTAssay}(
object,
slot = "data",
cells.1 = NULL,
cells.2 = NULL,
features = NULL,
logfc.threshold = 0.1,
test.use = "wilcox",
min.pct = 0.01,
min.diff.pct = -Inf,
verbose = TRUE,
only.pos = FALSE,
max.cells.per.ident = Inf,
random.seed = 1,
latent.vars = NULL,
min.cells.feature = 3,
min.cells.group = 3,
pseudocount.use = 1,
mean.fxn = NULL,
fc.name = NULL,
base = 2,
densify = FALSE,
recorrect_umi = TRUE,
...
)
\method{FindMarkers}{DimReduc}(
object,
cells.1 = NULL,
cells.2 = NULL,
features = NULL,
logfc.threshold = 0.1,
test.use = "wilcox",
min.pct = 0.01,
min.diff.pct = -Inf,
verbose = TRUE,
only.pos = FALSE,
max.cells.per.ident = Inf,
random.seed = 1,
latent.vars = NULL,
min.cells.feature = 3,
min.cells.group = 3,
pseudocount.use = 1,
mean.fxn = rowMeans,
fc.name = NULL,
densify = FALSE,
...
)
\method{FindMarkers}{Seurat}(
object,
ident.1 = NULL,
ident.2 = NULL,
group.by = NULL,
subset.ident = NULL,
assay = NULL,
slot = "data",
reduction = NULL,
features = NULL,
logfc.threshold = 0.1,
pseudocount.use = 1,
test.use = "wilcox",
min.pct = 0.01,
min.diff.pct = -Inf,
verbose = TRUE,
only.pos = FALSE,
max.cells.per.ident = Inf,
random.seed = 1,
latent.vars = NULL,
min.cells.feature = 3,
min.cells.group = 3,
mean.fxn = NULL,
fc.name = NULL,
base = 2,
densify = FALSE,
...
)
}
\arguments{
\item{object}{An object}
\item{...}{Arguments passed to other methods and to specific DE methods}
\item{slot}{Slot to pull data from; note that if \code{test.use} is "negbinom", "poisson", or "DESeq2",
\code{slot} will be set to "counts"}
\item{counts}{Count matrix if using scale.data for DE tests. This is used for
computing pct.1 and pct.2 and for filtering features based on fraction
expressing}
\item{cells.1}{Vector of cell names belonging to group 1}
\item{cells.2}{Vector of cell names belonging to group 2}
\item{features}{Genes to test. Default is to use all genes}
\item{logfc.threshold}{Limit testing to genes which show, on average, at least
X-fold difference (log-scale) between the two groups of cells. Default is 0.1
Increasing logfc.threshold speeds up the function, but can miss weaker signals.}
\item{test.use}{Denotes which test to use. Available options are:
\itemize{
\item{"wilcox"} : Identifies differentially expressed genes between two
groups of cells using a Wilcoxon Rank Sum test (default); will use a fast
implementation by Presto if installed
\item{"wilcox_limma"} : Identifies differentially expressed genes between two
groups of cells using the limma implementation of the Wilcoxon Rank Sum test;
set this option to reproduce results from Seurat v4
\item{"bimod"} : Likelihood-ratio test for single cell gene expression,
(McDavid et al., Bioinformatics, 2013)
\item{"roc"} : Identifies 'markers' of gene expression using ROC analysis.
For each gene, evaluates (using AUC) a classifier built on that gene alone,
to classify between two groups of cells. An AUC value of 1 means that
expression values for this gene alone can perfectly classify the two
groupings (i.e. Each of the cells in cells.1 exhibit a higher level than
each of the cells in cells.2). An AUC value of 0 also means there is perfect
classification, but in the other direction. A value of 0.5 implies that
the gene has no predictive power to classify the two groups. Returns a
'predictive power' (abs(AUC-0.5) * 2) ranked matrix of putative differentially
expressed genes.
\item{"t"} : Identify differentially expressed genes between two groups of
cells using the Student's t-test.
\item{"negbinom"} : Identifies differentially expressed genes between two
groups of cells using a negative binomial generalized linear model.
Use only for UMI-based datasets
\item{"poisson"} : Identifies differentially expressed genes between two
groups of cells using a poisson generalized linear model.
Use only for UMI-based datasets
\item{"LR"} : Uses a logistic regression framework to determine differentially
expressed genes. Constructs a logistic regression model predicting group
membership based on each feature individually and compares this to a null
model with a likelihood ratio test.
\item{"MAST"} : Identifies differentially expressed genes between two groups
of cells using a hurdle model tailored to scRNA-seq data. Utilizes the MAST
package to run the DE testing.
\item{"DESeq2"} : Identifies differentially expressed genes between two groups
of cells based on a model using DESeq2 which uses a negative binomial
distribution (Love et al, Genome Biology, 2014). This test does not support
pre-filtering of genes based on average difference (or percent detection rate)
between cell groups. However, genes may be pre-filtered based on their
minimum detection rate (min.pct) across both cell groups. To use this method,
please install DESeq2, using the instructions at
https://bioconductor.org/packages/release/bioc/html/DESeq2.html
}}
\item{min.pct}{only test genes that are detected in a minimum fraction of
min.pct cells in either of the two populations. Meant to speed up the function
by not testing genes that are very infrequently expressed. Default is 0.01}
\item{min.diff.pct}{only test genes that show a minimum difference in the
fraction of detection between the two groups. Set to -Inf by default}
\item{verbose}{Print a progress bar once expression testing begins}
\item{only.pos}{Only return positive markers (FALSE by default)}
\item{max.cells.per.ident}{Down sample each identity class to a max number.
Default is no downsampling. Not activated by default (set to Inf)}
\item{random.seed}{Random seed for downsampling}
\item{latent.vars}{Variables to test, used only when \code{test.use} is one of
'LR', 'negbinom', 'poisson', or 'MAST'}
\item{min.cells.feature}{Minimum number of cells expressing the feature in at least one
of the two groups, currently only used for poisson and negative binomial tests}
\item{min.cells.group}{Minimum number of cells in one of the groups}
\item{pseudocount.use}{Pseudocount to add to averaged expression values when
calculating logFC. 1 by default.}
\item{fc.results}{data.frame from FoldChange}
\item{densify}{Convert the sparse matrix to a dense form before running the DE test. This can provide speedups but might require higher memory; default is FALSE}
\item{mean.fxn}{Function to use for fold change or average difference calculation.
If NULL, the appropriate function will be chosen according to the slot used}
\item{fc.name}{Name of the fold change, average difference, or custom function column
in the output data.frame. If NULL, the fold change column will be named
according to the logarithm base (eg, "avg_log2FC"), or if using the scale.data
slot "avg_diff".}
\item{base}{The base with respect to which logarithms are computed.}
\item{norm.method}{Normalization method for fold change calculation when
\code{slot} is \dQuote{\code{data}}}
\item{recorrect_umi}{Recalculate corrected UMI counts using minimum of the median UMIs when performing DE using multiple SCT objects; default is TRUE}
\item{ident.1}{Identity class to define markers for; pass an object of class
\code{phylo} or 'clustertree' to find markers for a node in a cluster tree;
passing 'clustertree' requires \code{\link{BuildClusterTree}} to have been run}
\item{ident.2}{A second identity class for comparison; if \code{NULL},
use all other cells for comparison; if an object of class \code{phylo} or
'clustertree' is passed to \code{ident.1}, must pass a node to find markers for}
\item{group.by}{Regroup cells into a different identity class prior to performing differential expression (see example)}
\item{subset.ident}{Subset a particular identity class prior to regrouping. Only relevant if group.by is set (see example)}
\item{assay}{Assay to use in differential expression testing}
\item{reduction}{Reduction to use in differential expression testing - will test for DE on cell embeddings}
}
\value{
data.frame with a ranked list of putative markers as rows, and associated
statistics as columns (p-values, ROC score, etc., depending on the test used (\code{test.use})). The following columns are always present:
\itemize{
\item \code{avg_logFC}: log fold-change of the average expression between the two groups. Positive values indicate that the gene is more highly expressed in the first group
\item \code{pct.1}: The percentage of cells where the gene is detected in the first group
\item \code{pct.2}: The percentage of cells where the gene is detected in the second group
\item \code{p_val_adj}: Adjusted p-value, based on bonferroni correction using all genes in the dataset
}
}
\description{
Finds markers (differentially expressed genes) for identity classes
}
\details{
p-value adjustment is performed using bonferroni correction based on
the total number of genes in the dataset. Other correction methods are not
recommended, as Seurat pre-filters genes using the arguments above, reducing
the number of tests performed. Lastly, as Aaron Lun has pointed out, p-values
should be interpreted cautiously, as the genes used for clustering are the
same genes tested for differential expression.
}
\examples{
\dontrun{
data("pbmc_small")
# Find markers for cluster 2
markers <- FindMarkers(object = pbmc_small, ident.1 = 2)
head(x = markers)
# Take all cells in cluster 2, and find markers that separate cells in the 'g1' group (metadata
# variable 'group')
markers <- FindMarkers(pbmc_small, ident.1 = "g1", group.by = 'groups', subset.ident = "2")
head(x = markers)
# Pass 'clustertree' or an object of class phylo to ident.1 and
# a node to ident.2 as a replacement for FindMarkersNode
if (requireNamespace("ape", quietly = TRUE)) {
pbmc_small <- BuildClusterTree(object = pbmc_small)
markers <- FindMarkers(object = pbmc_small, ident.1 = 'clustertree', ident.2 = 5)
head(x = markers)
}
}
}
\references{
McDavid A, Finak G, Chattopadyay PK, et al. Data exploration,
quality control and testing in single-cell qPCR-based gene expression experiments.
Bioinformatics. 2013;29(4):461-467. doi:10.1093/bioinformatics/bts714
Trapnell C, et al. The dynamics and regulators of cell fate
decisions are revealed by pseudotemporal ordering of single cells. Nature
Biotechnology volume 32, pages 381-386 (2014)
Andrew McDavid, Greg Finak and Masanao Yajima (2017). MAST: Model-based
Analysis of Single Cell Transcriptomics. R package version 1.2.1.
https://github.com/RGLab/MAST/
Love MI, Huber W and Anders S (2014). "Moderated estimation of
fold change and dispersion for RNA-seq data with DESeq2." Genome Biology.
https://bioconductor.org/packages/release/bioc/html/DESeq2.html
}
\seealso{
\code{FoldChange}
}
\concept{differential_expression}
Seurat/man/SingleExIPlot.Rd 0000644 0001762 0000144 00000002636 14525500037 015255 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{SingleExIPlot}
\alias{SingleExIPlot}
\title{Plot a single expression by identity on a plot}
\usage{
SingleExIPlot(
data,
idents,
split = NULL,
type = "violin",
sort = FALSE,
y.max = NULL,
adjust = 1,
pt.size = 0,
alpha = 1,
cols = NULL,
seed.use = 42,
log = FALSE,
add.noise = TRUE,
raster = NULL
)
}
\arguments{
\item{data}{Data to plot}
\item{idents}{Idents to use}
\item{split}{Use a split violin plot}
\item{type}{Make either a \dQuote{ridge} or \dQuote{violin} plot}
\item{sort}{Sort identity classes (on the x-axis) by the average
expression of the attribute being plotted}
\item{y.max}{Maximum Y value to plot}
\item{adjust}{Adjust parameter for geom_violin}
\item{pt.size}{Size of points for violin plots}
\item{alpha}{Alpha value for violin plots}
\item{cols}{Colors to use for plotting}
\item{seed.use}{Random seed to use. If NULL, don't set a seed}
\item{log}{plot Y axis on log10 scale}
\item{add.noise}{determine if adding small noise for plotting}
\item{raster}{Convert points to raster format. Requires 'ggrastr' to be installed.
default is \code{NULL} which automatically rasterizes if ggrastr is installed and
number of points exceed 100,000.}
}
\value{
A ggplot-based Expression-by-Identity plot
}
\description{
Plot a single expression by identity on a plot
}
\keyword{internal}
Seurat/man/CellSelector.Rd 0000644 0001762 0000144 00000002340 14525500037 015137 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{CellSelector}
\alias{CellSelector}
\alias{FeatureLocator}
\title{Cell Selector}
\usage{
CellSelector(plot, object = NULL, ident = "SelectedCells", ...)
FeatureLocator(plot, ...)
}
\arguments{
\item{plot}{A ggplot2 plot}
\item{object}{An optional Seurat object; if passed, will return an object
with the identities of selected cells set to \code{ident}}
\item{ident}{An optional new identity class to assign the selected cells}
\item{...}{Ignored}
}
\value{
If \code{object} is \code{NULL}, the names of the points selected;
otherwise, a Seurat object with the selected cells identity classes set to
\code{ident}
}
\description{
Select points on a scatterplot and get information about them
}
\examples{
\dontrun{
data("pbmc_small")
plot <- DimPlot(object = pbmc_small)
# Follow instructions in the terminal to select points
cells.located <- CellSelector(plot = plot)
cells.located
# Automatically set the identity class of selected cells and return a new Seurat object
pbmc_small <- CellSelector(plot = plot, object = pbmc_small, ident = 'SelectedCells')
}
}
\seealso{
\code{\link{DimPlot}} \code{\link{FeaturePlot}}
}
\concept{visualization}
Seurat/man/UnSketchEmbeddings.Rd 0000644 0001762 0000144 00000001113 14525500037 016262 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{UnSketchEmbeddings}
\alias{UnSketchEmbeddings}
\title{Transfer embeddings from sketched cells to the full data}
\usage{
UnSketchEmbeddings(
atom.data,
atom.cells = NULL,
orig.data,
embeddings,
sketch.matrix = NULL
)
}
\arguments{
\item{atom.data}{Atom data}
\item{atom.cells}{Atom cells}
\item{orig.data}{Original data}
\item{embeddings}{Embeddings of atom cells}
\item{sketch.matrix}{Sketch matrix}
}
\description{
Transfer embeddings from sketched cells to the full data
}
Seurat/man/PercentageFeatureSet.Rd 0000644 0001762 0000144 00000003225 14525500037 016627 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{PercentageFeatureSet}
\alias{PercentageFeatureSet}
\title{Calculate the percentage of all counts that belong to a given set of features}
\usage{
PercentageFeatureSet(
object,
pattern = NULL,
features = NULL,
col.name = NULL,
assay = NULL
)
}
\arguments{
\item{object}{A Seurat object}
\item{pattern}{A regex pattern to match features against}
\item{features}{A defined feature set. If features provided, will ignore the pattern matching}
\item{col.name}{Name in meta.data column to assign. If this is not null, returns a Seurat object
with the proportion of the feature set stored in metadata.}
\item{assay}{Assay to use}
}
\value{
Returns a vector with the proportion of the feature set or, if \code{col.name} is set, returns a
Seurat object with the proportion of the feature set stored in metadata.
}
\description{
This function enables you to easily calculate the percentage of all the counts belonging to a
subset of the possible features for each cell. This is useful when trying to compute the percentage
of transcripts that map to mitochondrial genes for example. The calculation here is simply the
column sum of the matrix present in the counts slot for features belonging to the set divided by
the column sum for all features times 100.
}
\examples{
data("pbmc_small")
# Calculate the proportion of transcripts mapping to mitochondrial genes
# NOTE: The pattern provided works for human gene names. You may need to adjust depending on your
# system of interest
pbmc_small[["percent.mt"]] <- PercentageFeatureSet(object = pbmc_small, pattern = "^MT-")
}
\concept{utilities}
Seurat/man/DimReduc-class.Rd 0000644 0001762 0000144 00000000706 14525500037 015362 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reexports.R
\docType{class}
\name{DimReduc-class}
\alias{DimReduc-class}
\title{The DimReduc Class}
\description{
The \code{DimReduc} object stores a dimensionality reduction taken out in
Seurat; for more details, please see the documentation in
\code{\link[SeuratObject:DimReduc]{SeuratObject}}
}
\seealso{
\code{\link[SeuratObject:DimReduc]{SeuratObject::DimReduc-class}}
}
Seurat/man/PolyFeaturePlot.Rd 0000644 0001762 0000144 00000002637 14525500037 015666 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{PolyFeaturePlot}
\alias{PolyFeaturePlot}
\title{Polygon FeaturePlot}
\usage{
PolyFeaturePlot(
object,
features,
cells = NULL,
poly.data = "spatial",
ncol = ceiling(x = length(x = features)/2),
min.cutoff = 0,
max.cutoff = NA,
common.scale = TRUE,
flip.coords = FALSE
)
}
\arguments{
\item{object}{Seurat object}
\item{features}{Vector of features to plot. Features can come from:
\itemize{
\item An \code{Assay} feature (e.g. a gene name - "MS4A1")
\item A column name from meta.data (e.g. mitochondrial percentage -
"percent.mito")
\item A column name from a \code{DimReduc} object corresponding to the
cell embedding values (e.g. the PC 1 scores - "PC_1")
}}
\item{cells}{Vector of cells to plot (default is all cells)}
\item{poly.data}{Name of the polygon dataframe in the misc slot}
\item{ncol}{Number of columns to split the plot into}
\item{min.cutoff, max.cutoff}{Vector of minimum and maximum cutoff values for each feature,
may specify quantile in the form of 'q##' where '##' is the quantile (eg, 'q1', 'q10')}
\item{common.scale}{...}
\item{flip.coords}{Flip x and y coordinates}
}
\value{
Returns a ggplot object
}
\description{
Plot cells as polygons, rather than single points. Color cells by any value
accessible by \code{\link{FetchData}}.
}
\concept{spatial}
\concept{visualization}
Seurat/man/BridgeReferenceSet-class.Rd 0000644 0001762 0000144 00000001336 14525500037 017355 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\docType{class}
\name{BridgeReferenceSet-class}
\alias{BridgeReferenceSet-class}
\alias{BridgeReferenceSet}
\title{The BridgeReferenceSet Class
The BridgeReferenceSet is an output from PrepareBridgeReference}
\description{
The BridgeReferenceSet Class
The BridgeReferenceSet is an output from PrepareBridgeReference
}
\section{Slots}{
\describe{
\item{\code{bridge}}{The multi-omic object}
\item{\code{reference}}{The Reference object only containing bridge representation assay}
\item{\code{params}}{A list of parameters used in the PrepareBridgeReference}
\item{\code{command}}{Store log of parameters that were used}
}}
\concept{objects}
Seurat/man/FilterSlideSeq.Rd 0000644 0001762 0000144 00000003135 14525500037 015441 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{FilterSlideSeq}
\alias{FilterSlideSeq}
\title{Filter stray beads from Slide-seq puck}
\usage{
FilterSlideSeq(
object,
image = "image",
center = NULL,
radius = NULL,
do.plot = TRUE
)
}
\arguments{
\item{object}{Seurat object with slide-seq data}
\item{image}{Name of the image where the coordinates are stored}
\item{center}{Vector specifying the x and y coordinates for the center of the
inclusion circle}
\item{radius}{Radius of the circle of inclusion}
\item{do.plot}{Display a \code{\link{SpatialDimPlot}} with the cells being
removed labeled.}
}
\value{
Returns a Seurat object with only the subset of cells that pass the
circular filter
}
\description{
This function is useful for removing stray beads that fall outside the main
Slide-seq puck area. Essentially, it's a circular filter where you set a
center and radius defining a circle of beads to keep. If the center is not
set, it will be estimated from the bead coordinates (removing the 1st and
99th quantile to avoid skewing the center by the stray beads). By default,
this function will display a \code{\link{SpatialDimPlot}} showing which cells
were removed for easy adjustment of the center and/or radius.
}
\examples{
\dontrun{
# This example uses the ssHippo dataset which you can download
# using the SeuratData package.
library(SeuratData)
data('ssHippo')
# perform filtering of beads
ssHippo.filtered <- FilterSlideSeq(ssHippo, radius = 2300)
# This radius looks too small, so increase and repeat until satisfied
}
}
\concept{objects}
\concept{spatial}
Seurat/man/ISpatialDimPlot.Rd 0000644 0001762 0000144 00000001403 14525500037 015555 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{ISpatialDimPlot}
\alias{ISpatialDimPlot}
\title{Visualize clusters spatially and interactively}
\usage{
ISpatialDimPlot(object, image = NULL, group.by = NULL, alpha = c(0.3, 1))
}
\arguments{
\item{object}{A Seurat object}
\item{image}{Name of the image to use in the plot}
\item{group.by}{Name of meta.data column to group the data by}
\item{alpha}{Controls opacity of spots. Provide as a vector specifying the
min and max for SpatialFeaturePlot. For SpatialDimPlot, provide a single
alpha value for each plot.}
}
\value{
Returns final plot as a ggplot object
}
\description{
Visualize clusters spatially and interactively
}
\concept{spatial}
\concept{visualization}
Seurat/man/FindSpatiallyVariableFeatures.Rd 0000644 0001762 0000144 00000005445 14525500037 020500 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/preprocessing.R,
% R/preprocessing5.R
\name{FindSpatiallyVariableFeatures}
\alias{FindSpatiallyVariableFeatures}
\alias{FindSpatiallyVariableFeatures.default}
\alias{FindSpatiallyVariableFeatures.Assay}
\alias{FindSpatiallyVariableFeatures.Seurat}
\alias{FindSpatiallyVariableFeatures.StdAssay}
\title{Find spatially variable features}
\usage{
FindSpatiallyVariableFeatures(object, ...)
\method{FindSpatiallyVariableFeatures}{default}(
object,
spatial.location,
selection.method = c("markvariogram", "moransi"),
r.metric = 5,
x.cuts = NULL,
y.cuts = NULL,
verbose = TRUE,
...
)
\method{FindSpatiallyVariableFeatures}{Assay}(
object,
slot = "scale.data",
spatial.location,
selection.method = c("markvariogram", "moransi"),
features = NULL,
r.metric = 5,
x.cuts = NULL,
y.cuts = NULL,
nfeatures = nfeatures,
verbose = TRUE,
...
)
\method{FindSpatiallyVariableFeatures}{Seurat}(
object,
assay = NULL,
slot = "scale.data",
features = NULL,
image = NULL,
selection.method = c("markvariogram", "moransi"),
r.metric = 5,
x.cuts = NULL,
y.cuts = NULL,
nfeatures = 2000,
verbose = TRUE,
...
)
\method{FindSpatiallyVariableFeatures}{StdAssay}(
object,
layer = "scale.data",
spatial.location,
selection.method = c("markvariogram", "moransi"),
features = NULL,
r.metric = 5,
x.cuts = NULL,
y.cuts = NULL,
nfeatures = nfeatures,
verbose = TRUE,
...
)
}
\arguments{
\item{object}{A Seurat object, assay, or expression matrix}
\item{...}{Arguments passed to other methods}
\item{spatial.location}{Coordinates for each cell/spot/bead}
\item{selection.method}{Method for selecting spatially variable features.
\itemize{
\item \code{markvariogram}: See \code{\link{RunMarkVario}} for details
\item \code{moransi}: See \code{\link{RunMoransI}} for details.
}}
\item{r.metric}{r value at which to report the "trans" value of the mark
variogram}
\item{x.cuts}{Number of divisions to make in the x direction, helps define
the grid over which binning is performed}
\item{y.cuts}{Number of divisions to make in the y direction, helps define
the grid over which binning is performed}
\item{verbose}{Print messages and progress}
\item{slot}{Slot in the Assay to pull data from}
\item{features}{If provided, only compute on given features. Otherwise,
compute for all features.}
\item{nfeatures}{Number of features to mark as the top spatially variable.}
\item{assay}{Assay to pull the features (marks) from}
\item{image}{Name of image to pull the coordinates from}
\item{layer}{Layer in the Assay5 to pull data from}
}
\description{
Identify features whose variability in expression can be explained to some
degree by spatial location.
}
\concept{preprocessing}
\concept{spatial}
Seurat/man/IntegrationAnchorSet-class.Rd 0000644 0001762 0000144 00000000626 14525500037 017761 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\docType{class}
\name{IntegrationAnchorSet-class}
\alias{IntegrationAnchorSet-class}
\alias{IntegrationAnchorSet}
\title{The IntegrationAnchorSet Class}
\description{
Inherits from the Anchorset class. Implemented mainly for method dispatch
purposes. See \code{\link{AnchorSet}} for slot details.
}
\concept{objects}
Seurat/man/CreateCategoryMatrix.Rd 0000644 0001762 0000144 00000001033 14525500037 016643 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{CreateCategoryMatrix}
\alias{CreateCategoryMatrix}
\title{Create one hot matrix for a given label}
\usage{
CreateCategoryMatrix(
labels,
method = c("aggregate", "average"),
cells.name = NULL
)
}
\arguments{
\item{labels}{A vector of labels}
\item{method}{Method to aggregate cells with the same label. Either 'aggregate' or 'average'}
\item{cells.name}{A vector of cell names}
}
\description{
Create one hot matrix for a given label
}
Seurat/man/IntegrateEmbeddings.Rd 0000644 0001762 0000144 00000010267 14525500037 016472 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/integration.R
\name{IntegrateEmbeddings}
\alias{IntegrateEmbeddings}
\alias{IntegrateEmbeddings.IntegrationAnchorSet}
\alias{IntegrateEmbeddings.TransferAnchorSet}
\title{Integrate low dimensional embeddings}
\usage{
IntegrateEmbeddings(anchorset, ...)
\method{IntegrateEmbeddings}{IntegrationAnchorSet}(
anchorset,
new.reduction.name = "integrated_dr",
reductions = NULL,
dims.to.integrate = NULL,
k.weight = 100,
weight.reduction = NULL,
sd.weight = 1,
sample.tree = NULL,
preserve.order = FALSE,
verbose = TRUE,
...
)
\method{IntegrateEmbeddings}{TransferAnchorSet}(
anchorset,
reference,
query,
query.assay = NULL,
new.reduction.name = "integrated_dr",
reductions = "pcaproject",
dims.to.integrate = NULL,
k.weight = 100,
weight.reduction = NULL,
reuse.weights.matrix = TRUE,
sd.weight = 1,
preserve.order = FALSE,
verbose = TRUE,
...
)
}
\arguments{
\item{anchorset}{An AnchorSet object}
\item{...}{Reserved for internal use}
\item{new.reduction.name}{Name for new integrated dimensional reduction.}
\item{reductions}{Name of reductions to be integrated. For a
TransferAnchorSet, this should be the name of a reduction present in the
anchorset object (for example, "pcaproject"). For an IntegrationAnchorSet,
this should be a \code{\link{DimReduc}} object containing all cells present
in the anchorset object.}
\item{dims.to.integrate}{Number of dimensions to return integrated values for}
\item{k.weight}{Number of neighbors to consider when weighting anchors}
\item{weight.reduction}{Dimension reduction to use when calculating anchor
weights. This can be one of:
\itemize{
\item{A string, specifying the name of a dimension reduction present in
all objects to be integrated}
\item{A vector of strings, specifying the name of a dimension reduction to
use for each object to be integrated}
\item{A vector of \code{\link{DimReduc}} objects, specifying the object to
use for each object in the integration}
\item{NULL, in which case the full corrected space is used for computing
anchor weights.}
}}
\item{sd.weight}{Controls the bandwidth of the Gaussian kernel for weighting}
\item{sample.tree}{Specify the order of integration. Order of integration
should be encoded in a matrix, where each row represents one of the pairwise
integration steps. Negative numbers specify a dataset, positive numbers
specify the integration results from a given row (the format of the merge
matrix included in the \code{\link{hclust}} function output). For example:
\code{matrix(c(-2, 1, -3, -1), ncol = 2)} gives:
\if{html}{\out{}}\preformatted{ [,1] [,2]
[1,] -2 -3
[2,] 1 -1
}\if{html}{\out{
}}
Which would cause dataset 2 and 3 to be integrated first, then the resulting
object integrated with dataset 1.
If NULL, the sample tree will be computed automatically.}
\item{preserve.order}{Do not reorder objects based on size for each pairwise
integration.}
\item{verbose}{Print progress bars and output}
\item{reference}{Reference object used in anchorset construction}
\item{query}{Query object used in anchorset construction}
\item{query.assay}{Name of the Assay to use from query}
\item{reuse.weights.matrix}{Can be used in conjunction with the store.weights
parameter in TransferData to reuse a precomputed weights matrix.}
}
\value{
When called on a TransferAnchorSet (from FindTransferAnchors), this
will return the query object with the integrated embeddings stored in a new
reduction. When called on an IntegrationAnchorSet (from IntegrateData), this
will return a merged object with the integrated reduction stored.
}
\description{
Perform dataset integration using a pre-computed Anchorset of specified low
dimensional representations.
}
\details{
The main steps of this procedure are identical to \code{\link{IntegrateData}}
with one key distinction. When computing the weights matrix, the distance
calculations are performed in the full space of integrated embeddings when
integrating more than two datasets, as opposed to a reduced PCA space which
is the default behavior in \code{\link{IntegrateData}}.
}
\concept{integration}
Seurat/man/SCTResults.Rd 0000644 0001762 0000144 00000002636 14525500037 014602 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/objects.R
\name{SCTResults}
\alias{SCTResults}
\alias{SCTResults<-}
\alias{SCTResults.SCTModel}
\alias{SCTResults<-.SCTModel}
\alias{SCTResults.SCTAssay}
\alias{SCTResults<-.SCTAssay}
\alias{SCTResults.Seurat}
\title{Get SCT results from an Assay}
\usage{
SCTResults(object, ...)
SCTResults(object, ...) <- value
\method{SCTResults}{SCTModel}(object, slot, ...)
\method{SCTResults}{SCTModel}(object, slot, ...) <- value
\method{SCTResults}{SCTAssay}(object, slot, model = NULL, ...)
\method{SCTResults}{SCTAssay}(object, slot, model = NULL, ...) <- value
\method{SCTResults}{Seurat}(object, assay = "SCT", slot, model = NULL, ...)
}
\arguments{
\item{object}{An object}
\item{...}{Arguments passed to other methods (not used)}
\item{value}{new data to set}
\item{slot}{Which slot to pull the SCT results from}
\item{model}{Name of SCModel to pull result from. Available names can be
retrieved with \code{levels}.}
\item{assay}{Assay in the Seurat object to pull from}
}
\value{
Returns the value present in the requested slot for the requested
group. If group is not specified, returns a list of slot results for each
group unless there is only one group present (in which case it just returns
the slot directly).
}
\description{
Pull the \code{\link{SCTResults}} information from an \code{\link{SCTAssay}}
object.
}
\concept{objects}
Seurat/man/RegroupIdents.Rd 0000644 0001762 0000144 00000001133 14525500037 015350 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{RegroupIdents}
\alias{RegroupIdents}
\title{Regroup idents based on meta.data info}
\usage{
RegroupIdents(object, metadata)
}
\arguments{
\item{object}{Seurat object}
\item{metadata}{Name of metadata column}
}
\value{
A Seurat object with the active idents regrouped
}
\description{
For cells in each ident, set a new identity based on the most common value
of a specified metadata column.
}
\examples{
data("pbmc_small")
pbmc_small <- RegroupIdents(pbmc_small, metadata = "groups")
}
\concept{utilities}
Seurat/man/VizDimLoadings.Rd 0000644 0001762 0000144 00000002415 14525500037 015445 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{VizDimLoadings}
\alias{VizDimLoadings}
\title{Visualize Dimensional Reduction genes}
\usage{
VizDimLoadings(
object,
dims = 1:5,
nfeatures = 30,
col = "blue",
reduction = "pca",
projected = FALSE,
balanced = FALSE,
ncol = NULL,
combine = TRUE
)
}
\arguments{
\item{object}{Seurat object}
\item{dims}{Number of dimensions to display}
\item{nfeatures}{Number of genes to display}
\item{col}{Color of points to use}
\item{reduction}{Reduction technique to visualize results for}
\item{projected}{Use reduction values for full dataset (i.e. projected
dimensional reduction values)}
\item{balanced}{Return an equal number of genes with + and - scores. If
FALSE (default), returns the top genes ranked by the scores absolute values}
\item{ncol}{Number of columns to display}
\item{combine}{Combine plots into a single \code{patchwork}
ggplot object. If \code{FALSE}, return a list of ggplot objects}
}
\value{
A \code{patchwork} ggplot object if
\code{combine = TRUE}; otherwise, a list of ggplot objects
}
\description{
Visualize top genes associated with reduction components
}
\examples{
data("pbmc_small")
VizDimLoadings(object = pbmc_small)
}
\concept{visualization}
Seurat/man/GaussianSketch.Rd 0000644 0001762 0000144 00000001026 14525500037 015473 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sketching.R
\name{GaussianSketch}
\alias{GaussianSketch}
\title{Gaussian sketching}
\usage{
GaussianSketch(nsketch, ncells, seed = NA_integer_, ...)
}
\arguments{
\item{nsketch}{Number of sketching random cells}
\item{ncells}{Number of cells in the original data}
\item{seed}{a single value, interpreted as an integer, or \code{NULL}
(see \sQuote{Details}).}
\item{...}{Ignored}
}
\value{
...
}
\description{
Gaussian sketching
}
\keyword{internal}
Seurat/man/AugmentPlot.Rd 0000644 0001762 0000144 00000001516 14525500037 015022 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{AugmentPlot}
\alias{AugmentPlot}
\title{Augments ggplot2-based plot with a PNG image.}
\usage{
AugmentPlot(plot, width = 10, height = 10, dpi = 100)
}
\arguments{
\item{plot}{A ggplot object}
\item{width, height}{Width and height of PNG version of plot}
\item{dpi}{Plot resolution}
}
\value{
A ggplot object
}
\description{
Creates "vector-friendly" plots. Does this by saving a copy of the plot as a PNG file,
then adding the PNG image with \code{\link[ggplot2]{annotation_raster}} to a blank plot
of the same dimensions as \code{plot}. Please note: original legends and axes will be lost
during augmentation.
}
\examples{
\dontrun{
data("pbmc_small")
plot <- DimPlot(object = pbmc_small)
AugmentPlot(plot = plot)
}
}
\concept{visualization}
Seurat/man/FetchResiduals.Rd 0000644 0001762 0000144 00000003305 14525500037 015466 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing5.R
\name{FetchResiduals}
\alias{FetchResiduals}
\title{Calculate pearson residuals of features not in the scale.data}
\usage{
FetchResiduals(
object,
features,
assay = NULL,
umi.assay = "RNA",
layer = "counts",
clip.range = NULL,
reference.SCT.model = NULL,
replace.value = FALSE,
na.rm = TRUE,
verbose = TRUE
)
}
\arguments{
\item{object}{A seurat object}
\item{features}{Name of features to add into the scale.data}
\item{assay}{Name of the assay of the seurat object generated by SCTransform}
\item{umi.assay}{Name of the assay of the seurat object containing UMI matrix
and the default is RNA}
\item{layer}{Name (prefix) of the layer to pull counts from}
\item{clip.range}{Numeric of length two specifying the min and max values the
Pearson residual will be clipped to}
\item{reference.SCT.model}{If a reference SCT model should be used
for calculating the residuals. When set to not NULL, ignores the `SCTModel`
parameter.}
\item{replace.value}{Recalculate residuals for all features, even if they are
already present. Useful if you want to change the clip.range.}
\item{na.rm}{For features where there is no feature model stored, return NA
for residual value in scale.data when na.rm = FALSE. When na.rm is TRUE, only
return residuals for features with a model stored for all cells.}
\item{verbose}{Whether to print messages and progress bars}
}
\value{
Returns a Seurat object containing Pearson residuals of added
features in its scale.data
}
\description{
This function calls sctransform::get_residuals.
}
\seealso{
\code{\link[sctransform]{get_residuals}}
}
\concept{preprocessing}
Seurat/man/DEenrichRPlot.Rd 0000644 0001762 0000144 00000010547 14525500037 015231 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mixscape.R
\name{DEenrichRPlot}
\alias{DEenrichRPlot}
\title{DE and EnrichR pathway visualization barplot}
\usage{
DEenrichRPlot(
object,
ident.1 = NULL,
ident.2 = NULL,
balanced = TRUE,
logfc.threshold = 0.25,
assay = NULL,
max.genes,
test.use = "wilcox",
p.val.cutoff = 0.05,
cols = NULL,
enrich.database = NULL,
num.pathway = 10,
return.gene.list = FALSE,
...
)
}
\arguments{
\item{object}{Name of object class Seurat.}
\item{ident.1}{Cell class identity 1.}
\item{ident.2}{Cell class identity 2.}
\item{balanced}{Option to display pathway enrichments for both negative and
positive DE genes.If false, only positive DE gene will be displayed.}
\item{logfc.threshold}{Limit testing to genes which show, on average, at least
X-fold difference (log-scale) between the two groups of cells. Default is 0.25.
Increasing logfc.threshold speeds up the function, but can miss weaker signals.}
\item{assay}{Assay to use in differential expression testing}
\item{max.genes}{Maximum number of genes to use as input to enrichR.}
\item{test.use}{Denotes which test to use. Available options are:
\itemize{
\item{"wilcox"} : Identifies differentially expressed genes between two
groups of cells using a Wilcoxon Rank Sum test (default); will use a fast
implementation by Presto if installed
\item{"wilcox_limma"} : Identifies differentially expressed genes between two
groups of cells using the limma implementation of the Wilcoxon Rank Sum test;
set this option to reproduce results from Seurat v4
\item{"bimod"} : Likelihood-ratio test for single cell gene expression,
(McDavid et al., Bioinformatics, 2013)
\item{"roc"} : Identifies 'markers' of gene expression using ROC analysis.
For each gene, evaluates (using AUC) a classifier built on that gene alone,
to classify between two groups of cells. An AUC value of 1 means that
expression values for this gene alone can perfectly classify the two
groupings (i.e. Each of the cells in cells.1 exhibit a higher level than
each of the cells in cells.2). An AUC value of 0 also means there is perfect
classification, but in the other direction. A value of 0.5 implies that
the gene has no predictive power to classify the two groups. Returns a
'predictive power' (abs(AUC-0.5) * 2) ranked matrix of putative differentially
expressed genes.
\item{"t"} : Identify differentially expressed genes between two groups of
cells using the Student's t-test.
\item{"negbinom"} : Identifies differentially expressed genes between two
groups of cells using a negative binomial generalized linear model.
Use only for UMI-based datasets
\item{"poisson"} : Identifies differentially expressed genes between two
groups of cells using a poisson generalized linear model.
Use only for UMI-based datasets
\item{"LR"} : Uses a logistic regression framework to determine differentially
expressed genes. Constructs a logistic regression model predicting group
membership based on each feature individually and compares this to a null
model with a likelihood ratio test.
\item{"MAST"} : Identifies differentially expressed genes between two groups
of cells using a hurdle model tailored to scRNA-seq data. Utilizes the MAST
package to run the DE testing.
\item{"DESeq2"} : Identifies differentially expressed genes between two groups
of cells based on a model using DESeq2 which uses a negative binomial
distribution (Love et al, Genome Biology, 2014).This test does not support
pre-filtering of genes based on average difference (or percent detection rate)
between cell groups. However, genes may be pre-filtered based on their
minimum detection rate (min.pct) across both cell groups. To use this method,
please install DESeq2, using the instructions at
https://bioconductor.org/packages/release/bioc/html/DESeq2.html
}}
\item{p.val.cutoff}{Cutoff to select DE genes.}
\item{cols}{A list of colors to use for barplots.}
\item{enrich.database}{Database to use from enrichR.}
\item{num.pathway}{Number of pathways to display in barplot.}
\item{return.gene.list}{Return list of DE genes}
\item{...}{Arguments passed to other methods and to specific DE methods}
}
\value{
Returns one (only enriched) or two (both enriched and depleted)
barplots with the top enriched/depleted GO terms from EnrichR.
}
\description{
DE and EnrichR pathway visualization barplot
}
\concept{mixscape}
Seurat/man/SketchData.Rd 0000644 0001762 0000144 00000003077 14525500037 014602 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sketching.R
\name{SketchData}
\alias{SketchData}
\title{Sketch Data}
\usage{
SketchData(
object,
assay = NULL,
ncells = 5000L,
sketched.assay = "sketch",
method = c("LeverageScore", "Uniform"),
var.name = "leverage.score",
over.write = FALSE,
seed = 123L,
cast = "dgCMatrix",
verbose = TRUE,
...
)
}
\arguments{
\item{object}{A Seurat object.}
\item{assay}{Assay name. Default is NULL, in which case the default assay of the object is used.}
\item{ncells}{A positive integer indicating the number of cells to sample for the sketching. Default is 5000.}
\item{sketched.assay}{Sketched assay name. A sketch assay is created or overwrite with the sketch data. Default is 'sketch'.}
\item{method}{Sketching method to use. Can be 'LeverageScore' or 'Uniform'.
Default is 'LeverageScore'.}
\item{var.name}{A metadata column name to store the leverage scores. Default is 'leverage.score'.}
\item{over.write}{whether to overwrite existing column in the metadata. Default is FALSE.}
\item{seed}{A positive integer for the seed of the random number generator. Default is 123.}
\item{cast}{The type to cast the resulting assay to. Default is 'dgCMatrix'.}
\item{verbose}{Print progress and diagnostic messages}
\item{...}{Arguments passed to other methods}
}
\value{
A Seurat object with the sketched data added as a new assay.
}
\description{
This function uses sketching methods to downsample high-dimensional single-cell RNA expression data,
which can help with scalability for large datasets.
}
Seurat/man/ReadAkoya.Rd 0000644 0001762 0000144 00000005417 14525500037 014427 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R, R/convenience.R
\name{ReadAkoya}
\alias{ReadAkoya}
\alias{LoadAkoya}
\title{Read and Load Akoya CODEX data}
\usage{
ReadAkoya(
filename,
type = c("inform", "processor", "qupath"),
filter = "DAPI|Blank|Empty",
inform.quant = c("mean", "total", "min", "max", "std")
)
LoadAkoya(
filename,
type = c("inform", "processor", "qupath"),
fov,
assay = "Akoya",
...
)
}
\arguments{
\item{filename}{Path to matrix generated by upstream processing.}
\item{type}{Specify which type matrix is being provided.
\itemize{
\item \dQuote{\code{processor}}: matrix generated by CODEX Processor
\item \dQuote{\code{inform}}: matrix generated by inForm
\item \dQuote{\code{qupath}}: matrix generated by QuPath
}}
\item{filter}{A pattern to filter features by; pass \code{NA} to
skip feature filtering}
\item{inform.quant}{When \code{type} is \dQuote{\code{inform}}, the
quantification level to read in}
\item{fov}{Name to store FOV as}
\item{assay}{Name to store expression matrix as}
\item{...}{
Arguments passed on to \code{\link[=ReadAkoya]{ReadAkoya}}
\describe{
\item{\code{}}{}
}}
}
\value{
\code{ReadAkoya}: A list with some combination of the following values
\itemize{
\item \dQuote{\code{matrix}}: a
\link[Matrix:dgCMatrix-class]{sparse matrix} with expression data; cells
are columns and features are rows
\item \dQuote{\code{centroids}}: a data frame with cell centroid
coordinates in three columns: \dQuote{x}, \dQuote{y}, and \dQuote{cell}
\item \dQuote{\code{metadata}}: a data frame with cell-level meta data;
includes all columns in \code{filename} that aren't in
\dQuote{\code{matrix}} or \dQuote{\code{centroids}}
}
When \code{type} is \dQuote{\code{inform}}, additional expression matrices
are returned and named using their segmentation type (eg.
\dQuote{nucleus}, \dQuote{membrane}). The \dQuote{Entire Cell} segmentation
type is returned in the \dQuote{\code{matrix}} entry of the list
\code{LoadAkoya}: A \code{\link[SeuratObject]{Seurat}} object
}
\description{
Read and Load Akoya CODEX data
}
\note{
This function requires the
\href{https://cran.r-project.org/package=data.table}{\pkg{data.table}} package
to be installed
}
\section{Progress Updates with \pkg{progressr}}{
This function uses
\href{https://cran.r-project.org/package=progressr}{\pkg{progressr}} to
render status updates and progress bars. To enable progress updates, wrap
the function call in \code{\link[progressr]{with_progress}} or run
\code{\link[progressr:handlers]{handlers(global = TRUE)}} before running
this function. For more details about \pkg{progressr}, please read
\href{https://progressr.futureverse.org/articles/progressr-intro.html}{\code{vignette("progressr-intro")}}
}
\concept{preprocessing}
Seurat/man/ScaleData.Rd 0000644 0001762 0000144 00000007442 14525500037 014410 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/preprocessing.R
\name{ScaleData}
\alias{ScaleData}
\alias{ScaleData.default}
\alias{ScaleData.IterableMatrix}
\alias{ScaleData.Assay}
\alias{ScaleData.Seurat}
\title{Scale and center the data.}
\usage{
ScaleData(object, ...)
\method{ScaleData}{default}(
object,
features = NULL,
vars.to.regress = NULL,
latent.data = NULL,
split.by = NULL,
model.use = "linear",
use.umi = FALSE,
do.scale = TRUE,
do.center = TRUE,
scale.max = 10,
block.size = 1000,
min.cells.to.block = 3000,
verbose = TRUE,
...
)
\method{ScaleData}{IterableMatrix}(
object,
features = NULL,
do.scale = TRUE,
do.center = TRUE,
scale.max = 10,
...
)
\method{ScaleData}{Assay}(
object,
features = NULL,
vars.to.regress = NULL,
latent.data = NULL,
split.by = NULL,
model.use = "linear",
use.umi = FALSE,
do.scale = TRUE,
do.center = TRUE,
scale.max = 10,
block.size = 1000,
min.cells.to.block = 3000,
verbose = TRUE,
...
)
\method{ScaleData}{Seurat}(
object,
features = NULL,
assay = NULL,
vars.to.regress = NULL,
split.by = NULL,
model.use = "linear",
use.umi = FALSE,
do.scale = TRUE,
do.center = TRUE,
scale.max = 10,
block.size = 1000,
min.cells.to.block = 3000,
verbose = TRUE,
...
)
}
\arguments{
\item{object}{An object}
\item{...}{Arguments passed to other methods}
\item{features}{Vector of features names to scale/center. Default is variable features.}
\item{vars.to.regress}{Variables to regress out (previously latent.vars in
RegressOut). For example, nUMI, or percent.mito.}
\item{latent.data}{Extra data to regress out, should be cells x latent data}
\item{split.by}{Name of variable in object metadata or a vector or factor defining
grouping of cells. See argument \code{f} in \code{\link[base]{split}} for more details}
\item{model.use}{Use a linear model or generalized linear model
(poisson, negative binomial) for the regression. Options are 'linear'
(default), 'poisson', and 'negbinom'}
\item{use.umi}{Regress on UMI count data. Default is FALSE for linear
modeling, but automatically set to TRUE if model.use is 'negbinom' or 'poisson'}
\item{do.scale}{Whether to scale the data.}
\item{do.center}{Whether to center the data.}
\item{scale.max}{Max value to return for scaled data. The default is 10.
Setting this can help reduce the effects of features that are only expressed in
a very small number of cells. If regressing out latent variables and using a
non-linear model, the default is 50.}
\item{block.size}{Default size for number of features to scale at in a single
computation. Increasing block.size may speed up calculations but at an
additional memory cost.}
\item{min.cells.to.block}{If object contains fewer than this number of cells,
don't block for scaling calculations.}
\item{verbose}{Displays a progress bar for scaling procedure}
\item{assay}{Name of Assay to scale}
}
\description{
Scales and centers features in the dataset. If variables are provided in vars.to.regress,
they are individually regressed against each feature, and the resulting residuals are
then scaled and centered.
}
\details{
ScaleData now incorporates the functionality of the function formerly known
as RegressOut (which regressed out given the effects of provided variables
and then scaled the residuals). To make use of the regression functionality,
simply pass the variables you want to remove to the vars.to.regress parameter.
Setting center to TRUE will center the expression for each feature by subtracting
the average expression for that feature. Setting scale to TRUE will scale the
expression level for each feature by dividing the centered feature expression
levels by their standard deviations if center is TRUE and by their root mean
square otherwise.
}
\concept{preprocessing}
Seurat/man/Neighbor-class.Rd 0000644 0001762 0000144 00000000564 14525500037 015425 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reexports.R
\docType{class}
\name{Neighbor-class}
\alias{Neighbor-class}
\title{The Neighbor Class}
\description{
For more details, please see the documentation in
\code{\link[SeuratObject:Neighbor]{SeuratObject}}
}
\seealso{
\code{\link[SeuratObject:Neighbor]{SeuratObject::Neighbor-class}}
}
Seurat/man/ReadSTARsolo.Rd 0000644 0001762 0000144 00000000616 14525500037 015025 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convenience.R
\name{ReadSTARsolo}
\alias{ReadSTARsolo}
\title{Read output from STARsolo}
\usage{
ReadSTARsolo(data.dir, ...)
}
\arguments{
\item{data.dir}{Directory containing the data files}
\item{...}{Extra parameters passed to \code{\link{ReadMtx}}}
}
\description{
Read output from STARsolo
}
\concept{convenience}
Seurat/man/SpatialPlot.Rd 0000644 0001762 0000144 00000013463 14525500037 015023 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R, R/convenience.R
\name{SpatialPlot}
\alias{SpatialPlot}
\alias{SpatialDimPlot}
\alias{SpatialFeaturePlot}
\title{Visualize spatial clustering and expression data.}
\usage{
SpatialPlot(
object,
group.by = NULL,
features = NULL,
images = NULL,
cols = NULL,
image.alpha = 1,
crop = TRUE,
slot = "data",
keep.scale = "feature",
min.cutoff = NA,
max.cutoff = NA,
cells.highlight = NULL,
cols.highlight = c("#DE2D26", "grey50"),
facet.highlight = FALSE,
label = FALSE,
label.size = 5,
label.color = "white",
label.box = TRUE,
repel = FALSE,
ncol = NULL,
combine = TRUE,
pt.size.factor = 1.6,
alpha = c(1, 1),
stroke = 0.25,
interactive = FALSE,
do.identify = FALSE,
identify.ident = NULL,
do.hover = FALSE,
information = NULL
)
SpatialDimPlot(
object,
group.by = NULL,
images = NULL,
cols = NULL,
crop = TRUE,
cells.highlight = NULL,
cols.highlight = c("#DE2D26", "grey50"),
facet.highlight = FALSE,
label = FALSE,
label.size = 7,
label.color = "white",
repel = FALSE,
ncol = NULL,
combine = TRUE,
pt.size.factor = 1.6,
alpha = c(1, 1),
image.alpha = 1,
stroke = 0.25,
label.box = TRUE,
interactive = FALSE,
information = NULL
)
SpatialFeaturePlot(
object,
features,
images = NULL,
crop = TRUE,
slot = "data",
keep.scale = "feature",
min.cutoff = NA,
max.cutoff = NA,
ncol = NULL,
combine = TRUE,
pt.size.factor = 1.6,
alpha = c(1, 1),
image.alpha = 1,
stroke = 0.25,
interactive = FALSE,
information = NULL
)
}
\arguments{
\item{object}{A Seurat object}
\item{group.by}{Name of meta.data column to group the data by}
\item{features}{Name of the feature to visualize. Provide either group.by OR
features, not both.}
\item{images}{Name of the images to use in the plot(s)}
\item{cols}{Vector of colors, each color corresponds to an identity class.
This may also be a single character or numeric value corresponding to a
palette as specified by \code{\link[RColorBrewer]{brewer.pal.info}}. By
default, ggplot2 assigns colors}
\item{image.alpha}{Adjust the opacity of the background images. Set to 0 to
remove.}
\item{crop}{Crop the plot in to focus on points plotted. Set to \code{FALSE} to show
entire background image.}
\item{slot}{If plotting a feature, which data slot to pull from (counts,
data, or scale.data)}
\item{keep.scale}{How to handle the color scale across multiple plots. Options are:
\itemize{
\item \dQuote{feature} (default; by row/feature scaling): The plots for
each individual feature are scaled to the maximum expression of the
feature across the conditions provided to \code{split.by}
\item \dQuote{all} (universal scaling): The plots for all features and
conditions are scaled to the maximum expression value for the feature
with the highest overall expression
\item \code{NULL} (no scaling): Each individual plot is scaled to the
maximum expression value of the feature in the condition provided to
\code{split.by}; be aware setting \code{NULL} will result in color
scales that are not comparable between plots
}}
\item{min.cutoff, max.cutoff}{Vector of minimum and maximum cutoff
values for each feature, may specify quantile in the form of 'q##' where '##'
is the quantile (eg, 'q1', 'q10')}
\item{cells.highlight}{A list of character or numeric vectors of cells to
highlight. If only one group of cells desired, can simply pass a vector
instead of a list. If set, colors selected cells to the color(s) in
cols.highlight}
\item{cols.highlight}{A vector of colors to highlight the cells as; ordered
the same as the groups in cells.highlight; last color corresponds to
unselected cells.}
\item{facet.highlight}{When highlighting certain groups of cells, split each
group into its own plot}
\item{label}{Whether to label the clusters}
\item{label.size}{Sets the size of the labels}
\item{label.color}{Sets the color of the label text}
\item{label.box}{Whether to put a box around the label text (geom_text vs
geom_label)}
\item{repel}{Repels the labels to prevent overlap}
\item{ncol}{Number of columns if plotting multiple plots}
\item{combine}{Combine plots into a single gg object; note that if TRUE;
themeing will not work when plotting multiple features/groupings}
\item{pt.size.factor}{Scale the size of the spots.}
\item{alpha}{Controls opacity of spots. Provide as a vector specifying the
min and max for SpatialFeaturePlot. For SpatialDimPlot, provide a single
alpha value for each plot.}
\item{stroke}{Control the width of the border around the spots}
\item{interactive}{Launch an interactive SpatialDimPlot or SpatialFeaturePlot
session, see \code{\link{ISpatialDimPlot}} or
\code{\link{ISpatialFeaturePlot}} for more details}
\item{do.identify, do.hover}{DEPRECATED in favor of \code{interactive}}
\item{identify.ident}{DEPRECATED}
\item{information}{An optional dataframe or matrix of extra information to be displayed on hover}
}
\value{
If \code{do.identify}, either a vector of cells selected or the object
with selected cells set to the value of \code{identify.ident} (if set). Else,
if \code{do.hover}, a plotly object with interactive graphics. Else, a ggplot
object
}
\description{
SpatialPlot plots a feature or discrete grouping (e.g. cluster assignments) as
spots over the image that was collected. We also provide SpatialFeaturePlot
and SpatialDimPlot as wrapper functions around SpatialPlot for a consistent
naming framework.
}
\examples{
\dontrun{
# For functionality analogous to FeaturePlot
SpatialPlot(seurat.object, features = "MS4A1")
SpatialFeaturePlot(seurat.object, features = "MS4A1")
# For functionality analogous to DimPlot
SpatialPlot(seurat.object, group.by = "clusters")
SpatialDimPlot(seurat.object, group.by = "clusters")
}
}
\concept{convenience}
\concept{spatial}
\concept{visualization}
Seurat/man/BuildClusterTree.Rd 0000644 0001762 0000144 00000004014 14525500037 016000 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree.R
\name{BuildClusterTree}
\alias{BuildClusterTree}
\title{Phylogenetic Analysis of Identity Classes}
\usage{
BuildClusterTree(
object,
assay = NULL,
features = NULL,
dims = NULL,
reduction = "pca",
graph = NULL,
slot = "data",
reorder = FALSE,
reorder.numeric = FALSE,
verbose = TRUE
)
}
\arguments{
\item{object}{Seurat object}
\item{assay}{Assay to use for the analysis.}
\item{features}{Genes to use for the analysis. Default is the set of
variable genes (\code{VariableFeatures(object = object)})}
\item{dims}{If set, tree is calculated in dimension reduction space;
overrides \code{features}}
\item{reduction}{Name of dimension reduction to use. Only used if \code{dims}
is not NULL.}
\item{graph}{If graph is passed, build tree based on graph connectivity between
clusters; overrides \code{dims} and \code{features}}
\item{slot}{(Deprecated). Slots(s) to use}
\item{reorder}{Re-order identity classes (factor ordering), according to
position on the tree. This groups similar classes together which can be
helpful, for example, when drawing violin plots.}
\item{reorder.numeric}{Re-order identity classes according to position on
the tree, assigning a numeric value ('1' is the leftmost node)}
\item{verbose}{Show progress updates}
}
\value{
A Seurat object where the cluster tree can be accessed with \code{\link{Tool}}
}
\description{
Constructs a phylogenetic tree relating the 'average' cell from each
identity class. Tree is estimated based on a distance matrix constructed in
either gene expression space or PCA space.
}
\details{
Note that the tree is calculated for an 'average' cell, so gene expression
or PC scores are averaged across all cells in an identity class before the
tree is constructed.
}
\examples{
\dontrun{
if (requireNamespace("ape", quietly = TRUE)) {
data("pbmc_small")
pbmc_small
pbmc_small <- BuildClusterTree(object = pbmc_small)
Tool(object = pbmc_small, slot = 'BuildClusterTree')
}
}
}
\concept{tree}
Seurat/man/TopCells.Rd 0000644 0001762 0000144 00000001572 14525500037 014312 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{TopCells}
\alias{TopCells}
\title{Find cells with highest scores for a given dimensional reduction technique}
\usage{
TopCells(object, dim = 1, ncells = 20, balanced = FALSE, ...)
}
\arguments{
\item{object}{DimReduc object}
\item{dim}{Dimension to use}
\item{ncells}{Number of cells to return}
\item{balanced}{Return an equal number of cells with both + and - scores.}
\item{...}{Extra parameters passed to \code{\link{Embeddings}}}
}
\value{
Returns a vector of cells
}
\description{
Return a list of genes with the strongest contribution to a set of components
}
\examples{
data("pbmc_small")
pbmc_small
head(TopCells(object = pbmc_small[["pca"]]))
# Can specify which dimension and how many cells to return
TopCells(object = pbmc_small[["pca"]], dim = 2, ncells = 5)
}
\concept{objects}
Seurat/man/FindBridgeIntegrationAnchors.Rd 0000644 0001762 0000144 00000004162 14525500037 020302 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{FindBridgeIntegrationAnchors}
\alias{FindBridgeIntegrationAnchors}
\title{Find integration bridge anchors between query and extended bridge-reference}
\usage{
FindBridgeIntegrationAnchors(
extended.reference,
query,
query.assay = NULL,
dims = 1:30,
scale = FALSE,
reduction = c("lsiproject", "pcaproject"),
integration.reduction = c("direct", "cca"),
verbose = TRUE
)
}
\arguments{
\item{extended.reference}{BridgeReferenceSet object generated from
\code{\link{PrepareBridgeReference}}}
\item{query}{A query Seurat object}
\item{query.assay}{Assay name for query-bridge integration}
\item{dims}{Number of dimensions for query-bridge integration}
\item{scale}{Determine if scale the query data for projection}
\item{reduction}{Dimensional reduction to perform when finding anchors.
Options are:
\itemize{
\item{pcaproject: Project the PCA from the bridge onto the query. We
recommend using PCA when bridge and query datasets are from scRNA-seq}
\item{lsiproject: Project the LSI from the bridge onto the query. We
recommend using LSI when bridge and query datasets are from scATAC-seq or scCUT&TAG data.
This requires that LSI or supervised LSI has been computed for the bridge dataset, and the
same features (eg, peaks or genome bins) are present in both the bridge
and query.
}
}}
\item{integration.reduction}{Dimensional reduction to perform when finding anchors
between query and reference.
Options are:
\itemize{
\item{direct: find anchors directly on the bridge representation space}
\item{cca: perform cca on the bridge representation space and then find anchors
}
}}
\item{verbose}{Print messages and progress}
}
\value{
Returns an \code{AnchorSet} object that can be used as input to
\code{\link{IntegrateEmbeddings}}.
}
\description{
Find a set of anchors between unimodal query and the other unimodal reference
using a pre-computed \code{\link{BridgeReferenceSet}}.
These integration anchors can later be used to integrate query and reference
using the \code{\link{IntegrateEmbeddings}} object.
}
Seurat/man/FindAllMarkers.Rd 0000644 0001762 0000144 00000015026 14525500037 015422 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/differential_expression.R
\name{FindAllMarkers}
\alias{FindAllMarkers}
\alias{FindAllMarkersNode}
\title{Gene expression markers for all identity classes}
\usage{
FindAllMarkers(
object,
assay = NULL,
features = NULL,
logfc.threshold = 0.1,
test.use = "wilcox",
slot = "data",
min.pct = 0.01,
min.diff.pct = -Inf,
node = NULL,
verbose = TRUE,
only.pos = FALSE,
max.cells.per.ident = Inf,
random.seed = 1,
latent.vars = NULL,
min.cells.feature = 3,
min.cells.group = 3,
mean.fxn = NULL,
fc.name = NULL,
base = 2,
return.thresh = 0.01,
densify = FALSE,
...
)
}
\arguments{
\item{object}{An object}
\item{assay}{Assay to use in differential expression testing}
\item{features}{Genes to test. Default is to use all genes}
\item{logfc.threshold}{Limit testing to genes which show, on average, at least
X-fold difference (log-scale) between the two groups of cells. Default is 0.1
Increasing logfc.threshold speeds up the function, but can miss weaker signals.}
\item{test.use}{Denotes which test to use. Available options are:
\itemize{
\item{"wilcox"} : Identifies differentially expressed genes between two
groups of cells using a Wilcoxon Rank Sum test (default); will use a fast
implementation by Presto if installed
\item{"wilcox_limma"} : Identifies differentially expressed genes between two
groups of cells using the limma implementation of the Wilcoxon Rank Sum test;
set this option to reproduce results from Seurat v4
\item{"bimod"} : Likelihood-ratio test for single cell gene expression,
(McDavid et al., Bioinformatics, 2013)
\item{"roc"} : Identifies 'markers' of gene expression using ROC analysis.
For each gene, evaluates (using AUC) a classifier built on that gene alone,
to classify between two groups of cells. An AUC value of 1 means that
expression values for this gene alone can perfectly classify the two
groupings (i.e. Each of the cells in cells.1 exhibit a higher level than
each of the cells in cells.2). An AUC value of 0 also means there is perfect
classification, but in the other direction. A value of 0.5 implies that
the gene has no predictive power to classify the two groups. Returns a
'predictive power' (abs(AUC-0.5) * 2) ranked matrix of putative differentially
expressed genes.
\item{"t"} : Identify differentially expressed genes between two groups of
cells using the Student's t-test.
\item{"negbinom"} : Identifies differentially expressed genes between two
groups of cells using a negative binomial generalized linear model.
Use only for UMI-based datasets
\item{"poisson"} : Identifies differentially expressed genes between two
groups of cells using a poisson generalized linear model.
Use only for UMI-based datasets
\item{"LR"} : Uses a logistic regression framework to determine differentially
expressed genes. Constructs a logistic regression model predicting group
membership based on each feature individually and compares this to a null
model with a likelihood ratio test.
\item{"MAST"} : Identifies differentially expressed genes between two groups
of cells using a hurdle model tailored to scRNA-seq data. Utilizes the MAST
package to run the DE testing.
\item{"DESeq2"} : Identifies differentially expressed genes between two groups
of cells based on a model using DESeq2 which uses a negative binomial
distribution (Love et al, Genome Biology, 2014). This test does not support
pre-filtering of genes based on average difference (or percent detection rate)
between cell groups. However, genes may be pre-filtered based on their
minimum detection rate (min.pct) across both cell groups. To use this method,
please install DESeq2, using the instructions at
https://bioconductor.org/packages/release/bioc/html/DESeq2.html
}}
\item{slot}{Slot to pull data from; note that if \code{test.use} is "negbinom", "poisson", or "DESeq2",
\code{slot} will be set to "counts"}
\item{min.pct}{only test genes that are detected in a minimum fraction of
min.pct cells in either of the two populations. Meant to speed up the function
by not testing genes that are very infrequently expressed. Default is 0.01}
\item{min.diff.pct}{only test genes that show a minimum difference in the
fraction of detection between the two groups. Set to -Inf by default}
\item{node}{A node to find markers for and all its children; requires
\code{\link{BuildClusterTree}} to have been run previously; replaces \code{FindAllMarkersNode}}
\item{verbose}{Print a progress bar once expression testing begins}
\item{only.pos}{Only return positive markers (FALSE by default)}
\item{max.cells.per.ident}{Down sample each identity class to a max number.
Default is no downsampling. Not activated by default (set to Inf)}
\item{random.seed}{Random seed for downsampling}
\item{latent.vars}{Variables to test, used only when \code{test.use} is one of
'LR', 'negbinom', 'poisson', or 'MAST'}
\item{min.cells.feature}{Minimum number of cells expressing the feature in at least one
of the two groups, currently only used for poisson and negative binomial tests}
\item{min.cells.group}{Minimum number of cells in one of the groups}
\item{mean.fxn}{Function to use for fold change or average difference calculation.
If NULL, the appropriate function will be chosen according to the slot used}
\item{fc.name}{Name of the fold change, average difference, or custom function column
in the output data.frame. If NULL, the fold change column will be named
according to the logarithm base (eg, "avg_log2FC"), or if using the scale.data
slot "avg_diff".}
\item{base}{The base with respect to which logarithms are computed.}
\item{return.thresh}{Only return markers that have a p-value < return.thresh, or a power > return.thresh (if the test is ROC)}
\item{densify}{Convert the sparse matrix to a dense form before running the DE test. This can provide speedups but might require higher memory; default is FALSE}
\item{...}{Arguments passed to other methods and to specific DE methods}
}
\value{
Matrix containing a ranked list of putative markers, and associated
statistics (p-values, ROC score, etc.)
}
\description{
Finds markers (differentially expressed genes) for each of the identity classes in a dataset
}
\examples{
data("pbmc_small")
# Find markers for all clusters
all.markers <- FindAllMarkers(object = pbmc_small)
head(x = all.markers)
\dontrun{
# Pass a value to node as a replacement for FindAllMarkersNode
pbmc_small <- BuildClusterTree(object = pbmc_small)
all.markers <- FindAllMarkers(object = pbmc_small, node = 4)
head(x = all.markers)
}
}
\concept{differential_expression}
Seurat/man/GroupCorrelation.Rd 0000644 0001762 0000144 00000002125 14525500037 016056 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{GroupCorrelation}
\alias{GroupCorrelation}
\title{Compute the correlation of features broken down by groups with another
covariate}
\usage{
GroupCorrelation(
object,
assay = NULL,
slot = "scale.data",
var = NULL,
group.assay = NULL,
min.cells = 5,
ngroups = 6,
do.plot = TRUE
)
}
\arguments{
\item{object}{Seurat object}
\item{assay}{Assay to pull the data from}
\item{slot}{Slot in the assay to pull feature expression data from (counts,
data, or scale.data)}
\item{var}{Variable with which to correlate the features}
\item{group.assay}{Compute the gene groups based off the data in this assay.}
\item{min.cells}{Only compute for genes in at least this many cells}
\item{ngroups}{Number of groups to split into}
\item{do.plot}{Display the group correlation boxplot (via
\code{GroupCorrelationPlot})}
}
\value{
A Seurat object with the correlation stored in metafeatures
}
\description{
Compute the correlation of features broken down by groups with another
covariate
}
\concept{utilities}
Seurat/man/GetTransferPredictions.Rd 0000644 0001762 0000144 00000002077 14525500037 017216 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{GetTransferPredictions}
\alias{GetTransferPredictions}
\title{Get the predicted identity}
\usage{
GetTransferPredictions(
object,
assay = "predictions",
slot = "data",
score.filter = 0.75
)
}
\arguments{
\item{object}{Seurat object}
\item{assay}{Name of the assay holding the predictions}
\item{slot}{Slot of the assay in which the prediction scores are stored}
\item{score.filter}{Return "Unassigned" for any cell with a score less than
this value}
}
\value{
Returns a vector of predicted class names
}
\description{
Utility function to easily pull out the name of the class with the maximum
prediction. This is useful if you've set \code{prediction.assay = TRUE} in
\code{\link{TransferData}} and want to have a vector with the predicted class.
}
\examples{
\dontrun{
prediction.assay <- TransferData(anchorset = anchors, refdata = reference$class)
query[["predictions"]] <- prediction.assay
query$predicted.id <- GetTransferPredictions(query)
}
}
\concept{integration}
Seurat/man/as.CellDataSet.Rd 0000644 0001762 0000144 00000001160 14525500037 015305 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/objects.R
\name{as.CellDataSet}
\alias{as.CellDataSet}
\alias{as.CellDataSet.Seurat}
\title{Convert objects to CellDataSet objects}
\usage{
as.CellDataSet(x, ...)
\method{as.CellDataSet}{Seurat}(x, assay = NULL, reduction = NULL, ...)
}
\arguments{
\item{x}{An object to convert to class \code{CellDataSet}}
\item{...}{Arguments passed to other methods}
\item{assay}{Assay to convert}
\item{reduction}{Name of DimReduc to set to main reducedDim in cds}
}
\description{
Convert objects to CellDataSet objects
}
\concept{objects}
Seurat/man/FindVariableFeatures.Rd 0000644 0001762 0000144 00000011276 14525500037 016614 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/preprocessing.R
\name{FindVariableFeatures}
\alias{FindVariableFeatures}
\alias{FindVariableGenes}
\alias{FindVariableFeatures.V3Matrix}
\alias{FindVariableFeatures.Assay}
\alias{FindVariableFeatures.SCTAssay}
\alias{FindVariableFeatures.Seurat}
\title{Find variable features}
\usage{
FindVariableFeatures(object, ...)
\method{FindVariableFeatures}{V3Matrix}(
object,
selection.method = "vst",
loess.span = 0.3,
clip.max = "auto",
mean.function = FastExpMean,
dispersion.function = FastLogVMR,
num.bin = 20,
binning.method = "equal_width",
verbose = TRUE,
...
)
\method{FindVariableFeatures}{Assay}(
object,
selection.method = "vst",
loess.span = 0.3,
clip.max = "auto",
mean.function = FastExpMean,
dispersion.function = FastLogVMR,
num.bin = 20,
binning.method = "equal_width",
nfeatures = 2000,
mean.cutoff = c(0.1, 8),
dispersion.cutoff = c(1, Inf),
verbose = TRUE,
...
)
\method{FindVariableFeatures}{SCTAssay}(object, nfeatures = 2000, ...)
\method{FindVariableFeatures}{Seurat}(
object,
assay = NULL,
selection.method = "vst",
loess.span = 0.3,
clip.max = "auto",
mean.function = FastExpMean,
dispersion.function = FastLogVMR,
num.bin = 20,
binning.method = "equal_width",
nfeatures = 2000,
mean.cutoff = c(0.1, 8),
dispersion.cutoff = c(1, Inf),
verbose = TRUE,
...
)
}
\arguments{
\item{object}{An object}
\item{...}{Arguments passed to other methods}
\item{selection.method}{How to choose top variable features. Choose one of :
\itemize{
\item \dQuote{\code{vst}}: First, fits a line to the relationship of
log(variance) and log(mean) using local polynomial regression (loess).
Then standardizes the feature values using the observed mean and
expected variance (given by the fitted line). Feature variance is then
calculated on the standardized values
after clipping to a maximum (see clip.max parameter).
\item \dQuote{\code{mean.var.plot}} (mvp): First, uses a function to
calculate average expression (mean.function) and dispersion
(dispersion.function) for each feature. Next, divides features into
\code{num.bin} (default 20) bins based on their average expression,
and calculates z-scores for dispersion within each bin. The purpose of
this is to identify variable features while controlling for the
strong relationship between variability and average expression
\item \dQuote{\code{dispersion}} (disp): selects the genes with the
highest dispersion values
}}
\item{loess.span}{(vst method) Loess span parameter used when fitting the
variance-mean relationship}
\item{clip.max}{(vst method) After standardization values larger than
clip.max will be set to clip.max; default is 'auto' which sets this value to
the square root of the number of cells}
\item{mean.function}{Function to compute x-axis value (average expression).
Default is to take the mean of the detected (i.e. non-zero) values}
\item{dispersion.function}{Function to compute y-axis value (dispersion).
Default is to take the standard deviation of all values}
\item{num.bin}{Total number of bins to use in the scaled analysis (default
is 20)}
\item{binning.method}{Specifies how the bins should be computed. Available
methods are:
\itemize{
\item \dQuote{\code{equal_width}}: each bin is of equal width along the
x-axis (default)
\item \dQuote{\code{equal_frequency}}: each bin contains an equal number
of features (can increase statistical power to detect overdispersed
features at high expression values, at the cost of reduced resolution
along the x-axis)
}}
\item{verbose}{show progress bar for calculations}
\item{nfeatures}{Number of features to select as top variable features;
only used when \code{selection.method} is set to \code{'dispersion'} or
\code{'vst'}}
\item{mean.cutoff}{A two-length numeric vector with low- and high-cutoffs for
feature means}
\item{dispersion.cutoff}{A two-length numeric vector with low- and high-cutoffs for
feature dispersions}
\item{assay}{Assay to use}
}
\description{
Identifies features that are outliers on a 'mean variability plot'.
}
\details{
For the mean.var.plot method:
Exact parameter settings may vary empirically from dataset to dataset, and
based on visual inspection of the plot. Setting the y.cutoff parameter to 2
identifies features that are more than two standard deviations away from the
average dispersion within a bin. The default X-axis function is the mean
expression level, and for Y-axis it is the log(Variance/mean). All mean/variance
calculations are not performed in log-space, but the results are reported in
log-space - see relevant functions for exact details.
}
\concept{preprocessing}
Seurat/man/L2CCA.Rd 0000644 0001762 0000144 00000000541 14525500037 013344 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dimensional_reduction.R
\name{L2CCA}
\alias{L2CCA}
\title{L2-Normalize CCA}
\usage{
L2CCA(object, ...)
}
\arguments{
\item{object}{Seurat object}
\item{\dots}{Additional parameters to L2Dim.}
}
\description{
Perform l2 normalization on CCs
}
\concept{dimensional_reduction}
Seurat/man/Read10X_probe_metadata.Rd 0000644 0001762 0000144 00000001226 14525500037 016754 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{Read10X_probe_metadata}
\alias{Read10X_probe_metadata}
\title{Read10x Probe Metadata}
\usage{
Read10X_probe_metadata(data.dir, filename = "raw_probe_bc_matrix.h5")
}
\arguments{
\item{data.dir}{The directory where the file is located.}
\item{filename}{The name of the file containing the raw probe barcode matrix in HDF5 format. The default filename is 'raw_probe_bc_matrix.h5'.}
}
\value{
Returns a data.frame containing the probe metadata.
}
\description{
This function reads the probe metadata from a 10x Genomics probe barcode matrix file in HDF5 format.
}
Seurat/man/MVP.Rd 0000644 0001762 0000144 00000001312 14525500037 013217 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing5.R
\name{MVP}
\alias{MVP}
\title{Find variable features based on mean.var.plot}
\usage{
MVP(
data,
verbose = TRUE,
nselect = 2000L,
mean.cutoff = c(0.1, 8),
dispersion.cutoff = c(1, Inf),
...
)
}
\arguments{
\item{data}{Data matrix}
\item{verbose}{Whether to print messages and progress bars}
\item{nselect}{Number of features to select based on dispersion values}
\item{mean.cutoff}{Numeric of length two specifying the min and max values}
\item{dispersion.cutoff}{Numeric of length two specifying the min and max values}
}
\description{
Find variable features based on mean.var.plot
}
\keyword{internal}
Seurat/man/ImageDimPlot.Rd 0000644 0001762 0000144 00000006405 14525500037 015100 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{ImageDimPlot}
\alias{ImageDimPlot}
\title{Spatial Cluster Plots}
\usage{
ImageDimPlot(
object,
fov = NULL,
boundaries = NULL,
group.by = NULL,
split.by = NULL,
cols = NULL,
shuffle.cols = FALSE,
size = 0.5,
molecules = NULL,
mols.size = 0.1,
mols.cols = NULL,
mols.alpha = 1,
nmols = 1000,
alpha = 1,
border.color = "white",
border.size = NULL,
na.value = "grey50",
dark.background = TRUE,
crop = FALSE,
cells = NULL,
overlap = FALSE,
axes = FALSE,
combine = TRUE,
coord.fixed = TRUE,
flip_xy = TRUE
)
}
\arguments{
\item{object}{A \code{\link[SeuratObject]{Seurat}} object}
\item{fov}{Name of FOV to plot}
\item{boundaries}{A vector of segmentation boundaries per image to plot;
can be a character vector, a named character vector, or a named list.
Names should be the names of FOVs and values should be the names of
segmentation boundaries}
\item{group.by}{Name of one or more metadata columns to group (color) cells by
(for example, orig.ident); pass 'ident' to group by identity class}
\item{split.by}{A factor in object metadata to split the plot by, pass 'ident'
to split by cell identity}
\item{cols}{Vector of colors, each color corresponds to an identity class. This may also be a single character
or numeric value corresponding to a palette as specified by \code{\link[RColorBrewer]{brewer.pal.info}}.
By default, ggplot2 assigns colors. We also include a number of palettes from the pals package.
See \code{\link{DiscretePalette}} for details.}
\item{shuffle.cols}{Randomly shuffle colors when a palette or
vector of colors is provided to \code{cols}}
\item{size}{Point size for cells when plotting centroids}
\item{molecules}{A vector of molecules to plot}
\item{mols.size}{Point size for molecules}
\item{mols.cols}{A vector of color for molecules. The "Set1" palette from
RColorBrewer is used by default.}
\item{mols.alpha}{Alpha value for molecules, should be between 0 and 1}
\item{nmols}{Max number of each molecule specified in `molecules` to plot}
\item{alpha}{Alpha value for plotting (default is 1)}
\item{border.color}{Color of cell segmentation border; pass \code{NA}
to suppress borders for segmentation-based plots}
\item{border.size}{Thickness of cell segmentation borders; pass \code{NA}
to suppress borders for centroid-based plots}
\item{na.value}{Color value for NA points when using custom scale}
\item{dark.background}{Set plot background to black}
\item{crop}{Crop the plots to area with cells only}
\item{cells}{Vector of cells to plot (default is all cells)}
\item{overlap}{Overlay boundaries from a single image to create a single
plot; if \code{TRUE}, then boundaries are stacked in the order they're
given (first is lowest)}
\item{axes}{Keep axes and panel background}
\item{combine}{Combine plots into a single
\code{patchwork} ggplot object. If \code{FALSE},
return a list of ggplot objects}
\item{coord.fixed}{Plot cartesian coordinates with fixed aspect ratio}
\item{flip_xy}{Flag to flip X and Y axes. Default is TRUE.}
}
\value{
If \code{combine = TRUE}, a \code{patchwork}
ggplot object; otherwise, a list of ggplot objects
}
\description{
Visualize clusters or other categorical groupings in a spatial context
}
Seurat/man/RunGraphLaplacian.Rd 0000644 0001762 0000144 00000002427 14525500037 016120 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/integration.R
\name{RunGraphLaplacian}
\alias{RunGraphLaplacian}
\alias{RunGraphLaplacian.Seurat}
\alias{RunGraphLaplacian.default}
\title{Run Graph Laplacian Eigendecomposition}
\usage{
RunGraphLaplacian(object, ...)
\method{RunGraphLaplacian}{Seurat}(
object,
graph,
reduction.name = "lap",
reduction.key = "LAP_",
n = 50,
verbose = TRUE,
...
)
\method{RunGraphLaplacian}{default}(object, n = 50, reduction.key = "LAP_", verbose = TRUE, ...)
}
\arguments{
\item{object}{A Seurat object}
\item{...}{Arguments passed to eigs_sym}
\item{graph}{The name of graph}
\item{reduction.name}{dimensional reduction name, lap by default}
\item{reduction.key}{dimensional reduction key, specifies the string before
the number for the dimension names. LAP by default}
\item{n}{Total Number of Eigenvectors to compute and store (50 by default)}
\item{verbose}{Print message and process}
}
\value{
Returns Seurat object with the Graph laplacian eigenvector
calculation stored in the reductions slot
}
\description{
Run a graph laplacian dimensionality reduction. It is used as a low
dimensional representation for a cell-cell graph. The input graph
should be symmetric
}
\concept{dimensional_reduction}
Seurat/man/SetQuantile.Rd 0000644 0001762 0000144 00000001356 14525500037 015023 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{SetQuantile}
\alias{SetQuantile}
\title{Find the Quantile of Data}
\usage{
SetQuantile(cutoff, data)
}
\arguments{
\item{cutoff}{The cutoff to turn into a quantile}
\item{data}{The data to find the quantile of}
}
\value{
The numerical representation of the quantile
}
\description{
Converts a quantile in character form to a number regarding some data.
String form for a quantile is represented as a number prefixed with
\dQuote{q}; for example, 10th quantile is \dQuote{q10} while 2nd quantile is
\dQuote{q2}. Will only take a quantile of non-zero data values
}
\examples{
set.seed(42)
SetQuantile('q10', sample(1:100, 10))
}
\concept{utilities}
Seurat/man/AnnotateAnchors.Rd 0000644 0001762 0000144 00000002564 14525500037 015656 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/integration.R
\name{AnnotateAnchors}
\alias{AnnotateAnchors}
\alias{AnnotateAnchors.default}
\alias{AnnotateAnchors.IntegrationAnchorSet}
\alias{AnnotateAnchors.TransferAnchorSet}
\title{Add info to anchor matrix}
\usage{
AnnotateAnchors(anchors, vars, slot, ...)
\method{AnnotateAnchors}{default}(
anchors,
vars = NULL,
slot = NULL,
object.list,
assay = NULL,
...
)
\method{AnnotateAnchors}{IntegrationAnchorSet}(
anchors,
vars = NULL,
slot = NULL,
object.list = NULL,
assay = NULL,
...
)
\method{AnnotateAnchors}{TransferAnchorSet}(
anchors,
vars = NULL,
slot = NULL,
reference = NULL,
query = NULL,
assay = NULL,
...
)
}
\arguments{
\item{anchors}{An \code{\link{AnchorSet}} object}
\item{vars}{Variables to pull for each object via FetchData}
\item{slot}{Slot to pull feature data for}
\item{...}{Arguments passed to other methods}
\item{object.list}{List of Seurat objects}
\item{assay}{Specify the Assay per object if annotating with expression data}
\item{reference}{Reference object used in \code{\link{FindTransferAnchors}}}
\item{query}{Query object used in \code{\link{FindTransferAnchors}}}
}
\value{
Returns the anchor dataframe with additional columns for annotation
metadata
}
\description{
Add info to anchor matrix
}
\concept{integration}
Seurat/man/FindBridgeAnchor.Rd 0000644 0001762 0000144 00000005412 14525500037 015712 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{FindBridgeAnchor}
\alias{FindBridgeAnchor}
\title{Find bridge anchors between two unimodal datasets}
\usage{
FindBridgeAnchor(
object.list,
bridge.object,
object.reduction,
bridge.reduction,
anchor.type = c("Transfer", "Integration"),
reference = NULL,
laplacian.reduction = "lap",
laplacian.dims = 1:50,
reduction = c("direct", "cca"),
bridge.assay.name = "Bridge",
reference.bridge.stored = FALSE,
k.anchor = 20,
k.score = 50,
verbose = TRUE,
...
)
}
\arguments{
\item{object.list}{A list of Seurat objects}
\item{bridge.object}{A multi-omic bridge Seurat which is used as the basis to
represent unimodal datasets}
\item{object.reduction}{A list of dimensional reductions from object.list used
to be reconstructed by bridge.object}
\item{bridge.reduction}{A list of dimensional reductions from bridge.object used
to reconstruct object.reduction}
\item{anchor.type}{The type of anchors. Can
be one of:
\itemize{
\item{Integration: Generate IntegrationAnchors for integration}
\item{Transfer: Generate TransferAnchors for transfering data}
}}
\item{reference}{A vector specifying the object/s to be used as a reference
during integration or transfer data.}
\item{laplacian.reduction}{Name of bridge graph laplacian dimensional reduction}
\item{laplacian.dims}{Dimensions used for bridge graph laplacian dimensional reduction}
\item{reduction}{Dimensional reduction to perform when finding anchors. Can
be one of:
\itemize{
\item{cca: Canonical correlation analysis}
\item{direct: Use assay data as a dimensional reduction}
}}
\item{bridge.assay.name}{Assay name used for bridge object reconstruction value (default is 'Bridge')}
\item{reference.bridge.stored}{If reference has stored the bridge dictionary representation}
\item{k.anchor}{How many neighbors (k) to use when picking anchors}
\item{k.score}{How many neighbors (k) to use when scoring anchors}
\item{verbose}{Print messages and progress}
\item{...}{Additional parameters passed to \code{FindIntegrationAnchors} or
\code{FindTransferAnchors}}
}
\value{
Returns an \code{\link{AnchorSet}} object that can be used as input to
\code{\link{IntegrateEmbeddings}} or \code{\link{MapQuery}}
}
\description{
First, bridge object is used to reconstruct two single-modality profiles and
then project those cells into bridge graph laplacian space.
Next, find a set of anchors between two single-modality objects. These
anchors can later be used to integrate embeddings or transfer data from the reference to
query object using the \code{\link{MapQuery}} object.
}
\details{
\itemize{
\item{ Bridge cells reconstruction
}
\item{ Find anchors between objects. It can be either IntegrationAnchors or TransferAnchor.
}
}
}
Seurat/man/CaseMatch.Rd 0000644 0001762 0000144 00000001132 14525500037 014405 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{CaseMatch}
\alias{CaseMatch}
\title{Match the case of character vectors}
\usage{
CaseMatch(search, match)
}
\arguments{
\item{search}{A vector of search terms}
\item{match}{A vector of characters whose case should be matched}
}
\value{
Values from search present in match with the case of match
}
\description{
Match the case of character vectors
}
\examples{
data("pbmc_small")
cd_genes <- c('Cd79b', 'Cd19', 'Cd200')
CaseMatch(search = cd_genes, match = rownames(x = pbmc_small))
}
\concept{utilities}
Seurat/man/ReadParseBio.Rd 0000644 0001762 0000144 00000000640 14525500037 015060 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convenience.R
\name{ReadParseBio}
\alias{ReadParseBio}
\title{Read output from Parse Biosciences}
\usage{
ReadParseBio(data.dir, ...)
}
\arguments{
\item{data.dir}{Directory containing the data files}
\item{...}{Extra parameters passed to \code{\link{ReadMtx}}}
}
\description{
Read output from Parse Biosciences
}
\concept{convenience}
Seurat/man/as.sparse.Rd 0000644 0001762 0000144 00000002567 14525500037 014471 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R, R/utilities.R
\name{as.sparse.H5Group}
\alias{as.sparse.H5Group}
\alias{as.data.frame.Matrix}
\title{Cast to Sparse}
\usage{
\method{as.sparse}{H5Group}(x, ...)
\method{as.data.frame}{Matrix}(
x,
row.names = NULL,
optional = FALSE,
...,
stringsAsFactors = getOption(x = "stringsAsFactors", default = FALSE)
)
}
\arguments{
\item{x}{An object}
\item{...}{Arguments passed to other methods}
\item{row.names}{\code{NULL} or a character vector giving the row names for
the data; missing values are not allowed}
\item{optional}{logical. If \code{TRUE}, setting row names and
converting column names (to syntactic names: see
\code{\link[base]{make.names}}) is optional. Note that all of \R's
\pkg{base} package \code{as.data.frame()} methods use
\code{optional} only for column names treatment, basically with the
meaning of \code{\link[base]{data.frame}(*, check.names = !optional)}.
See also the \code{make.names} argument of the \code{matrix} method.}
\item{stringsAsFactors}{logical: should the character vector be converted
to a factor?}
}
\value{
\code{as.data.frame.Matrix}: A data frame representation of the S4 Matrix
}
\description{
Cast to Sparse
}
\seealso{
\code{\link[SeuratObject:as.sparse]{SeuratObject::as.sparse}}
}
\concept{objects}
\concept{utilities}
Seurat/man/MinMax.Rd 0000644 0001762 0000144 00000001262 14525500037 013752 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{MinMax}
\alias{MinMax}
\title{Apply a ceiling and floor to all values in a matrix}
\usage{
MinMax(data, min, max)
}
\arguments{
\item{data}{Matrix or data frame}
\item{min}{all values below this min value will be replaced with min}
\item{max}{all values above this max value will be replaced with max}
}
\value{
Returns matrix after performing these floor and ceil operations
}
\description{
Apply a ceiling and floor to all values in a matrix
}
\examples{
mat <- matrix(data = rbinom(n = 25, size = 20, prob = 0.2 ), nrow = 5)
mat
MinMax(data = mat, min = 4, max = 5)
}
\concept{utilities}
Seurat/man/AggregateExpression.Rd 0000644 0001762 0000144 00000004144 14525500037 016531 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{AggregateExpression}
\alias{AggregateExpression}
\title{Aggregated feature expression by identity class}
\usage{
AggregateExpression(
object,
assays = NULL,
features = NULL,
return.seurat = FALSE,
group.by = "ident",
add.ident = NULL,
normalization.method = "LogNormalize",
scale.factor = 10000,
margin = 1,
verbose = TRUE,
...
)
}
\arguments{
\item{object}{Seurat object}
\item{assays}{Which assays to use. Default is all assays}
\item{features}{Features to analyze. Default is all features in the assay}
\item{return.seurat}{Whether to return the data as a Seurat object. Default is FALSE}
\item{group.by}{Category (or vector of categories) for grouping (e.g, ident, replicate, celltype); 'ident' by default
To use multiple categories, specify a vector, such as c('ident', 'replicate', 'celltype')}
\item{add.ident}{(Deprecated). Place an additional label on each cell prior to pseudobulking}
\item{normalization.method}{Method for normalization, see \code{\link{NormalizeData}}}
\item{scale.factor}{Scale factor for normalization, see \code{\link{NormalizeData}}}
\item{margin}{Margin to perform CLR normalization, see \code{\link{NormalizeData}}}
\item{verbose}{Print messages and show progress bar}
\item{...}{Arguments to be passed to methods such as \code{\link{CreateSeuratObject}}}
}
\value{
Returns a matrix with genes as rows, identity classes as columns.
If return.seurat is TRUE, returns an object of class \code{\link{Seurat}}.
}
\description{
Returns summed counts ("pseudobulk") for each identity class.
}
\details{
If \code{return.seurat = TRUE}, aggregated values are placed in the 'counts'
layer of the returned object. The data is then normalized by running \code{\link{NormalizeData}}
on the aggregated counts. \code{\link{ScaleData}} is then run on the default assay
before returning the object.
}
\examples{
\dontrun{
data("pbmc_small")
head(AggregateExpression(object = pbmc_small)$RNA)
head(AggregateExpression(object = pbmc_small, group.by = c('ident', 'groups'))$RNA)
}
}
\concept{utilities}
Seurat/man/ImageFeaturePlot.Rd 0000644 0001762 0000144 00000010635 14525500037 015762 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{ImageFeaturePlot}
\alias{ImageFeaturePlot}
\title{Spatial Feature Plots}
\usage{
ImageFeaturePlot(
object,
features,
fov = NULL,
boundaries = NULL,
cols = if (isTRUE(x = blend)) {
c("lightgrey", "#ff0000", "#00ff00")
} else {
c("lightgrey", "firebrick1")
},
size = 0.5,
min.cutoff = NA,
max.cutoff = NA,
split.by = NULL,
molecules = NULL,
mols.size = 0.1,
mols.cols = NULL,
nmols = 1000,
alpha = 1,
border.color = "white",
border.size = NULL,
dark.background = TRUE,
blend = FALSE,
blend.threshold = 0.5,
crop = FALSE,
cells = NULL,
scale = c("feature", "all", "none"),
overlap = FALSE,
axes = FALSE,
combine = TRUE,
coord.fixed = TRUE
)
}
\arguments{
\item{object}{Seurat object}
\item{features}{Vector of features to plot. Features can come from:
\itemize{
\item An \code{Assay} feature (e.g. a gene name - "MS4A1")
\item A column name from meta.data (e.g. mitochondrial percentage -
"percent.mito")
\item A column name from a \code{DimReduc} object corresponding to the
cell embedding values (e.g. the PC 1 scores - "PC_1")
}}
\item{fov}{Name of FOV to plot}
\item{boundaries}{A vector of segmentation boundaries per image to plot;
can be a character vector, a named character vector, or a named list.
Names should be the names of FOVs and values should be the names of
segmentation boundaries}
\item{cols}{The two colors to form the gradient over. Provide as string vector with
the first color corresponding to low values, the second to high. Also accepts a Brewer
color scale or vector of colors. Note: this will bin the data into number of colors provided.
When blend is \code{TRUE}, takes anywhere from 1-3 colors:
\describe{
\item{1 color:}{Treated as color for double-negatives, will use default colors 2 and 3 for per-feature expression}
\item{2 colors:}{Treated as colors for per-feature expression, will use default color 1 for double-negatives}
\item{3+ colors:}{First color used for double-negatives, colors 2 and 3 used for per-feature expression, all others ignored}
}}
\item{size}{Point size for cells when plotting centroids}
\item{min.cutoff, max.cutoff}{Vector of minimum and maximum cutoff values for each feature,
may specify quantile in the form of 'q##' where '##' is the quantile (eg, 'q1', 'q10')}
\item{split.by}{A factor in object metadata to split the plot by, pass 'ident'
to split by cell identity}
\item{molecules}{A vector of molecules to plot}
\item{mols.size}{Point size for molecules}
\item{mols.cols}{A vector of color for molecules. The "Set1" palette from
RColorBrewer is used by default.}
\item{nmols}{Max number of each molecule specified in `molecules` to plot}
\item{alpha}{Alpha value for plotting (default is 1)}
\item{border.color}{Color of cell segmentation border; pass \code{NA}
to suppress borders for segmentation-based plots}
\item{border.size}{Thickness of cell segmentation borders; pass \code{NA}
to suppress borders for centroid-based plots}
\item{dark.background}{Set plot background to black}
\item{blend}{Scale and blend expression values to visualize coexpression of two features}
\item{blend.threshold}{The color cutoff from weak signal to strong signal; ranges from 0 to 1.}
\item{crop}{Crop the plots to area with cells only}
\item{cells}{Vector of cells to plot (default is all cells)}
\item{scale}{Set color scaling across multiple plots; choose from:
\itemize{
\item \dQuote{\code{feature}}: Plots per-feature are scaled across splits
\item \dQuote{\code{all}}: Plots per-feature are scaled across all features
\item \dQuote{\code{none}}: Plots are not scaled; \strong{note}: setting
\code{scale} to \dQuote{\code{none}} will result in color scales that are
\emph{not} comparable between plots
}
Ignored if \code{blend = TRUE}}
\item{overlap}{Overlay boundaries from a single image to create a single
plot; if \code{TRUE}, then boundaries are stacked in the order they're
given (first is lowest)}
\item{axes}{Keep axes and panel background}
\item{combine}{Combine plots into a single \code{\link[patchwork]{patchwork}ed}
ggplot object. If \code{FALSE}, return a list of ggplot objects}
\item{coord.fixed}{Plot cartesian coordinates with fixed aspect ratio}
}
\value{
If \code{combine = TRUE}, a \code{patchwork}
ggplot object; otherwise, a list of ggplot objects
}
\description{
Visualize expression in a spatial context
}
Seurat/man/MULTIseqDemux.Rd 0000644 0001762 0000144 00000002430 14525500037 015165 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{MULTIseqDemux}
\alias{MULTIseqDemux}
\title{Demultiplex samples based on classification method from MULTI-seq (McGinnis et al., bioRxiv 2018)}
\usage{
MULTIseqDemux(
object,
assay = "HTO",
quantile = 0.7,
autoThresh = FALSE,
maxiter = 5,
qrange = seq(from = 0.1, to = 0.9, by = 0.05),
verbose = TRUE
)
}
\arguments{
\item{object}{Seurat object. Assumes that the specified assay data has been added}
\item{assay}{Name of the multiplexing assay (HTO by default)}
\item{quantile}{The quantile to use for classification}
\item{autoThresh}{Whether to perform automated threshold finding to define the best quantile. Default is FALSE}
\item{maxiter}{Maximum number of iterations if autoThresh = TRUE. Default is 5}
\item{qrange}{A range of possible quantile values to try if autoThresh = TRUE}
\item{verbose}{Prints the output}
}
\value{
A Seurat object with demultiplexing results stored at \code{object$MULTI_ID}
}
\description{
Identify singlets, doublets and negative cells from multiplexing experiments. Annotate singlets by tags.
}
\examples{
\dontrun{
object <- MULTIseqDemux(object)
}
}
\references{
\url{https://www.biorxiv.org/content/10.1101/387241v1}
}
\concept{preprocessing}
Seurat/man/CollapseEmbeddingOutliers.Rd 0000644 0001762 0000144 00000002304 14525500037 017647 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{CollapseEmbeddingOutliers}
\alias{CollapseEmbeddingOutliers}
\title{Move outliers towards center on dimension reduction plot}
\usage{
CollapseEmbeddingOutliers(
object,
reduction = "umap",
dims = 1:2,
group.by = "ident",
outlier.sd = 2,
reduction.key = "UMAP_"
)
}
\arguments{
\item{object}{Seurat object}
\item{reduction}{Name of DimReduc to adjust}
\item{dims}{Dimensions to visualize}
\item{group.by}{Group (color) cells in different ways (for example, orig.ident)}
\item{outlier.sd}{Controls the outlier distance}
\item{reduction.key}{Key for DimReduc that is returned}
}
\value{
Returns a DimReduc object with the modified embeddings
}
\description{
Move outliers towards center on dimension reduction plot
}
\examples{
\dontrun{
data("pbmc_small")
pbmc_small <- FindClusters(pbmc_small, resolution = 1.1)
pbmc_small <- RunUMAP(pbmc_small, dims = 1:5)
DimPlot(pbmc_small, reduction = "umap")
pbmc_small[["umap_new"]] <- CollapseEmbeddingOutliers(pbmc_small,
reduction = "umap", reduction.key = 'umap_', outlier.sd = 0.5)
DimPlot(pbmc_small, reduction = "umap_new")
}
}
\concept{visualization}
Seurat/man/L2Dim.Rd 0000644 0001762 0000144 00000001141 14525500037 013464 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dimensional_reduction.R
\name{L2Dim}
\alias{L2Dim}
\title{L2-normalization}
\usage{
L2Dim(object, reduction, new.dr = NULL, new.key = NULL)
}
\arguments{
\item{object}{Seurat object}
\item{reduction}{Dimensional reduction to normalize}
\item{new.dr}{name of new dimensional reduction to store
(default is olddr.l2)}
\item{new.key}{name of key for new dimensional reduction}
}
\value{
Returns a \code{\link{Seurat}} object
}
\description{
Perform l2 normalization on given dimensional reduction
}
\concept{dimensional_reduction}
Seurat/man/STARmap-class.Rd 0000644 0001762 0000144 00000001301 14525500037 015125 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\docType{class}
\name{STARmap-class}
\alias{STARmap-class}
\alias{STARmap}
\title{The STARmap class}
\description{
The STARmap class
}
\section{Slots}{
\describe{
\item{\code{assay}}{Name of assay to associate image data with; will give this image
priority for visualization when the assay is set as the active/default assay
in a \code{Seurat} object}
\item{\code{key}}{A one-length character vector with the object's key; keys must
be one or more alphanumeric characters followed by an underscore
\dQuote{\code{_}} (regex pattern
\dQuote{\code{^[a-zA-Z][a-zA-Z0-9]*_$}})}
}
}
\concept{objects}
\concept{spatial}
Seurat/man/AutoPointSize.Rd 0000644 0001762 0000144 00000001125 14525500037 015334 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{AutoPointSize}
\alias{AutoPointSize}
\title{Automagically calculate a point size for ggplot2-based scatter plots}
\usage{
AutoPointSize(data, raster = NULL)
}
\arguments{
\item{data}{A data frame being passed to ggplot2}
\item{raster}{If TRUE, point size is set to 1}
}
\value{
The "optimal" point size for visualizing these data
}
\description{
It happens to look good
}
\examples{
df <- data.frame(x = rnorm(n = 10000), y = runif(n = 10000))
AutoPointSize(data = df)
}
\concept{visualization}
Seurat/man/CreateSCTAssayObject.Rd 0000644 0001762 0000144 00000002233 14525500037 016465 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{CreateSCTAssayObject}
\alias{CreateSCTAssayObject}
\title{Create a SCT Assay object}
\usage{
CreateSCTAssayObject(
counts,
data,
scale.data = NULL,
umi.assay = "RNA",
min.cells = 0,
min.features = 0,
SCTModel.list = NULL
)
}
\arguments{
\item{counts}{Unnormalized data such as raw counts or TPMs}
\item{data}{Prenormalized data; if provided, do not pass \code{counts}}
\item{scale.data}{a residual matrix}
\item{umi.assay}{The UMI assay name. Default is RNA}
\item{min.cells}{Include features detected in at least this many cells. Will
subset the counts matrix as well. To reintroduce excluded features, create a
new object with a lower cutoff}
\item{min.features}{Include cells where at least this many features are
detected}
\item{SCTModel.list}{list of SCTModels}
}
\description{
Create a SCT object from a feature (e.g. gene) expression matrix and a list of SCTModels.
The expected format of the input matrix is features x cells.
}
\details{
Non-unique cell or feature names are not allowed. Please make unique before
calling this function.
}
\concept{objects}
Seurat/man/IntegrateData.Rd 0000644 0001762 0000144 00000013574 14525500037 015306 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{IntegrateData}
\alias{IntegrateData}
\title{Integrate data}
\usage{
IntegrateData(
anchorset,
new.assay.name = "integrated",
normalization.method = c("LogNormalize", "SCT"),
features = NULL,
features.to.integrate = NULL,
dims = 1:30,
k.weight = 100,
weight.reduction = NULL,
sd.weight = 1,
sample.tree = NULL,
preserve.order = FALSE,
eps = 0,
verbose = TRUE
)
}
\arguments{
\item{anchorset}{An \code{\link{AnchorSet}} object generated by
\code{\link{FindIntegrationAnchors}}}
\item{new.assay.name}{Name for the new assay containing the integrated data}
\item{normalization.method}{Name of normalization method used: LogNormalize
or SCT}
\item{features}{Vector of features to use when computing the PCA to determine
the weights. Only set if you want a different set from those used in the
anchor finding process}
\item{features.to.integrate}{Vector of features to integrate. By default,
will use the features used in anchor finding.}
\item{dims}{Number of dimensions to use in the anchor weighting procedure}
\item{k.weight}{Number of neighbors to consider when weighting anchors}
\item{weight.reduction}{Dimension reduction to use when calculating anchor
weights. This can be one of:
\itemize{
\item{A string, specifying the name of a dimension reduction present in
all objects to be integrated}
\item{A vector of strings, specifying the name of a dimension reduction to
use for each object to be integrated}
\item{A vector of \code{\link{DimReduc}} objects, specifying the object to
use for each object in the integration}
\item{NULL, in which case a new PCA will be calculated and used to
calculate anchor weights}
}
Note that, if specified, the requested dimension reduction will only be used
for calculating anchor weights in the first merge between reference and
query, as the merged object will subsequently contain more cells than were in
query, and weights will need to be calculated for all cells in the object.}
\item{sd.weight}{Controls the bandwidth of the Gaussian kernel for weighting}
\item{sample.tree}{Specify the order of integration. Order of integration
should be encoded in a matrix, where each row represents one of the pairwise
integration steps. Negative numbers specify a dataset, positive numbers
specify the integration results from a given row (the format of the merge
matrix included in the \code{\link{hclust}} function output). For example:
\code{matrix(c(-2, 1, -3, -1), ncol = 2)} gives:
\if{html}{\out{<div class="sourceCode">}}\preformatted{ [,1] [,2]
[1,] -2 -3
[2,] 1 -1
}\if{html}{\out{</div>}}
Which would cause dataset 2 and 3 to be integrated first, then the resulting
object integrated with dataset 1.
If NULL, the sample tree will be computed automatically.}
\item{preserve.order}{Do not reorder objects based on size for each pairwise
integration.}
\item{eps}{Error bound on the neighbor finding algorithm (from
\code{\link{RANN}})}
\item{verbose}{Print progress bars and output}
}
\value{
Returns a \code{\link{Seurat}} object with a new integrated
\code{\link{Assay}}. If \code{normalization.method = "LogNormalize"}, the
integrated data is returned to the \code{data} slot and can be treated as
log-normalized, corrected data. If \code{normalization.method = "SCT"}, the
integrated data is returned to the \code{scale.data} slot and can be treated
as centered, corrected Pearson residuals.
}
\description{
Perform dataset integration using a pre-computed \code{\link{AnchorSet}}.
}
\details{
The main steps of this procedure are outlined below. For a more detailed
description of the methodology, please see Stuart, Butler, et al Cell 2019.
\doi{10.1016/j.cell.2019.05.031}; \doi{10.1101/460147}
For pairwise integration:
\itemize{
\item{Construct a weights matrix that defines the association between each
query cell and each anchor. These weights are computed as 1 - the distance
between the query cell and the anchor divided by the distance of the query
cell to the \code{k.weight}th anchor multiplied by the anchor score
computed in \code{\link{FindIntegrationAnchors}}. We then apply a Gaussian
kernel with a bandwidth defined by \code{sd.weight} and normalize across
all \code{k.weight} anchors.}
\item{Compute the anchor integration matrix as the difference between the
two expression matrices for every pair of anchor cells}
\item{Compute the transformation matrix as the product of the integration
matrix and the weights matrix.}
\item{Subtract the transformation matrix from the original expression
matrix.}
}
For multiple dataset integration, we perform iterative pairwise integration.
To determine the order of integration (if not specified via
\code{sample.tree}), we
\itemize{
\item{Define a distance between datasets as the total number of cells in
the smaller dataset divided by the total number of anchors between the two
datasets.}
\item{Compute all pairwise distances between datasets}
\item{Cluster this distance matrix to determine a guide tree}
}
}
\examples{
\dontrun{
# to install the SeuratData package see https://github.com/satijalab/seurat-data
library(SeuratData)
data("panc8")
# panc8 is a merged Seurat object containing 8 separate pancreas datasets
# split the object by dataset
pancreas.list <- SplitObject(panc8, split.by = "tech")
# perform standard preprocessing on each object
for (i in 1:length(pancreas.list)) {
pancreas.list[[i]] <- NormalizeData(pancreas.list[[i]], verbose = FALSE)
pancreas.list[[i]] <- FindVariableFeatures(
pancreas.list[[i]], selection.method = "vst",
nfeatures = 2000, verbose = FALSE
)
}
# find anchors
anchors <- FindIntegrationAnchors(object.list = pancreas.list)
# integrate data
integrated <- IntegrateData(anchorset = anchors)
}
}
\references{
Stuart T, Butler A, et al. Comprehensive Integration of
Single-Cell Data. Cell. 2019;177:1888-1902 \doi{10.1016/j.cell.2019.05.031}
}
\concept{integration}
Seurat/man/MapQuery.Rd 0000644 0001762 0000144 00000006021 14525500037 014322 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{MapQuery}
\alias{MapQuery}
\title{Map query cells to a reference}
\usage{
MapQuery(
anchorset,
query,
reference,
refdata = NULL,
new.reduction.name = NULL,
reference.reduction = NULL,
reference.dims = NULL,
query.dims = NULL,
store.weights = FALSE,
reduction.model = NULL,
transferdata.args = list(),
integrateembeddings.args = list(),
projectumap.args = list(),
verbose = TRUE
)
}
\arguments{
\item{anchorset}{An AnchorSet object}
\item{query}{Query object used in anchorset construction}
\item{reference}{Reference object used in anchorset construction}
\item{refdata}{Data to transfer. This can be specified in one of two ways:
\itemize{
\item{The reference data itself as either a vector where the names
correspond to the reference cells, or a matrix, where the column names
correspond to the reference cells.}
\item{The name of the metadata field or assay from the reference object
provided. This requires the reference parameter to be specified. If pulling
assay data in this manner, it will pull the data from the data slot. To
transfer data from other slots, please pull the data explicitly with
\code{\link{GetAssayData}} and provide that matrix here.}
}}
\item{new.reduction.name}{Name for new integrated dimensional reduction.}
\item{reference.reduction}{Name of reduction to use from the reference for
neighbor finding}
\item{reference.dims}{Dimensions (columns) to use from reference}
\item{query.dims}{Dimensions (columns) to use from query}
\item{store.weights}{Determine if the weight and anchor matrices are stored.}
\item{reduction.model}{\code{DimReduc} object that contains the umap model}
\item{transferdata.args}{A named list of additional arguments to
\code{\link{TransferData}}}
\item{integrateembeddings.args}{A named list of additional arguments to
\code{\link{IntegrateEmbeddings}}}
\item{projectumap.args}{A named list of additional arguments to
\code{\link{ProjectUMAP}}}
\item{verbose}{Print progress bars and output}
}
\value{
Returns a modified query Seurat object containing:
\itemize{
\item{New Assays corresponding to the features transferred and/or their
corresponding prediction scores from \code{\link{TransferData}}}
\item{An integrated reduction from \code{\link{IntegrateEmbeddings}}}
\item{A projected UMAP reduction of the query cells projected into the
reference UMAP using \code{\link{ProjectUMAP}}}
}
}
\description{
This is a convenience wrapper function around the following three functions
that are often run together when mapping query data to a reference:
\code{\link{TransferData}}, \code{\link{IntegrateEmbeddings}},
\code{\link{ProjectUMAP}}. Note that by default, the \code{weight.reduction}
parameter for all functions will be set to the dimension reduction method
used in the \code{\link{FindTransferAnchors}} function call used to construct
the anchor object, and the \code{dims} parameter will be the same dimensions
used to find anchors.
}
\concept{integration}
Seurat/man/GetImage.Rd 0000644 0001762 0000144 00000001500 14525500037 014236 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{GetImage.SlideSeq}
\alias{GetImage.SlideSeq}
\alias{GetImage.STARmap}
\alias{GetImage.VisiumV1}
\title{Get Image Data}
\usage{
\method{GetImage}{SlideSeq}(object, mode = c("grob", "raster", "plotly", "raw"), ...)
\method{GetImage}{STARmap}(object, mode = c("grob", "raster", "plotly", "raw"), ...)
\method{GetImage}{VisiumV1}(object, mode = c("grob", "raster", "plotly", "raw"), ...)
}
\arguments{
\item{object}{An object}
\item{mode}{How to return the image; should accept one of \dQuote{grob},
\dQuote{raster}, \dQuote{plotly}, or \dQuote{raw}}
\item{...}{Arguments passed to other methods}
}
\description{
Get Image Data
}
\seealso{
\code{\link[SeuratObject:GetImage]{SeuratObject::GetImage}}
}
\concept{objects}
\concept{spatial}
Seurat/man/CellCycleScoring.Rd 0000644 0001762 0000144 00000002676 14525500037 015757 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{CellCycleScoring}
\alias{CellCycleScoring}
\title{Score cell cycle phases}
\usage{
CellCycleScoring(
object,
s.features,
g2m.features,
ctrl = NULL,
set.ident = FALSE,
...
)
}
\arguments{
\item{object}{A Seurat object}
\item{s.features}{A vector of features associated with S phase}
\item{g2m.features}{A vector of features associated with G2M phase}
\item{ctrl}{Number of control features selected from the same bin per
analyzed feature supplied to \code{\link{AddModuleScore}}.
Defaults to value equivalent to minimum number of features
present in 's.features' and 'g2m.features'.}
\item{set.ident}{If true, sets identity to phase assignments.
Stashes old identities in 'old.ident'}
\item{...}{Arguments to be passed to \code{\link{AddModuleScore}}}
}
\value{
A Seurat object with the following columns added to object meta data: S.Score, G2M.Score, and Phase
}
\description{
Score cell cycle phases
}
\examples{
\dontrun{
data("pbmc_small")
# pbmc_small doesn't have any cell-cycle genes
# To run CellCycleScoring, please use a dataset with cell-cycle genes
# An example is available at http://satijalab.org/seurat/cell_cycle_vignette.html
pbmc_small <- CellCycleScoring(
object = pbmc_small,
g2m.features = cc.genes$g2m.genes,
s.features = cc.genes$s.genes
)
head(x = pbmc_small@meta.data)
}
}
\seealso{
\code{AddModuleScore}
}
\concept{utilities}
Seurat/man/ReadNanostring.Rd 0000644 0001762 0000144 00000011315 14525500037 015477 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R, R/convenience.R
\name{ReadNanostring}
\alias{ReadNanostring}
\alias{LoadNanostring}
\title{Read and Load Nanostring SMI data}
\usage{
ReadNanostring(
data.dir,
mtx.file = NULL,
metadata.file = NULL,
molecules.file = NULL,
segmentations.file = NULL,
type = "centroids",
mol.type = "pixels",
metadata = NULL,
mols.filter = NA_character_,
genes.filter = NA_character_,
fov.filter = NULL,
subset.counts.matrix = NULL,
cell.mols.only = TRUE
)
LoadNanostring(data.dir, fov, assay = "Nanostring")
}
\arguments{
\item{data.dir}{Path to folder containing Nanostring SMI outputs}
\item{mtx.file}{Path to Nanostring cell x gene matrix CSV}
\item{metadata.file}{Contains metadata including cell center, area,
and stain intensities}
\item{molecules.file}{Path to molecules file}
\item{segmentations.file}{Path to segmentations CSV}
\item{type}{Type of cell spatial coordinate matrices to read; choose one
or more of:
\itemize{
\item \dQuote{centroids}: cell centroids in pixel coordinate space
\item \dQuote{segmentations}: cell segmentations in pixel coordinate space
}}
\item{mol.type}{Type of molecule spatial coordinate matrices to read;
choose one or more of:
\itemize{
\item \dQuote{pixels}: molecule coordinates in pixel space
}}
\item{metadata}{Type of available metadata to read;
choose zero or more of:
\itemize{
\item \dQuote{Area}: number of pixels in cell segmentation
\item \dQuote{fov}: cell's fov
\item \dQuote{Mean.MembraneStain}: mean membrane stain intensity
\item \dQuote{Mean.DAPI}: mean DAPI stain intensity
\item \dQuote{Mean.G}: mean green channel stain intensity
\item \dQuote{Mean.Y}: mean yellow channel stain intensity
\item \dQuote{Mean.R}: mean red channel stain intensity
\item \dQuote{Max.MembraneStain}: max membrane stain intensity
\item \dQuote{Max.DAPI}: max DAPI stain intensity
\item \dQuote{Max.G}: max green channel stain intensity
\item \dQuote{Max.Y}: max yellow stain intensity
\item \dQuote{Max.R}: max red stain intensity
}}
\item{mols.filter}{Filter molecules that match provided string}
\item{genes.filter}{Filter genes from cell x gene matrix that match
provided string}
\item{fov.filter}{Only load in select FOVs. Nanostring SMI data contains
30 total FOVs.}
\item{subset.counts.matrix}{If the counts matrix should be built from
molecule coordinates for a specific segmentation; One of:
\itemize{
\item \dQuote{Nuclear}: nuclear segmentations
\item \dQuote{Cytoplasm}: cell cytoplasm segmentations
\item \dQuote{Membrane}: cell membrane segmentations
}}
\item{cell.mols.only}{If TRUE, only load molecules within a cell}
\item{fov}{Name to store FOV as}
\item{assay}{Name to store expression matrix as}
}
\value{
\code{ReadNanostring}: A list with some combination of the
following values:
\itemize{
\item \dQuote{\code{matrix}}: a
\link[Matrix:dgCMatrix-class]{sparse matrix} with expression data; cells
are columns and features are rows
\item \dQuote{\code{centroids}}: a data frame with cell centroid
coordinates in three columns: \dQuote{x}, \dQuote{y}, and \dQuote{cell}
\item \dQuote{\code{pixels}}: a data frame with molecule pixel coordinates
in three columns: \dQuote{x}, \dQuote{y}, and \dQuote{gene}
}
\code{LoadNanostring}: A \code{\link[SeuratObject]{Seurat}} object
}
\description{
Read and Load Nanostring SMI data
}
\note{
This function requires the
\href{https://cran.r-project.org/package=data.table}{\pkg{data.table}} package
to be installed
}
\section{Progress Updates with \pkg{progressr}}{
This function uses
\href{https://cran.r-project.org/package=progressr}{\pkg{progressr}} to
render status updates and progress bars. To enable progress updates, wrap
the function call in \code{\link[progressr]{with_progress}} or run
\code{\link[progressr:handlers]{handlers(global = TRUE)}} before running
this function. For more details about \pkg{progressr}, please read
\href{https://progressr.futureverse.org/articles/progressr-intro.html}{\code{vignette("progressr-intro")}}
}
\section{Parallelization with \pkg{future}}{
This function uses
\href{https://cran.r-project.org/package=future}{\pkg{future}} to enable
parallelization. Parallelization strategies can be set using
\code{\link[future]{plan}}. Common plans include \dQuote{\code{sequential}}
for non-parallelized processing or \dQuote{\code{multisession}} for parallel
evaluation using multiple \R sessions; for other plans, see the
\dQuote{Implemented evaluation strategies} section of
\code{\link[future:plan]{?future::plan}}. For a more thorough introduction
to \pkg{future}, see
\href{https://future.futureverse.org/articles/future-1-overview.html}{\code{vignette("future-1-overview")}}
}
\concept{future}
\concept{preprocessing}
Seurat/man/ReadVitessce.Rd 0000644 0001762 0000144 00000006725 14525500037 015153 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R, R/convenience.R
\name{ReadVitessce}
\alias{ReadVitessce}
\alias{LoadHuBMAPCODEX}
\title{Read Data From Vitessce}
\usage{
ReadVitessce(
counts = NULL,
coords = NULL,
molecules = NULL,
type = c("segmentations", "centroids"),
filter = NA_character_
)
LoadHuBMAPCODEX(data.dir, fov, assay = "CODEX")
}
\arguments{
\item{counts}{Path or URL to a Vitessce-formatted JSON file with
expression data; should end in \dQuote{\code{.genes.json}} or
\dQuote{\code{.clusters.json}}; pass \code{NULL} to skip}
\item{coords}{Path or URL to a Vitessce-formatted JSON file with cell/spot
spatial coordinates; should end in \dQuote{\code{.cells.json}};
pass \code{NULL} to skip}
\item{molecules}{Path or URL to a Vitessce-formatted JSON file with molecule
spatial coordinates; should end in \dQuote{\code{.molecules.json}};
pass \code{NULL} to skip}
\item{type}{Type of cell/spot spatial coordinates to return,
choose one or more from:
\itemize{
\item \dQuote{segmentations} cell/spot segmentations
\item \dQuote{centroids} cell/spot centroids
}}
\item{filter}{A character to filter molecules by, pass \code{NA} to skip
molecule filtering}
\item{data.dir}{Path to a directory containing Vitessce cells
and clusters JSONs}
\item{fov}{Name to store FOV as}
\item{assay}{Name to store expression matrix as}
}
\value{
\code{ReadVitessce}: A list with some combination of the
following values:
\itemize{
\item \dQuote{\code{counts}}: if \code{counts} is not \code{NULL}, an
expression matrix with cells as columns and features as rows
\item \dQuote{\code{centroids}}: if \code{coords} is not \code{NULL} and
\code{type} contains \dQuote{centroids}, a data frame with cell centroids
in three columns: \dQuote{x}, \dQuote{y}, and \dQuote{cell}
\item \dQuote{\code{segmentations}}: if \code{coords} is not \code{NULL} and
\code{type} contains \dQuote{segmentations}, a data frame with cell
segmentations in three columns: \dQuote{x}, \dQuote{y} and \dQuote{cell}
\item \dQuote{\code{molecules}}: if \code{molecules} is not \code{NULL}, a
data frame with molecule spatial coordinates in three columns: \dQuote{x},
\dQuote{y}, and \dQuote{gene}
}
\code{LoadHuBMAPCODEX}: A \code{\link[SeuratObject]{Seurat}} object
}
\description{
Read in data from Vitessce-formatted JSON files
}
\note{
This function requires the
\href{https://cran.r-project.org/package=jsonlite}{\pkg{jsonlite}} package
to be installed
}
\section{Progress Updates with \pkg{progressr}}{
This function uses
\href{https://cran.r-project.org/package=progressr}{\pkg{progressr}} to
render status updates and progress bars. To enable progress updates, wrap
the function call in \code{\link[progressr]{with_progress}} or run
\code{\link[progressr:handlers]{handlers(global = TRUE)}} before running
this function. For more details about \pkg{progressr}, please read
\href{https://progressr.futureverse.org/articles/progressr-intro.html}{\code{vignette("progressr-intro")}}
}
\examples{
\dontrun{
coords <- ReadVitessce(
counts =
"https://s3.amazonaws.com/vitessce-data/0.0.31/master_release/wang/wang.genes.json",
coords =
"https://s3.amazonaws.com/vitessce-data/0.0.31/master_release/wang/wang.cells.json",
molecules =
"https://s3.amazonaws.com/vitessce-data/0.0.31/master_release/wang/wang.molecules.json"
)
names(coords)
coords$counts[1:10, 1:10]
head(coords$centroids)
head(coords$segmentations)
head(coords$molecules)
}
}
\concept{preprocessing}
Seurat/man/TransferSketchLabels.Rd 0000644 0001762 0000144 00000003371 14525500037 016635 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sketching.R
\name{TransferSketchLabels}
\alias{TransferSketchLabels}
\title{Transfer data from sketch data to full data}
\usage{
TransferSketchLabels(
object,
sketched.assay = "sketch",
reduction,
dims,
refdata = NULL,
k = 50,
reduction.model = NULL,
neighbors = NULL,
recompute.neighbors = FALSE,
recompute.weights = FALSE,
verbose = TRUE
)
}
\arguments{
\item{object}{A Seurat object.}
\item{sketched.assay}{Sketched assay name. Default is 'sketch'.}
\item{reduction}{Dimensional reduction name to use for label transfer.}
\item{dims}{An integer vector indicating which dimensions to use for label transfer.}
\item{refdata}{A list of character strings indicating the metadata columns containing labels to transfer. Default is NULL.
Similar to refdata in `MapQuery`}
\item{k}{Number of neighbors to use for label transfer. Default is 50.}
\item{reduction.model}{Dimensional reduction model to use for label transfer. Default is NULL.}
\item{neighbors}{An object storing the neighbors found during the sketching process. Default is NULL.}
\item{recompute.neighbors}{Whether to recompute the neighbors for label transfer. Default is FALSE.}
\item{recompute.weights}{Whether to recompute the weights for label transfer. Default is FALSE.}
\item{verbose}{Print progress and diagnostic messages}
}
\value{
A Seurat object with transferred labels stored in the metadata. If a UMAP model is provided,
the full data are also projected onto the UMAP space, with the results stored in a new reduction, full.`reduction.model`
}
\description{
This function transfers cell type labels from a sketched dataset to a full dataset
based on the similarities in the lower dimensional space.
}
Seurat/man/FindTransferAnchors.Rd 0000644 0001762 0000144 00000020121 14525500037 016457 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{FindTransferAnchors}
\alias{FindTransferAnchors}
\title{Find transfer anchors}
\usage{
FindTransferAnchors(
reference,
query,
normalization.method = "LogNormalize",
recompute.residuals = TRUE,
reference.assay = NULL,
reference.neighbors = NULL,
query.assay = NULL,
reduction = "pcaproject",
reference.reduction = NULL,
project.query = FALSE,
features = NULL,
scale = TRUE,
npcs = 30,
l2.norm = TRUE,
dims = 1:30,
k.anchor = 5,
k.filter = NA,
k.score = 30,
max.features = 200,
nn.method = "annoy",
n.trees = 50,
eps = 0,
approx.pca = TRUE,
mapping.score.k = NULL,
verbose = TRUE
)
}
\arguments{
\item{reference}{\code{\link{Seurat}} object to use as the reference}
\item{query}{\code{\link{Seurat}} object to use as the query}
\item{normalization.method}{Name of normalization method used: LogNormalize
or SCT.}
\item{recompute.residuals}{If using SCT as a normalization method, compute
query Pearson residuals using the reference SCT model parameters.}
\item{reference.assay}{Name of the Assay to use from reference}
\item{reference.neighbors}{Name of the Neighbor to use from the reference.
Optionally enables reuse of precomputed neighbors.}
\item{query.assay}{Name of the Assay to use from query}
\item{reduction}{Dimensional reduction to perform when finding anchors.
Options are:
\itemize{
\item{pcaproject: Project the PCA from the reference onto the query. We
recommend using PCA when reference and query datasets are from scRNA-seq}
\item{lsiproject: Project the LSI from the reference onto the query. We
recommend using LSI when reference and query datasets are from scATAC-seq.
This requires that LSI has been computed for the reference dataset, and the
same features (eg, peaks or genome bins) are present in both the reference
and query. See \code{\link[Signac]{RunTFIDF}} and
\code{\link[Signac]{RunSVD}}}
\item{rpca: Project the PCA from the reference onto the query, and the PCA
from the query onto the reference (reciprocal PCA projection).}
\item{cca: Run a CCA on the reference and query }
}}
\item{reference.reduction}{Name of dimensional reduction to use from the
reference if running the pcaproject workflow. Optionally enables reuse of
precomputed reference dimensional reduction. If NULL (default), use a PCA
computed on the reference object.}
\item{project.query}{Project the PCA from the query dataset onto the
reference. Use only in rare cases where the query dataset has a much larger
cell number, but the reference dataset has a unique assay for transfer. In
this case, the default features will be set to the variable features of the
query object that are also present in the reference.}
\item{features}{Features to use for dimensional reduction. If not specified,
set as variable features of the reference object which are also present in
the query.}
\item{scale}{Scale query data.}
\item{npcs}{Number of PCs to compute on reference if reference.reduction is
not provided.}
\item{l2.norm}{Perform L2 normalization on the cell embeddings after
dimensional reduction}
\item{dims}{Which dimensions to use from the reduction to specify the
neighbor search space}
\item{k.anchor}{How many neighbors (k) to use when finding anchors}
\item{k.filter}{How many neighbors (k) to use when filtering anchors. Set to
NA to turn off filtering.}
\item{k.score}{How many neighbors (k) to use when scoring anchors}
\item{max.features}{The maximum number of features to use when specifying the
neighborhood search space in the anchor filtering}
\item{nn.method}{Method for nearest neighbor finding. Options include: rann,
annoy}
\item{n.trees}{More trees gives higher precision when using annoy approximate
nearest neighbor search}
\item{eps}{Error bound on the neighbor finding algorithm (from
\code{\link{RANN}} or \code{\link{RcppAnnoy}})}
\item{approx.pca}{Use truncated singular value decomposition to approximate
PCA}
\item{mapping.score.k}{Compute and store nearest k query neighbors in the
AnchorSet object that is returned. You can optionally set this if you plan
on computing the mapping score and want to enable reuse of some downstream
neighbor calculations to make the mapping score function more efficient.}
\item{verbose}{Print progress bars and output}
}
\value{
Returns an \code{AnchorSet} object that can be used as input to
\code{\link{TransferData}}, \code{\link{IntegrateEmbeddings}} and
\code{\link{MapQuery}}. The dimension reduction used for finding anchors is
stored in the \code{AnchorSet} object and can be used for computing anchor
weights in downstream functions. Note that only the requested dimensions are
stored in the dimension reduction object in the \code{AnchorSet}. This means
that if \code{dims=2:20} is used, for example, the dimension of the stored
reduction is \code{1:19}.
}
\description{
Find a set of anchors between a reference and query object. These
anchors can later be used to transfer data from the reference to
query object using the \code{\link{TransferData}} function.
}
\details{
The main steps of this procedure are outlined below. For a more detailed
description of the methodology, please see Stuart, Butler, et al Cell 2019.
\doi{10.1016/j.cell.2019.05.031}; \doi{10.1101/460147}
\itemize{
\item{Perform dimensional reduction. Exactly what is done here depends on
the values set for the \code{reduction} and \code{project.query}
parameters. If \code{reduction = "pcaproject"}, a PCA is performed on
either the reference (if \code{project.query = FALSE}) or the query (if
\code{project.query = TRUE}), using the \code{features} specified. The data
from the other dataset is then projected onto this learned PCA structure.
If \code{reduction = "cca"}, then CCA is performed on the reference and
query for this dimensional reduction step. If
\code{reduction = "lsiproject"}, the stored LSI dimension reduction in the
reference object is used to project the query dataset onto the reference.
If \code{l2.norm} is set to \code{TRUE}, perform L2 normalization of the
embedding vectors.}
\item{Identify anchors between the reference and query - pairs of cells
from each dataset that are contained within each other's neighborhoods
(also known as mutual nearest neighbors).}
\item{Filter low confidence anchors to ensure anchors in the low dimension
space are in broad agreement with the high dimensional measurements. This
is done by looking at the neighbors of each query cell in the reference
dataset using \code{max.features} to define this space. If the reference
cell isn't found within the first \code{k.filter} neighbors, remove the
anchor.}
\item{Assign each remaining anchor a score. For each anchor cell, determine
the nearest \code{k.score} anchors within its own dataset and within its
pair's dataset. Based on these neighborhoods, construct an overall neighbor
graph and then compute the shared neighbor overlap between anchor and query
cells (analogous to an SNN graph). We use the 0.01 and 0.90 quantiles on
these scores to dampen outlier effects and rescale to range between 0-1.}
}
}
\examples{
\dontrun{
# to install the SeuratData package see https://github.com/satijalab/seurat-data
library(SeuratData)
data("pbmc3k")
# for demonstration, split the object into reference and query
pbmc.reference <- pbmc3k[, 1:1350]
pbmc.query <- pbmc3k[, 1351:2700]
# perform standard preprocessing on each object
pbmc.reference <- NormalizeData(pbmc.reference)
pbmc.reference <- FindVariableFeatures(pbmc.reference)
pbmc.reference <- ScaleData(pbmc.reference)
pbmc.query <- NormalizeData(pbmc.query)
pbmc.query <- FindVariableFeatures(pbmc.query)
pbmc.query <- ScaleData(pbmc.query)
# find anchors
anchors <- FindTransferAnchors(reference = pbmc.reference, query = pbmc.query)
# transfer labels
predictions <- TransferData(
anchorset = anchors,
refdata = pbmc.reference$seurat_annotations
)
pbmc.query <- AddMetaData(object = pbmc.query, metadata = predictions)
}
}
\references{
Stuart T, Butler A, et al. Comprehensive Integration of
Single-Cell Data. Cell. 2019;177:1888-1902 \doi{10.1016/j.cell.2019.05.031};
}
\concept{integration}
Seurat/man/contrast-theory.Rd 0000644 0001762 0000144 00000001316 14525500037 015726 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{contrast-theory}
\alias{contrast-theory}
\alias{Intensity}
\alias{Luminance}
\title{Get the intensity and/or luminance of a color}
\source{
\url{https://stackoverflow.com/questions/3942878/how-to-decide-font-color-in-white-or-black-depending-on-background-color}
}
\usage{
Intensity(color)
Luminance(color)
}
\arguments{
\item{color}{A vector of colors}
}
\value{
A vector of intensities/luminances for each color
}
\description{
Get the intensity and/or luminance of a color
}
\examples{
Intensity(color = c('black', 'white', '#E76BF3'))
Luminance(color = c('black', 'white', '#E76BF3'))
}
\concept{visualization}
Seurat/man/DimPlot.Rd 0000644 0001762 0000144 00000011467 14525500037 014141 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R, R/convenience.R
\name{DimPlot}
\alias{DimPlot}
\alias{TSNEPlot}
\alias{PCAPlot}
\alias{ICAPlot}
\alias{UMAPPlot}
\title{Dimensional reduction plot}
\usage{
DimPlot(
object,
dims = c(1, 2),
cells = NULL,
cols = NULL,
pt.size = NULL,
reduction = NULL,
group.by = NULL,
split.by = NULL,
shape.by = NULL,
order = NULL,
shuffle = FALSE,
seed = 1,
label = FALSE,
label.size = 4,
label.color = "black",
label.box = FALSE,
repel = FALSE,
alpha = 1,
cells.highlight = NULL,
cols.highlight = "#DE2D26",
sizes.highlight = 1,
na.value = "grey50",
ncol = NULL,
combine = TRUE,
raster = NULL,
raster.dpi = c(512, 512)
)
PCAPlot(object, ...)
TSNEPlot(object, ...)
UMAPPlot(object, ...)
}
\arguments{
\item{object}{Seurat object}
\item{dims}{Dimensions to plot, must be a two-length numeric vector specifying x- and y-dimensions}
\item{cells}{Vector of cells to plot (default is all cells)}
\item{cols}{Vector of colors, each color corresponds to an identity class. This may also be a single character
or numeric value corresponding to a palette as specified by \code{\link[RColorBrewer]{brewer.pal.info}}.
By default, ggplot2 assigns colors. We also include a number of palettes from the pals package.
See \code{\link{DiscretePalette}} for details.}
\item{pt.size}{Adjust point size for plotting}
\item{reduction}{Which dimensionality reduction to use. If not specified, first searches for umap, then tsne, then pca}
\item{group.by}{Name of one or more metadata columns to group (color) cells by
(for example, orig.ident); pass 'ident' to group by identity class}
\item{split.by}{A factor in object metadata to split the plot by, pass 'ident'
to split by cell identity}
\item{shape.by}{If NULL, all points are circles (default). You can specify any
cell attribute (that can be pulled with FetchData) allowing for both
different colors and different shapes on cells. Only applicable if \code{raster = FALSE}.}
\item{order}{Specify the order of plotting for the idents. This can be
useful for crowded plots if points of interest are being buried. Provide
either a full list of valid idents or a subset to be plotted last (on top)}
\item{shuffle}{Whether to randomly shuffle the order of points. This can be
useful for crowded plots if points of interest are being buried. (default is FALSE)}
\item{seed}{Sets the seed if randomly shuffling the order of points.}
\item{label}{Whether to label the clusters}
\item{label.size}{Sets size of labels}
\item{label.color}{Sets the color of the label text}
\item{label.box}{Whether to put a box around the label text (geom_text vs
geom_label)}
\item{repel}{Repel labels}
\item{alpha}{Alpha value for plotting (default is 1)}
\item{cells.highlight}{A list of character or numeric vectors of cells to
highlight. If only one group of cells desired, can simply
pass a vector instead of a list. If set, colors selected cells to the color(s)
in \code{cols.highlight} and other cells black (white if dark.theme = TRUE);
will also resize to the size(s) passed to \code{sizes.highlight}}
\item{cols.highlight}{A vector of colors to highlight the cells as; will
repeat to the length groups in cells.highlight}
\item{sizes.highlight}{Size of highlighted cells; will repeat to the length
groups in cells.highlight. If \code{sizes.highlight = TRUE} size of all
points will be this value.}
\item{na.value}{Color value for NA points when using custom scale}
\item{ncol}{Number of columns for display when combining plots}
\item{combine}{Combine plots into a single \code{\link[patchwork]{patchwork}ed}
ggplot object. If \code{FALSE}, return a list of ggplot objects}
\item{raster}{Convert points to raster format, default is \code{NULL} which
automatically rasterizes if plotting more than 100,000 cells}
\item{raster.dpi}{Pixel resolution for rasterized plots, passed to geom_scattermore().
Default is c(512, 512).}
\item{...}{Extra parameters passed to \code{DimPlot}}
}
\value{
A \code{\link[patchwork]{patchwork}ed} ggplot object if
\code{combine = TRUE}; otherwise, a list of ggplot objects
}
\description{
Graphs the output of a dimensional reduction technique on a 2D scatter plot where each point is a
cell and it's positioned based on the cell embeddings determined by the reduction technique. By
default, cells are colored by their identity class (can be changed with the group.by parameter).
}
\note{
For the old \code{do.hover} and \code{do.identify} functionality, please see
\code{HoverLocator} and \code{CellSelector}, respectively.
}
\examples{
data("pbmc_small")
DimPlot(object = pbmc_small)
DimPlot(object = pbmc_small, split.by = 'letter.idents')
}
\seealso{
\code{\link{FeaturePlot}} \code{\link{HoverLocator}}
\code{\link{CellSelector}} \code{\link{FetchData}}
}
\concept{convenience}
\concept{visualization}
Seurat/man/merge.SCTAssay.Rd 0000644 0001762 0000144 00000002031 14525500037 015304 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{merge.SCTAssay}
\alias{merge.SCTAssay}
\title{Merge SCTAssay objects}
\usage{
\method{merge}{SCTAssay}(
x = NULL,
y = NULL,
add.cell.ids = NULL,
merge.data = TRUE,
na.rm = TRUE,
...
)
}
\arguments{
\item{x}{A \code{\link[SeuratObject]{Seurat}} object}
\item{y}{A single \code{Seurat} object or a list of \code{Seurat} objects}
\item{add.cell.ids}{A character vector of \code{length(x = c(x, y))};
appends the corresponding values to the start of each objects' cell names}
\item{merge.data}{Merge the data slots instead of just merging the counts
(which requires renormalization); this is recommended if the same
normalization approach was applied to all objects}
\item{na.rm}{If na.rm = TRUE, this will only preserve residuals that are
present in all SCTAssays being merged. Otherwise, missing residuals will be
populated with NAs.}
\item{...}{Arguments passed to other methods}
}
\description{
Merge SCTAssay objects
}
\concept{objects}
Seurat/man/JackStrawPlot.Rd 0000644 0001762 0000144 00000003301 14525500037 015305 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{JackStrawPlot}
\alias{JackStrawPlot}
\title{JackStraw Plot}
\usage{
JackStrawPlot(
object,
dims = 1:5,
cols = NULL,
reduction = "pca",
xmax = 0.1,
ymax = 0.3
)
}
\arguments{
\item{object}{Seurat object}
\item{dims}{Dims to plot}
\item{cols}{Vector of colors, each color corresponds to an individual PC. This may also be a single character
or numeric value corresponding to a palette as specified by \code{\link[RColorBrewer]{brewer.pal.info}}.
By default, ggplot2 assigns colors. We also include a number of palettes from the pals package.
See \code{\link{DiscretePalette}} for details.}
\item{reduction}{reduction to pull jackstraw info from}
\item{xmax}{X-axis maximum on each QQ plot.}
\item{ymax}{Y-axis maximum on each QQ plot.}
}
\value{
A ggplot object
}
\description{
Plots the results of the JackStraw analysis for PCA significance. For each
PC, plots a QQ-plot comparing the distribution of p-values for all genes
across each PC, compared with a uniform distribution. Also determines a
p-value for the overall significance of each PC (see Details).
}
\details{
Significant PCs should show a p-value distribution (black curve) that is
strongly skewed to the left compared to the null distribution (dashed line)
The p-value for each PC is based on a proportion test comparing the number
of genes with a p-value below a particular threshold (score.thresh), compared with the
proportion of genes expected under a uniform distribution of p-values.
}
\examples{
data("pbmc_small")
JackStrawPlot(object = pbmc_small)
}
\seealso{
\code{\link{ScoreJackStraw}}
}
\author{
Omri Wurtzel
}
\concept{visualization}
Seurat/man/LinkedPlots.Rd 0000644 0001762 0000144 00000003467 14525500037 015022 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{LinkedPlots}
\alias{LinkedPlots}
\alias{LinkedDimPlot}
\alias{LinkedPlot}
\alias{LinkedFeaturePlot}
\title{Visualize spatial and clustering (dimensional reduction) data in a linked,
interactive framework}
\usage{
LinkedDimPlot(
object,
dims = 1:2,
reduction = NULL,
image = NULL,
group.by = NULL,
alpha = c(0.1, 1),
combine = TRUE
)
LinkedFeaturePlot(
object,
feature,
dims = 1:2,
reduction = NULL,
image = NULL,
slot = "data",
alpha = c(0.1, 1),
combine = TRUE
)
}
\arguments{
\item{object}{A Seurat object}
\item{dims}{Dimensions to plot, must be a two-length numeric vector specifying x- and y-dimensions}
\item{reduction}{Which dimensionality reduction to use. If not specified, first searches for umap, then tsne, then pca}
\item{image}{Name of the image to use in the plot}
\item{group.by}{Name of meta.data column to group the data by}
\item{alpha}{Controls opacity of spots. Provide as a vector specifying the
min and max for SpatialFeaturePlot. For SpatialDimPlot, provide a single
alpha value for each plot.}
\item{combine}{Combine plots into a single gg object; note that if TRUE;
theming will not work when plotting multiple features/groupings}
\item{feature}{Feature to visualize}
\item{slot}{If plotting a feature, which data slot to pull from (counts,
data, or scale.data)}
}
\value{
Returns final plots. If \code{combine}, plots are stitched together
using \code{\link{CombinePlots}}; otherwise, returns a list of ggplot objects
}
\description{
Visualize spatial and clustering (dimensional reduction) data in a linked,
interactive framework
}
\examples{
\dontrun{
LinkedDimPlot(seurat.object)
LinkedFeaturePlot(seurat.object, feature = 'Hpca')
}
}
\concept{spatial}
\concept{visualization}
Seurat/man/ProjectDimReduc.Rd 0000644 0001762 0000144 00000002661 14525500037 015610 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration.R
\name{ProjectDimReduc}
\alias{ProjectDimReduc}
\title{Project query data to reference dimensional reduction}
\usage{
ProjectDimReduc(
query,
reference,
mode = c("pcaproject", "lsiproject"),
reference.reduction,
combine = FALSE,
query.assay = NULL,
reference.assay = NULL,
features = NULL,
do.scale = TRUE,
reduction.name = NULL,
reduction.key = NULL,
verbose = TRUE
)
}
\arguments{
\item{query}{Query object}
\item{reference}{Reference object}
\item{mode}{Projection mode name for projection
\itemize{
\item{pcaproject: PCA projection}
\item{lsiproject: LSI projection}
}}
\item{reference.reduction}{Name of dimensional reduction in the reference object}
\item{combine}{Determine if query and reference objects are combined}
\item{query.assay}{Assay used for query object}
\item{reference.assay}{Assay used for reference object}
\item{features}{Features used for projection}
\item{do.scale}{Whether to scale the expression matrix in the pcaproject mode}
\item{reduction.name}{dimensional reduction name, reference.reduction is used by default}
\item{reduction.key}{dimensional reduction key, the key in reference.reduction
is used by default}
\item{verbose}{Print progress and message}
}
\value{
Returns a query-only or query-reference combined seurat object
}
\description{
Project query data to reference dimensional reduction
}
Seurat/man/VlnPlot.Rd 0000644 0001762 0000144 00000005353 14525500037 014164 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{VlnPlot}
\alias{VlnPlot}
\title{Single cell violin plot}
\usage{
VlnPlot(
object,
features,
cols = NULL,
pt.size = NULL,
alpha = 1,
idents = NULL,
sort = FALSE,
assay = NULL,
group.by = NULL,
split.by = NULL,
adjust = 1,
y.max = NULL,
same.y.lims = FALSE,
log = FALSE,
ncol = NULL,
slot = deprecated(),
layer = NULL,
split.plot = FALSE,
stack = FALSE,
combine = TRUE,
fill.by = "feature",
flip = FALSE,
add.noise = TRUE,
raster = NULL
)
}
\arguments{
\item{object}{Seurat object}
\item{features}{Features to plot (gene expression, metrics, PC scores,
anything that can be retrieved by FetchData)}
\item{cols}{Colors to use for plotting}
\item{pt.size}{Point size for points}
\item{alpha}{Alpha value for points}
\item{idents}{Which classes to include in the plot (default is all)}
\item{sort}{Sort identity classes (on the x-axis) by the average
expression of the attribute being plotted, can also pass 'increasing' or 'decreasing' to change sort direction}
\item{assay}{Name of assay to use, defaults to the active assay}
\item{group.by}{Group (color) cells in different ways (for example, orig.ident)}
\item{split.by}{A factor in object metadata to split the plot by, pass 'ident'
to split by cell identity}
\item{adjust}{Adjust parameter for geom_violin}
\item{y.max}{Maximum y axis value}
\item{same.y.lims}{Set all the y-axis limits to the same values}
\item{log}{plot the feature axis on log scale}
\item{ncol}{Number of columns if multiple plots are displayed}
\item{slot}{Slot to pull expression data from (e.g. "counts" or "data")}
\item{layer}{Layer to pull expression data from (e.g. "counts" or "data")}
\item{split.plot}{plot each group of the split violin plots by multiple or
single violin shapes.}
\item{stack}{Horizontally stack plots for each feature}
\item{combine}{Combine plots into a single \code{\link[patchwork]{patchwork}ed}
ggplot object. If \code{FALSE}, return a list of ggplot}
\item{fill.by}{Color violins/ridges based on either 'feature' or 'ident'}
\item{flip}{flip plot orientation (identities on x-axis)}
\item{add.noise}{Whether to add a small amount of noise for plotting}
\item{raster}{Convert points to raster format. Requires 'ggrastr' to be installed.}
}
\value{
A \code{\link[patchwork]{patchwork}ed} ggplot object if
\code{combine = TRUE}; otherwise, a list of ggplot objects
}
\description{
Draws a violin plot of single cell data (gene expression, metrics, PC
scores, etc.)
}
\examples{
data("pbmc_small")
VlnPlot(object = pbmc_small, features = 'PC_1')
VlnPlot(object = pbmc_small, features = 'LYZ', split.by = 'groups')
}
\seealso{
\code{\link{FetchData}}
}
\concept{visualization}
Seurat/man/CellsByImage.Rd 0000644 0001762 0000144 00000001205 14525500037 015056 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{CellsByImage}
\alias{CellsByImage}
\title{Get a vector of cell names associated with an image (or set of images)}
\usage{
CellsByImage(object, images = NULL, unlist = FALSE)
}
\arguments{
\item{object}{Seurat object}
\item{images}{Vector of image names}
\item{unlist}{Return as a single vector of cell names as opposed to a list,
named by image name.}
}
\value{
A vector of cell names
}
\description{
Get a vector of cell names associated with an image (or set of images)
}
\examples{
\dontrun{
CellsByImage(object = object, images = "slice1")
}
}
Seurat/man/writing-integration.Rd 0000644 0001762 0000144 00000003400 14525500037 016561 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integration5.R
\name{writing-integration}
\alias{writing-integration}
\title{Writing Integration Method Functions}
\description{
Integration method functions can be written by anyone to implement any
integration method in Seurat. These methods should expect to take a
\link[SeuratObject:Assay5]{v5 assay} as input and return a named list of
objects that can be added back to a \code{Seurat} object (eg. a
\link[SeuratObject:DimReduc]{dimensional reduction} or cell-level meta data)
}
\section{Provided Parameters}{
Every integration method function should expect the following arguments:
\itemize{
\item \dQuote{\code{object}}: an \code{\link[SeuratObject]{Assay5}} object
\item \dQuote{\code{orig}}: \link[SeuratObject:DimReduc]{dimensional
reduction} to correct
\item \dQuote{\code{layers}}: names of normalized layers in \code{object}
\item \dQuote{\code{scale.layer}}: name(s) of scaled layer(s) in
\code{object}
\item \dQuote{\code{features}}: a vector of features for integration
\item \dQuote{\code{groups}}: a one-column data frame with the groups for
each cell in \code{object}; the column name will be \dQuote{group}
}
}
\section{Method Discovery}{
The documentation for \code{\link{IntegrateLayers}()} will automatically
link to integration method functions provided by packages in the
\code{\link[base]{search}()} space. To make an integration method function
discoverable by the documentation, simply add an attribute named
\dQuote{\code{Seurat.method}} to the function with a value of
\dQuote{\code{integration}}
\preformatted{
attr(MyIntegrationFunction, which = "Seurat.method") <- "integration"
}
}
\seealso{
\code{\link{IntegrateLayers}()}
}
\concept{integration}
\keyword{internal}
Seurat/man/FeaturePlot.Rd 0000644 0001762 0000144 00000013202 14525500037 015010 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{FeaturePlot}
\alias{FeaturePlot}
\alias{FeatureHeatmap}
\title{Visualize 'features' on a dimensional reduction plot}
\usage{
FeaturePlot(
object,
features,
dims = c(1, 2),
cells = NULL,
cols = if (blend) {
c("lightgrey", "#ff0000", "#00ff00")
} else {
c("lightgrey", "blue")
},
pt.size = NULL,
alpha = 1,
order = FALSE,
min.cutoff = NA,
max.cutoff = NA,
reduction = NULL,
split.by = NULL,
keep.scale = "feature",
shape.by = NULL,
slot = "data",
blend = FALSE,
blend.threshold = 0.5,
label = FALSE,
label.size = 4,
label.color = "black",
repel = FALSE,
ncol = NULL,
coord.fixed = FALSE,
by.col = TRUE,
sort.cell = deprecated(),
interactive = FALSE,
combine = TRUE,
raster = NULL,
raster.dpi = c(512, 512)
)
}
\arguments{
\item{object}{Seurat object}
\item{features}{Vector of features to plot. Features can come from:
\itemize{
\item An \code{Assay} feature (e.g. a gene name - "MS4A1")
\item A column name from meta.data (e.g. mitochondrial percentage -
"percent.mito")
\item A column name from a \code{DimReduc} object corresponding to the
cell embedding values (e.g. the PC 1 scores - "PC_1")
}}
\item{dims}{Dimensions to plot, must be a two-length numeric vector specifying x- and y-dimensions}
\item{cells}{Vector of cells to plot (default is all cells)}
\item{cols}{The two colors to form the gradient over. Provide as string vector with
the first color corresponding to low values, the second to high. Also accepts a Brewer
color scale or vector of colors. Note: this will bin the data into number of colors provided.
When blend is \code{TRUE}, takes anywhere from 1-3 colors:
\describe{
\item{1 color:}{Treated as color for double-negatives, will use default colors 2 and 3 for per-feature expression}
\item{2 colors:}{Treated as colors for per-feature expression, will use default color 1 for double-negatives}
\item{3+ colors:}{First color used for double-negatives, colors 2 and 3 used for per-feature expression, all others ignored}
}}
\item{pt.size}{Adjust point size for plotting}
\item{alpha}{Alpha value for plotting (default is 1)}
\item{order}{Boolean determining whether to plot cells in order of expression. Can be useful if
cells expressing given feature are getting buried.}
\item{min.cutoff, max.cutoff}{Vector of minimum and maximum cutoff values for each feature,
may specify quantile in the form of 'q##' where '##' is the quantile (eg, 'q1', 'q10')}
\item{reduction}{Which dimensionality reduction to use. If not specified, first searches for umap, then tsne, then pca}
\item{split.by}{A factor in object metadata to split the plot by, pass 'ident'
to split by cell identity}
\item{keep.scale}{How to handle the color scale across multiple plots. Options are:
\itemize{
\item \dQuote{feature} (default; by row/feature scaling): The plots for
each individual feature are scaled to the maximum expression of the
feature across the conditions provided to \code{split.by}
\item \dQuote{all} (universal scaling): The plots for all features and
conditions are scaled to the maximum expression value for the feature
with the highest overall expression
\item \code{NULL} (no scaling): Each individual plot is scaled to the
maximum expression value of the feature in the condition provided to
\code{split.by}. Be aware setting \code{NULL} will result in color
scales that are not comparable between plots
}}
\item{shape.by}{If NULL, all points are circles (default). You can specify any
cell attribute (that can be pulled with FetchData) allowing for both
different colors and different shapes on cells. Only applicable if \code{raster = FALSE}.}
\item{slot}{Which slot to pull expression data from?}
\item{blend}{Scale and blend expression values to visualize coexpression of two features}
\item{blend.threshold}{The color cutoff from weak signal to strong signal; ranges from 0 to 1.}
\item{label}{Whether to label the clusters}
\item{label.size}{Sets size of labels}
\item{label.color}{Sets the color of the label text}
\item{repel}{Repel labels}
\item{ncol}{Number of columns to combine multiple feature plots to, ignored if \code{split.by} is not \code{NULL}}
\item{coord.fixed}{Plot cartesian coordinates with fixed aspect ratio}
\item{by.col}{If splitting by a factor, plot the splits per column with the features as rows; ignored if \code{blend = TRUE}}
\item{sort.cell}{Redundant with \code{order}. This argument is being
deprecated. Please use \code{order} instead.}
\item{interactive}{Launch an interactive \code{\link[Seurat:IFeaturePlot]{FeaturePlot}}}
\item{combine}{Combine plots into a single \code{\link[patchwork]{patchwork}ed}
ggplot object. If \code{FALSE}, return a list of ggplot objects}
\item{raster}{Convert points to raster format, default is \code{NULL} which
automatically rasterizes if plotting more than 100,000 cells}
\item{raster.dpi}{Pixel resolution for rasterized plots, passed to geom_scattermore().
Default is c(512, 512).}
}
\value{
A \code{\link[patchwork]{patchwork}ed} ggplot object if
\code{combine = TRUE}; otherwise, a list of ggplot objects
}
\description{
Colors single cells on a dimensional reduction plot according to a 'feature'
(i.e. gene expression, PC scores, number of genes detected, etc.)
}
\note{
For the old \code{do.hover} and \code{do.identify} functionality, please see
\code{HoverLocator} and \code{CellSelector}, respectively.
}
\examples{
data("pbmc_small")
FeaturePlot(object = pbmc_small, features = 'PC_1')
}
\seealso{
\code{\link{DimPlot}} \code{\link{HoverLocator}}
\code{\link{CellSelector}}
}
\concept{visualization}
Seurat/man/TopNeighbors.Rd 0000644 0001762 0000144 00000000736 14525500037 015171 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{TopNeighbors}
\alias{TopNeighbors}
\title{Get nearest neighbors for given cell}
\usage{
TopNeighbors(object, cell, n = 5)
}
\arguments{
\item{object}{\code{\link{Neighbor}} object}
\item{cell}{Cell of interest}
\item{n}{Number of neighbors to return}
}
\value{
Returns a vector of cell names
}
\description{
Return a vector of cell names of the nearest n cells.
}
\concept{objects}
Seurat/man/SaveAnnoyIndex.Rd 0000644 0001762 0000144 00000000574 14525500037 015461 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{SaveAnnoyIndex}
\alias{SaveAnnoyIndex}
\title{Save the Annoy index}
\usage{
SaveAnnoyIndex(object, file)
}
\arguments{
\item{object}{A Neighbor object with the annoy index stored}
\item{file}{Path to file to write index to}
}
\description{
Save the Annoy index
}
\concept{utilities}
Seurat/man/VST.Rd 0000644 0001762 0000144 00000003364 14525500037 013242 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/preprocessing5.R
\name{VST}
\alias{VST}
\alias{VST.default}
\alias{VST.IterableMatrix}
\alias{VST.dgCMatrix}
\alias{VST.matrix}
\title{Variance Stabilizing Transformation}
\usage{
VST(data, margin = 1L, nselect = 2000L, span = 0.3, clip = NULL, ...)
\method{VST}{default}(data, margin = 1L, nselect = 2000L, span = 0.3, clip = NULL, ...)
\method{VST}{IterableMatrix}(
data,
margin = 1L,
nselect = 2000L,
span = 0.3,
clip = NULL,
verbose = TRUE,
...
)
\method{VST}{dgCMatrix}(
data,
margin = 1L,
nselect = 2000L,
span = 0.3,
clip = NULL,
verbose = TRUE,
...
)
\method{VST}{matrix}(data, margin = 1L, nselect = 2000L, span = 0.3, clip = NULL, ...)
}
\arguments{
\item{data}{A matrix-like object}
\item{margin}{Unused}
\item{nselect}{Number of features to select}
\item{span}{the parameter \eqn{\alpha} which controls the degree of
smoothing.}
\item{clip}{Upper bound for values post-standardization; defaults to the
square root of the number of cells}
\item{...}{Arguments passed to other methods}
\item{verbose}{...}
}
\value{
A data frame with the following columns:
\itemize{
\item \dQuote{\code{mean}}: ...
\item \dQuote{\code{variance}}: ...
\item \dQuote{\code{variance.expected}}: ...
\item \dQuote{\code{variance.standardized}}: ...
\item \dQuote{\code{variable}}: \code{TRUE} if the feature selected as
variable, otherwise \code{FALSE}
\item \dQuote{\code{rank}}: If the feature is selected as variable, its
rank among the variable features (lower ranks indicate greater
variability); otherwise, \code{NA}
}
}
\description{
Apply variance stabilizing transformation for selection of variable features
}
\keyword{internal}
Seurat/man/RunPCA.Rd 0000644 0001762 0000144 00000005102 14525500037 013646 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/dimensional_reduction.R
\name{RunPCA}
\alias{RunPCA}
\alias{RunPCA.default}
\alias{RunPCA.Assay}
\alias{RunPCA.Seurat}
\title{Run Principal Component Analysis}
\usage{
RunPCA(object, ...)
\method{RunPCA}{default}(
object,
assay = NULL,
npcs = 50,
rev.pca = FALSE,
weight.by.var = TRUE,
verbose = TRUE,
ndims.print = 1:5,
nfeatures.print = 30,
reduction.key = "PC_",
seed.use = 42,
approx = TRUE,
...
)
\method{RunPCA}{Assay}(
object,
assay = NULL,
features = NULL,
npcs = 50,
rev.pca = FALSE,
weight.by.var = TRUE,
verbose = TRUE,
ndims.print = 1:5,
nfeatures.print = 30,
reduction.key = "PC_",
seed.use = 42,
...
)
\method{RunPCA}{Seurat}(
object,
assay = NULL,
features = NULL,
npcs = 50,
rev.pca = FALSE,
weight.by.var = TRUE,
verbose = TRUE,
ndims.print = 1:5,
nfeatures.print = 30,
reduction.name = "pca",
reduction.key = "PC_",
seed.use = 42,
...
)
}
\arguments{
\item{object}{An object}
\item{...}{Arguments passed to other methods and IRLBA}
\item{assay}{Name of Assay PCA is being run on}
\item{npcs}{Total Number of PCs to compute and store (50 by default)}
\item{rev.pca}{By default computes the PCA on the cell x gene matrix. Setting
to true will compute it on gene x cell matrix.}
\item{weight.by.var}{Weight the cell embeddings by the variance of each PC
(weights the gene loadings if rev.pca is TRUE)}
\item{verbose}{Print the top genes associated with high/low loadings for
the PCs}
\item{ndims.print}{PCs to print genes for}
\item{nfeatures.print}{Number of genes to print for each PC}
\item{reduction.key}{dimensional reduction key, specifies the string before
the number for the dimension names. PC by default}
\item{seed.use}{Set a random seed. By default, sets the seed to 42. Setting
NULL will not set a seed.}
\item{approx}{Use truncated singular value decomposition to approximate PCA}
\item{features}{Features to compute PCA on. If features=NULL, PCA will be run
using the variable features for the Assay. Note that the features must be present
in the scaled data. Any requested features that are not scaled or have 0 variance
will be dropped, and the PCA will be run using the remaining features.}
\item{reduction.name}{dimensional reduction name, pca by default}
}
\value{
Returns Seurat object with the PCA calculation stored in the reductions slot
}
\description{
Run a PCA dimensionality reduction. For details about stored PCA calculation
parameters, see \code{PrintPCAParams}.
}
\concept{dimensional_reduction}
Seurat/man/ColorDimSplit.Rd 0000644 0001762 0000144 00000010665 14525500037 015314 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{ColorDimSplit}
\alias{ColorDimSplit}
\title{Color dimensional reduction plot by tree split}
\usage{
ColorDimSplit(
object,
node,
left.color = "red",
right.color = "blue",
other.color = "grey50",
...
)
}
\arguments{
\item{object}{Seurat object}
\item{node}{Node in cluster tree on which to base the split}
\item{left.color}{Color for the left side of the split}
\item{right.color}{Color for the right side of the split}
\item{other.color}{Color for all other cells}
\item{...}{
Arguments passed on to \code{\link[=DimPlot]{DimPlot}}
\describe{
\item{\code{dims}}{Dimensions to plot, must be a two-length numeric vector specifying x- and y-dimensions}
\item{\code{cells}}{Vector of cells to plot (default is all cells)}
\item{\code{cols}}{Vector of colors, each color corresponds to an identity class. This may also be a single character
or numeric value corresponding to a palette as specified by \code{\link[RColorBrewer]{brewer.pal.info}}.
By default, ggplot2 assigns colors. We also include a number of palettes from the pals package.
See \code{\link{DiscretePalette}} for details.}
\item{\code{pt.size}}{Adjust point size for plotting}
\item{\code{reduction}}{Which dimensionality reduction to use. If not specified, first searches for umap, then tsne, then pca}
\item{\code{group.by}}{Name of one or more metadata columns to group (color) cells by
(for example, orig.ident); pass 'ident' to group by identity class}
\item{\code{split.by}}{A factor in object metadata to split the plot by, pass 'ident'
to split by cell identity}
\item{\code{shape.by}}{If NULL, all points are circles (default). You can specify any
cell attribute (that can be pulled with FetchData) allowing for both
different colors and different shapes on cells. Only applicable if \code{raster = FALSE}.}
\item{\code{order}}{Specify the order of plotting for the idents. This can be
useful for crowded plots if points of interest are being buried. Provide
either a full list of valid idents or a subset to be plotted last (on top)}
\item{\code{shuffle}}{Whether to randomly shuffle the order of points. This can be
useful for crowded plots if points of interest are being buried. (default is FALSE)}
\item{\code{seed}}{Sets the seed if randomly shuffling the order of points.}
\item{\code{label}}{Whether to label the clusters}
\item{\code{label.size}}{Sets size of labels}
\item{\code{label.color}}{Sets the color of the label text}
\item{\code{label.box}}{Whether to put a box around the label text (geom_text vs
geom_label)}
\item{\code{alpha}}{Alpha value for plotting (default is 1)}
\item{\code{repel}}{Repel labels}
\item{\code{cells.highlight}}{A list of character or numeric vectors of cells to
highlight. If only one group of cells desired, can simply
pass a vector instead of a list. If set, colors selected cells to the color(s)
in \code{cols.highlight} and other cells black (white if dark.theme = TRUE);
will also resize to the size(s) passed to \code{sizes.highlight}}
\item{\code{cols.highlight}}{A vector of colors to highlight the cells as; will
repeat to the length groups in cells.highlight}
\item{\code{sizes.highlight}}{Size of highlighted cells; will repeat to the length
groups in cells.highlight. If \code{sizes.highlight = TRUE} size of all
points will be this value.}
\item{\code{na.value}}{Color value for NA points when using custom scale}
\item{\code{ncol}}{Number of columns for display when combining plots}
\item{\code{combine}}{Combine plots into a single \code{\link[patchwork]{patchwork}ed}
ggplot object. If \code{FALSE}, return a list of ggplot objects}
\item{\code{raster}}{Convert points to raster format, default is \code{NULL} which
automatically rasterizes if plotting more than 100,000 cells}
\item{\code{raster.dpi}}{Pixel resolution for rasterized plots, passed to geom_scattermore().
Default is c(512, 512).}
}}
}
\value{
Returns a DimPlot
}
\description{
Returns a DimPlot colored based on whether the cells fall in clusters
to the left or to the right of a node split in the cluster tree.
}
\examples{
\dontrun{
if (requireNamespace("ape", quietly = TRUE)) {
data("pbmc_small")
pbmc_small <- BuildClusterTree(object = pbmc_small, verbose = FALSE)
PlotClusterTree(pbmc_small)
ColorDimSplit(pbmc_small, node = 5)
}
}
}
\seealso{
\code{\link{DimPlot}}
}
\concept{visualization}
Seurat/man/HTODemux.Rd 0000644 0001762 0000144 00000004054 14525500037 014220 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{HTODemux}
\alias{HTODemux}
\title{Demultiplex samples based on data from cell 'hashing'}
\usage{
HTODemux(
object,
assay = "HTO",
positive.quantile = 0.99,
init = NULL,
nstarts = 100,
kfunc = "clara",
nsamples = 100,
seed = 42,
verbose = TRUE
)
}
\arguments{
\item{object}{Seurat object. Assumes that the hash tag oligo (HTO) data has been added and normalized.}
\item{assay}{Name of the Hashtag assay (HTO by default)}
\item{positive.quantile}{The quantile of inferred 'negative' distribution for each hashtag - over which the cell is considered 'positive'. Default is 0.99}
\item{init}{Initial number of clusters for hashtags. Default is the # of hashtag oligo names + 1 (to account for negatives)}
\item{nstarts}{nstarts value for k-means clustering (for kfunc = "kmeans"). 100 by default}
\item{kfunc}{Clustering function for initial hashtag grouping. Default is "clara" for fast k-medoids clustering on large applications, also support "kmeans" for kmeans clustering}
\item{nsamples}{Number of samples to be drawn from the dataset used for clustering, for kfunc = "clara"}
\item{seed}{Sets the random seed. If NULL, seed is not set}
\item{verbose}{Prints the output}
}
\value{
The Seurat object with the following demultiplexed information stored in the meta data:
\describe{
\item{hash.maxID}{Name of hashtag with the highest signal}
\item{hash.secondID}{Name of hashtag with the second highest signal}
\item{hash.margin}{The difference between signals for hash.maxID and hash.secondID}
\item{classification}{Classification result, with doublets/multiplets named by the top two highest hashtags}
\item{classification.global}{Global classification result (singlet, doublet or negative)}
\item{hash.ID}{Classification result where doublet IDs are collapsed}
}
}
\description{
Assign sample-of-origin for each cell, annotate doublets.
}
\examples{
\dontrun{
object <- HTODemux(object)
}
}
\seealso{
\code{\link{HTOHeatmap}}
}
\concept{preprocessing}
Seurat/man/as.Seurat.Rd 0000644 0001762 0000144 00000002410 14525500037 014422 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\name{as.Seurat.CellDataSet}
\alias{as.Seurat.CellDataSet}
\alias{as.Seurat.SingleCellExperiment}
\title{Convert objects to \code{Seurat} objects}
\usage{
\method{as.Seurat}{CellDataSet}(x, slot = "counts", assay = "RNA", verbose = TRUE, ...)
\method{as.Seurat}{SingleCellExperiment}(
x,
counts = "counts",
data = "logcounts",
assay = NULL,
project = "SingleCellExperiment",
...
)
}
\arguments{
\item{x}{An object to convert to class \code{Seurat}}
\item{slot}{Slot to store expression data as}
\item{assay}{Name of assays to convert; set to \code{NULL} for all assays to be converted}
\item{verbose}{Show progress updates}
\item{...}{Arguments passed to other methods}
\item{counts}{name of the SingleCellExperiment assay to store as \code{counts};
set to \code{NULL} if only normalized data are present}
\item{data}{name of the SingleCellExperiment assay to slot as \code{data}.
Set to NULL if only counts are present}
\item{project}{Project name for new Seurat object}
}
\value{
A \code{Seurat} object generated from \code{x}
}
\description{
Convert objects to \code{Seurat} objects
}
\seealso{
\code{\link[SeuratObject:as.Seurat]{SeuratObject::as.Seurat}}
}
\concept{objects}
Seurat/DESCRIPTION 0000644 0001762 0000144 00000015325 14525771316 013244 0 ustar ligges users Package: Seurat
Version: 5.0.1
Date: 2023-11-16
Title: Tools for Single Cell Genomics
Description: A toolkit for quality control, analysis, and exploration of single cell RNA sequencing data. 'Seurat' aims to enable users to identify and interpret sources of heterogeneity from single cell transcriptomic measurements, and to integrate diverse types of single cell data. See Satija R, Farrell J, Gennert D, et al (2015) <doi:10.1038/nbt.3192>, Macosko E, Basu A, Satija R, et al (2015) <doi:10.1016/j.cell.2015.05.002>, Stuart T, Butler A, et al (2019) <doi:10.1016/j.cell.2019.05.031>, and Hao, Hao, et al (2020) <doi:10.1101/2020.10.12.335331> for more details.
Authors@R: c(
person(given = "Andrew", family = "Butler", email = "abutler@nygenome.org", role = "ctb", comment = c(ORCID = "0000-0003-3608-0463")),
person(given = "Saket", family = "Choudhary", email = "schoudhary@nygenome.org", role = "ctb", comment = c(ORCID = "0000-0001-5202-7633")),
person(given = "Charlotte", family = "Darby", email = "cdarby@nygenome.org", role = "ctb", comment = c(ORCID = "0000-0003-2195-5300")),
person(given = "Jeff", family = "Farrell", email = "jfarrell@g.harvard.edu", role = "ctb"),
person(given = "Isabella", family = "Grabski", email = "igrabski@nygenome.org", role = "ctb", comment = c(ORCID = "0000-0002-0616-5469")),
person(given = "Christoph", family = "Hafemeister", email = "chafemeister@nygenome.org", role = "ctb", comment = c(ORCID = "0000-0001-6365-8254")),
person(given = "Yuhan", family = "Hao", email = "yhao@nygenome.org", role = "ctb", comment = c(ORCID = "0000-0002-1810-0822")),
person(given = "Austin", family = "Hartman", email = "ahartman@nygenome.org", role = "ctb", comment = c(ORCID = "0000-0001-7278-1852")),
person(given = "Paul", family = "Hoffman", email = "hoff0792@umn.edu", role = "ctb", comment = c(ORCID = "0000-0002-7693-8957")),
person(given = "Jaison", family = "Jain", email = "jjain@nygenome.org", role = "ctb", comment = c(ORCID = "0000-0002-9478-5018")),
person(given = "Longda", family = "Jiang", email = "ljiang@nygenome.org", role = "ctb", comment = c(ORCID = "0000-0003-4964-6497")),
person(given = "Madeline", family = "Kowalski", email = "mkowalski@nygenome.org", role = "ctb", comment = c(ORCID = "0000-0002-5655-7620")),
person(given = "Skylar", family = "Li", email = "sli@nygenome.org", role = "ctb"),
person(given = "Gesmira", family = "Molla", email = 'gmolla@nygenome.org', role = 'ctb', comment = c(ORCID = '0000-0002-8628-5056')),
person(given = "Efthymia", family = "Papalexi", email = "epapalexi@nygenome.org", role = "ctb", comment = c(ORCID = "0000-0001-5898-694X")),
person(given = "Patrick", family = "Roelli", email = "proelli@nygenome.org", role = "ctb"),
person(given = "Rahul", family = "Satija", email = "seurat@nygenome.org", role = c("aut", "cre"), comment = c(ORCID = "0000-0001-9448-8833")),
person(given = "Karthik", family = "Shekhar", email = "kshekhar@berkeley.edu", role = "ctb"),
person(given = "Avi", family = "Srivastava", email = "asrivastava@nygenome.org", role = "ctb", comment = c(ORCID = "0000-0001-9798-2079")),
person(given = "Tim", family = "Stuart", email = "tstuart@nygenome.org", role = "ctb", comment = c(ORCID = "0000-0002-3044-0897")),
person(given = "Kristof", family = "Torkenczy", email = "", role = "ctb", comment = c(ORCID = "0000-0002-4869-7957")),
person(given = "Shiwei", family = "Zheng", email = "szheng@nygenome.org", role = "ctb", comment = c(ORCID = "0000-0001-6682-6743")),
person("Satija Lab and Collaborators", role = "fnd")
)
URL: https://satijalab.org/seurat, https://github.com/satijalab/seurat
BugReports: https://github.com/satijalab/seurat/issues
Additional_repositories: https://satijalab.r-universe.dev,
https://bnprks.r-universe.dev
Depends: R (>= 4.0.0), methods, SeuratObject (>= 5.0.0)
Imports: cluster, cowplot, fastDummies, fitdistrplus, future,
future.apply, generics (>= 0.1.3), ggplot2 (>= 3.3.0), ggrepel,
ggridges, graphics, grDevices, grid, httr, ica, igraph, irlba,
jsonlite, KernSmooth, leiden (>= 0.3.1), lifecycle, lmtest,
MASS, Matrix (>= 1.5-0), matrixStats, miniUI, patchwork,
pbapply, plotly (>= 4.9.0), png, progressr, purrr, RANN,
RColorBrewer, Rcpp (>= 1.0.7), RcppAnnoy (>= 0.0.18), RcppHNSW,
reticulate, rlang, ROCR, RSpectra, Rtsne, scales, scattermore
(>= 1.2), sctransform (>= 0.4.1), shiny, spatstat.explore,
spatstat.geom, stats, tibble, tools, utils, uwot (>= 0.1.10)
LinkingTo: Rcpp (>= 0.11.0), RcppEigen, RcppProgress
License: MIT + file LICENSE
LazyData: true
Collate: 'RcppExports.R' 'reexports.R' 'generics.R' 'clustering.R'
'visualization.R' 'convenience.R' 'data.R'
'differential_expression.R' 'dimensional_reduction.R'
'integration.R' 'zzz.R' 'integration5.R' 'mixscape.R'
'objects.R' 'preprocessing.R' 'preprocessing5.R' 'roxygen.R'
'sketching.R' 'tree.R' 'utilities.R'
RoxygenNote: 7.2.3
Encoding: UTF-8
Suggests: ape, BPCells, rsvd, testthat, hdf5r, S4Vectors,
SummarizedExperiment, SingleCellExperiment, MAST, DESeq2,
BiocGenerics, GenomicRanges, GenomeInfoDb, IRanges,
rtracklayer, Rfast2, monocle, Biobase, VGAM, limma, metap,
enrichR, mixtools, ggrastr, data.table, R.utils, presto,
DelayedArray, harmony
NeedsCompilation: yes
Packaged: 2023-11-16 20:50:34 UTC; mollag
Author: Andrew Butler [ctb] (<https://orcid.org/0000-0003-3608-0463>),
  Saket Choudhary [ctb] (<https://orcid.org/0000-0001-5202-7633>),
  Charlotte Darby [ctb] (<https://orcid.org/0000-0003-2195-5300>),
  Jeff Farrell [ctb],
  Isabella Grabski [ctb] (<https://orcid.org/0000-0002-0616-5469>),
  Christoph Hafemeister [ctb] (<https://orcid.org/0000-0001-6365-8254>),
  Yuhan Hao [ctb] (<https://orcid.org/0000-0002-1810-0822>),
  Austin Hartman [ctb] (<https://orcid.org/0000-0001-7278-1852>),
  Paul Hoffman [ctb] (<https://orcid.org/0000-0002-7693-8957>),
  Jaison Jain [ctb] (<https://orcid.org/0000-0002-9478-5018>),
  Longda Jiang [ctb] (<https://orcid.org/0000-0003-4964-6497>),
  Madeline Kowalski [ctb] (<https://orcid.org/0000-0002-5655-7620>),
  Skylar Li [ctb],
  Gesmira Molla [ctb] (<https://orcid.org/0000-0002-8628-5056>),
  Efthymia Papalexi [ctb] (<https://orcid.org/0000-0001-5898-694X>),
  Patrick Roelli [ctb],
  Rahul Satija [aut, cre] (<https://orcid.org/0000-0001-9448-8833>),
  Karthik Shekhar [ctb],
  Avi Srivastava [ctb] (<https://orcid.org/0000-0001-9798-2079>),
  Tim Stuart [ctb] (<https://orcid.org/0000-0002-3044-0897>),
  Kristof Torkenczy [ctb] (<https://orcid.org/0000-0002-4869-7957>),
  Shiwei Zheng [ctb] (<https://orcid.org/0000-0001-6682-6743>),
  Satija Lab and Collaborators [fnd]
Maintainer: Rahul Satija <seurat@nygenome.org>
Repository: CRAN
Date/Publication: 2023-11-17 23:10:06 UTC
Seurat/build/ 0000755 0001762 0000144 00000000000 14525500232 012612 5 ustar ligges users Seurat/build/partial.rdb 0000644 0001762 0000144 00000116625 14525500221 014750 0 ustar ligges users zV$
4I
h@h@6@s}$@623[-IGmY[-۲tdI؞ef<;;ggfgvw>|OdUT!͈"Q:@%}"7o|?ɴeӞik'ϾLaty8t}zm3zcӬ`1r^0\`S3̞6o->w
O⿱c⃥g/;v=:'guպQ=qO&^i`(}lnxN[hȁȗZIwA2\W+=F̟?q_,h|Qson'y7+G7BM'eqk6?#??~jdybmmpm}jY+^A4sd0ox0,soZ]n,㶭BIJ7ل_TWv?'40xv=?ĞuуRn~2d#F-1+N6P&B*=R?! GD9mq`^GxspԺw5|j|>"8F9NyRYuߪV44'F;FHE$TDn{!Ű܋,c
5bT=ڋ!dYDd!]:{gg D@I|J Ƭ/-nC:A3miZ]UicPή.Mm?|L_]5bGp,ˉ<