dbplyr/ 0000755 0001762 0000144 00000000000 14033054373 011550 5 ustar ligges users dbplyr/NAMESPACE 0000644 0001762 0000144 00000035567 14032677513 013015 0 ustar ligges users # Generated by roxygen2: do not edit by hand
S3method(anti_join,tbl_lazy)
S3method(arrange,tbl_lazy)
S3method(as.data.frame,tbl_lazy)
S3method(as.data.frame,tbl_sql)
S3method(as.sql,Id)
S3method(as.sql,character)
S3method(as.sql,dbplyr_schema)
S3method(as.sql,ident)
S3method(as.sql,sql)
S3method(auto_copy,tbl_sql)
S3method(c,ident)
S3method(c,sql)
S3method(collapse,tbl_sql)
S3method(collect,tbl_sql)
S3method(compute,tbl_sql)
S3method(copy_to,src_sql)
S3method(count,tbl_lazy)
S3method(db_analyze,DBIConnection)
S3method(db_collect,DBIConnection)
S3method(db_compute,DBIConnection)
S3method(db_connection_describe,DBIConnection)
S3method(db_connection_describe,MariaDBConnection)
S3method(db_connection_describe,MySQL)
S3method(db_connection_describe,MySQLConnection)
S3method(db_connection_describe,OdbcConnection)
S3method(db_connection_describe,PostgreSQL)
S3method(db_connection_describe,PostgreSQLConnection)
S3method(db_connection_describe,PqConnection)
S3method(db_connection_describe,SQLiteConnection)
S3method(db_copy_to,DBIConnection)
S3method(db_create_index,DBIConnection)
S3method(db_desc,DBIConnection)
S3method(db_explain,DBIConnection)
S3method(db_query_fields,DBIConnection)
S3method(db_query_fields,PostgreSQLConnection)
S3method(db_save_query,DBIConnection)
S3method(db_sql_render,DBIConnection)
S3method(db_table_temporary,"Microsoft SQL Server")
S3method(db_table_temporary,DBIConnection)
S3method(db_table_temporary,HDB)
S3method(db_write_table,DBIConnection)
S3method(db_write_table,PostgreSQLConnection)
S3method(dbi_quote,ident_q)
S3method(dbplyr_edition,"Microsoft SQL Server")
S3method(dbplyr_edition,ACCESS)
S3method(dbplyr_edition,HDB)
S3method(dbplyr_edition,Hive)
S3method(dbplyr_edition,Impala)
S3method(dbplyr_edition,MariaDBConnection)
S3method(dbplyr_edition,MySQL)
S3method(dbplyr_edition,MySQLConnection)
S3method(dbplyr_edition,OdbcConnection)
S3method(dbplyr_edition,OraConnection)
S3method(dbplyr_edition,Oracle)
S3method(dbplyr_edition,PostgreSQL)
S3method(dbplyr_edition,PostgreSQLConnection)
S3method(dbplyr_edition,PqConnection)
S3method(dbplyr_edition,Redshift)
S3method(dbplyr_edition,RedshiftConnection)
S3method(dbplyr_edition,SQLiteConnection)
S3method(dbplyr_edition,Teradata)
S3method(dbplyr_edition,TestConnection)
S3method(dbplyr_edition,default)
S3method(dbplyr_fill0,ACCESS)
S3method(dbplyr_fill0,DBIConnection)
S3method(dbplyr_fill0,HDB)
S3method(dbplyr_fill0,MariaDBConnection)
S3method(dbplyr_fill0,MySQL)
S3method(dbplyr_fill0,MySQLConnection)
S3method(dbplyr_fill0,PostgreSQL)
S3method(dbplyr_fill0,PqConnection)
S3method(dbplyr_fill0,SQLiteConnection)
S3method(dim,tbl_lazy)
S3method(dimnames,tbl_lazy)
S3method(distinct,tbl_lazy)
S3method(do,tbl_sql)
S3method(escape,"NULL")
S3method(escape,Date)
S3method(escape,POSIXt)
S3method(escape,blob)
S3method(escape,character)
S3method(escape,data.frame)
S3method(escape,double)
S3method(escape,factor)
S3method(escape,ident)
S3method(escape,ident_q)
S3method(escape,integer)
S3method(escape,integer64)
S3method(escape,list)
S3method(escape,logical)
S3method(escape,reactivevalues)
S3method(escape,sql)
S3method(explain,tbl_sql)
S3method(format,ident)
S3method(format,sql)
S3method(format,src_sql)
S3method(full_join,tbl_lazy)
S3method(group_by,tbl_lazy)
S3method(group_size,tbl_sql)
S3method(group_vars,tbl_lazy)
S3method(groups,tbl_lazy)
S3method(head,tbl_lazy)
S3method(inner_join,tbl_lazy)
S3method(last_value_sql,DBIConnection)
S3method(last_value_sql,Hive)
S3method(left_join,tbl_lazy)
S3method(mutate,tbl_lazy)
S3method(n_groups,tbl_sql)
S3method(names,sql_variant)
S3method(op_desc,op)
S3method(op_desc,op_arrange)
S3method(op_desc,op_base_remote)
S3method(op_desc,op_group_by)
S3method(op_frame,op_base)
S3method(op_frame,op_double)
S3method(op_frame,op_frame)
S3method(op_frame,op_single)
S3method(op_frame,tbl_lazy)
S3method(op_grps,op_base)
S3method(op_grps,op_double)
S3method(op_grps,op_group_by)
S3method(op_grps,op_select)
S3method(op_grps,op_single)
S3method(op_grps,op_summarise)
S3method(op_grps,op_ungroup)
S3method(op_grps,tbl_lazy)
S3method(op_sort,op_arrange)
S3method(op_sort,op_base)
S3method(op_sort,op_double)
S3method(op_sort,op_order)
S3method(op_sort,op_single)
S3method(op_sort,op_summarise)
S3method(op_sort,tbl_lazy)
S3method(op_vars,op_base)
S3method(op_vars,op_distinct)
S3method(op_vars,op_double)
S3method(op_vars,op_join)
S3method(op_vars,op_select)
S3method(op_vars,op_semi_join)
S3method(op_vars,op_set_op)
S3method(op_vars,op_single)
S3method(op_vars,op_summarise)
S3method(op_vars,tbl_lazy)
S3method(print,dbplyr_schema)
S3method(print,ident)
S3method(print,join_query)
S3method(print,op_base_local)
S3method(print,op_base_remote)
S3method(print,op_single)
S3method(print,select_query)
S3method(print,semi_join_query)
S3method(print,set_op_query)
S3method(print,sql)
S3method(print,sql_variant)
S3method(print,tbl_lazy)
S3method(print,tbl_sql)
S3method(pull,tbl_sql)
S3method(relocate,tbl_lazy)
S3method(rename,tbl_lazy)
S3method(rename_with,tbl_lazy)
S3method(right_join,tbl_lazy)
S3method(same_src,src_sql)
S3method(same_src,tbl_lazy)
S3method(same_src,tbl_sql)
S3method(select,tbl_lazy)
S3method(semi_join,tbl_lazy)
S3method(show_query,tbl_lazy)
S3method(slice,tbl_lazy)
S3method(slice_head,tbl_lazy)
S3method(slice_max,tbl_lazy)
S3method(slice_min,tbl_lazy)
S3method(slice_sample,tbl_lazy)
S3method(slice_tail,tbl_lazy)
S3method(sql_build,ident)
S3method(sql_build,op_arrange)
S3method(sql_build,op_base_local)
S3method(sql_build,op_base_remote)
S3method(sql_build,op_distinct)
S3method(sql_build,op_filter)
S3method(sql_build,op_frame)
S3method(sql_build,op_group_by)
S3method(sql_build,op_head)
S3method(sql_build,op_join)
S3method(sql_build,op_order)
S3method(sql_build,op_select)
S3method(sql_build,op_semi_join)
S3method(sql_build,op_set_op)
S3method(sql_build,op_summarise)
S3method(sql_build,op_ungroup)
S3method(sql_build,tbl_lazy)
S3method(sql_escape_date,ACCESS)
S3method(sql_escape_date,DBIConnection)
S3method(sql_escape_datetime,ACCESS)
S3method(sql_escape_datetime,DBIConnection)
S3method(sql_escape_ident,DBIConnection)
S3method(sql_escape_ident,TestConnection)
S3method(sql_escape_logical,"Microsoft SQL Server")
S3method(sql_escape_logical,ACCESS)
S3method(sql_escape_logical,DBIConnection)
S3method(sql_escape_logical,SQLiteConnection)
S3method(sql_escape_raw,"Microsoft SQL Server")
S3method(sql_escape_raw,DBIConnection)
S3method(sql_escape_string,DBIConnection)
S3method(sql_escape_string,TestConnection)
S3method(sql_expr_matches,DBIConnection)
S3method(sql_expr_matches,MariaDBConnection)
S3method(sql_expr_matches,MySQL)
S3method(sql_expr_matches,MySQLConnection)
S3method(sql_expr_matches,OraConnection)
S3method(sql_expr_matches,Oracle)
S3method(sql_expr_matches,PostgreSQL)
S3method(sql_expr_matches,PostgreSQLConnection)
S3method(sql_expr_matches,PqConnection)
S3method(sql_expr_matches,SQLiteConnection)
S3method(sql_join,DBIConnection)
S3method(sql_join_suffix,DBIConnection)
S3method(sql_optimise,ident)
S3method(sql_optimise,query)
S3method(sql_optimise,select_query)
S3method(sql_optimise,sql)
S3method(sql_query_explain,DBIConnection)
S3method(sql_query_explain,Oracle)
S3method(sql_query_explain,PostgreSQL)
S3method(sql_query_explain,PostgreSQLConnection)
S3method(sql_query_explain,PqConnection)
S3method(sql_query_explain,SQLiteConnection)
S3method(sql_query_fields,DBIConnection)
S3method(sql_query_join,DBIConnection)
S3method(sql_query_join,MariaDBConnection)
S3method(sql_query_join,MySQL)
S3method(sql_query_join,MySQLConnection)
S3method(sql_query_join,SQLiteConnection)
S3method(sql_query_rows,DBIConnection)
S3method(sql_query_save,"Microsoft SQL Server")
S3method(sql_query_save,DBIConnection)
S3method(sql_query_select,"Microsoft SQL Server")
S3method(sql_query_select,ACCESS)
S3method(sql_query_select,DBIConnection)
S3method(sql_query_select,OraConnection)
S3method(sql_query_select,Oracle)
S3method(sql_query_select,Teradata)
S3method(sql_query_semi_join,DBIConnection)
S3method(sql_query_set_op,DBIConnection)
S3method(sql_query_set_op,SQLiteConnection)
S3method(sql_query_wrap,DBIConnection)
S3method(sql_query_wrap,OraConnection)
S3method(sql_query_wrap,Oracle)
S3method(sql_query_wrap,SQLiteConnection)
S3method(sql_render,ident)
S3method(sql_render,join_query)
S3method(sql_render,op)
S3method(sql_render,select_query)
S3method(sql_render,semi_join_query)
S3method(sql_render,set_op_query)
S3method(sql_render,sql)
S3method(sql_render,tbl_lazy)
S3method(sql_select,DBIConnection)
S3method(sql_semi_join,DBIConnection)
S3method(sql_set_op,DBIConnection)
S3method(sql_subquery,DBIConnection)
S3method(sql_table_analyze,"Microsoft SQL Server")
S3method(sql_table_analyze,ACCESS)
S3method(sql_table_analyze,DBIConnection)
S3method(sql_table_analyze,HDB)
S3method(sql_table_analyze,Hive)
S3method(sql_table_analyze,Impala)
S3method(sql_table_analyze,MariaDBConnection)
S3method(sql_table_analyze,MySQL)
S3method(sql_table_analyze,MySQLConnection)
S3method(sql_table_analyze,OraConnection)
S3method(sql_table_analyze,Oracle)
S3method(sql_table_analyze,Snowflake)
S3method(sql_table_analyze,Teradata)
S3method(sql_table_index,DBIConnection)
S3method(sql_translate_env,DBIConnection)
S3method(sql_translation,"Microsoft SQL Server")
S3method(sql_translation,ACCESS)
S3method(sql_translation,DBIConnection)
S3method(sql_translation,HDB)
S3method(sql_translation,Hive)
S3method(sql_translation,Impala)
S3method(sql_translation,MariaDBConnection)
S3method(sql_translation,MySQL)
S3method(sql_translation,MySQLConnection)
S3method(sql_translation,OdbcConnection)
S3method(sql_translation,OraConnection)
S3method(sql_translation,Oracle)
S3method(sql_translation,PostgreSQL)
S3method(sql_translation,PostgreSQLConnection)
S3method(sql_translation,PqConnection)
S3method(sql_translation,Redshift)
S3method(sql_translation,RedshiftConnection)
S3method(sql_translation,SQLiteConnection)
S3method(sql_translation,Snowflake)
S3method(sql_translation,Teradata)
S3method(src_tbls,src_sql)
S3method(summarise,tbl_lazy)
S3method(tail,tbl_lazy)
S3method(tally,tbl_lazy)
S3method(tbl,src_dbi)
S3method(tbl_sum,tbl_sql)
S3method(tbl_vars,tbl_lazy)
S3method(transmute,tbl_lazy)
S3method(ungroup,tbl_lazy)
S3method(union_all,tbl_lazy)
S3method(unique,sql)
export("%>%")
export(add_op_single)
export(as.sql)
export(base_agg)
export(base_no_win)
export(base_odbc_agg)
export(base_odbc_scalar)
export(base_odbc_win)
export(base_scalar)
export(base_win)
export(build_sql)
export(copy_lahman)
export(copy_nycflights13)
export(db_collect)
export(db_compute)
export(db_connection_describe)
export(db_copy_to)
export(db_sql_render)
export(db_table_temporary)
export(dbplyr_edition)
export(dbplyr_uncount)
export(escape)
export(escape_ansi)
export(has_lahman)
export(has_nycflights13)
export(ident)
export(ident_q)
export(in_schema)
export(is.ident)
export(is.sql)
export(join_query)
export(lahman_mysql)
export(lahman_postgres)
export(lahman_sqlite)
export(lahman_srcs)
export(lazy_frame)
export(memdb_frame)
export(named_commas)
export(nycflights13_postgres)
export(nycflights13_sqlite)
export(op_base)
export(op_double)
export(op_frame)
export(op_grps)
export(op_single)
export(op_sort)
export(op_vars)
export(partial_eval)
export(remote_con)
export(remote_name)
export(remote_query)
export(remote_query_plan)
export(remote_src)
export(select_query)
export(semi_join_query)
export(set_op_query)
export(simulate_access)
export(simulate_dbi)
export(simulate_hana)
export(simulate_hive)
export(simulate_impala)
export(simulate_mssql)
export(simulate_mysql)
export(simulate_odbc)
export(simulate_oracle)
export(simulate_postgres)
export(simulate_redshift)
export(simulate_snowflake)
export(simulate_sqlite)
export(simulate_teradata)
export(sql)
export(sql_aggregate)
export(sql_aggregate_2)
export(sql_aggregate_n)
export(sql_build)
export(sql_call2)
export(sql_cast)
export(sql_cot)
export(sql_escape_date)
export(sql_escape_datetime)
export(sql_escape_logical)
export(sql_escape_raw)
export(sql_expr)
export(sql_expr_matches)
export(sql_infix)
export(sql_join_suffix)
export(sql_log)
export(sql_not_supported)
export(sql_optimise)
export(sql_paste)
export(sql_paste_infix)
export(sql_prefix)
export(sql_query_explain)
export(sql_query_fields)
export(sql_query_join)
export(sql_query_rows)
export(sql_query_save)
export(sql_query_select)
export(sql_query_semi_join)
export(sql_query_set_op)
export(sql_query_wrap)
export(sql_quote)
export(sql_render)
export(sql_str_sub)
export(sql_substr)
export(sql_table_analyze)
export(sql_table_index)
export(sql_translation)
export(sql_translator)
export(sql_try_cast)
export(sql_variant)
export(sql_vector)
export(src_dbi)
export(src_memdb)
export(src_sql)
export(src_test)
export(tbl_lazy)
export(tbl_memdb)
export(tbl_sql)
export(test_frame)
export(test_load)
export(test_register_con)
export(test_register_src)
export(translate_sql)
export(translate_sql_)
export(win_absent)
export(win_aggregate)
export(win_aggregate_2)
export(win_cumulative)
export(win_current_frame)
export(win_current_group)
export(win_current_order)
export(win_over)
export(win_rank)
export(win_recycled)
export(window_frame)
export(window_order)
import(DBI)
import(rlang)
importFrom(R6,R6Class)
importFrom(assertthat,assert_that)
importFrom(assertthat,is.flag)
importFrom(dplyr,anti_join)
importFrom(dplyr,arrange)
importFrom(dplyr,auto_copy)
importFrom(dplyr,collapse)
importFrom(dplyr,collect)
importFrom(dplyr,compute)
importFrom(dplyr,copy_to)
importFrom(dplyr,count)
importFrom(dplyr,db_analyze)
importFrom(dplyr,db_create_index)
importFrom(dplyr,db_desc)
importFrom(dplyr,db_explain)
importFrom(dplyr,db_query_fields)
importFrom(dplyr,db_save_query)
importFrom(dplyr,db_write_table)
importFrom(dplyr,distinct)
importFrom(dplyr,do)
importFrom(dplyr,explain)
importFrom(dplyr,filter)
importFrom(dplyr,full_join)
importFrom(dplyr,group_by)
importFrom(dplyr,group_size)
importFrom(dplyr,group_vars)
importFrom(dplyr,groups)
importFrom(dplyr,inner_join)
importFrom(dplyr,intersect)
importFrom(dplyr,left_join)
importFrom(dplyr,mutate)
importFrom(dplyr,n)
importFrom(dplyr,n_groups)
importFrom(dplyr,pull)
importFrom(dplyr,relocate)
importFrom(dplyr,rename)
importFrom(dplyr,rename_with)
importFrom(dplyr,right_join)
importFrom(dplyr,same_src)
importFrom(dplyr,select)
importFrom(dplyr,semi_join)
importFrom(dplyr,setdiff)
importFrom(dplyr,show_query)
importFrom(dplyr,slice)
importFrom(dplyr,slice_head)
importFrom(dplyr,slice_max)
importFrom(dplyr,slice_min)
importFrom(dplyr,slice_sample)
importFrom(dplyr,slice_tail)
importFrom(dplyr,sql_join)
importFrom(dplyr,sql_select)
importFrom(dplyr,sql_semi_join)
importFrom(dplyr,sql_set_op)
importFrom(dplyr,sql_subquery)
importFrom(dplyr,sql_translate_env)
importFrom(dplyr,src_tbls)
importFrom(dplyr,summarise)
importFrom(dplyr,tally)
importFrom(dplyr,tbl)
importFrom(dplyr,tbl_vars)
importFrom(dplyr,transmute)
importFrom(dplyr,ungroup)
importFrom(dplyr,union)
importFrom(dplyr,union_all)
importFrom(glue,glue)
importFrom(magrittr,"%>%")
importFrom(methods,initialize)
importFrom(stats,setNames)
importFrom(stats,update)
importFrom(tibble,as_tibble)
importFrom(tibble,tbl_sum)
importFrom(tibble,tibble)
importFrom(tidyselect,everything)
importFrom(utils,head)
importFrom(utils,tail)
dbplyr/LICENSE 0000644 0001762 0000144 00000000052 13734215523 012555 0 ustar ligges users YEAR: 2013-2019
COPYRIGHT HOLDER: RStudio
dbplyr/README.md 0000755 0001762 0000144 00000007451 14032677433 013050 0 ustar ligges users
# dbplyr
[](https://cran.r-project.org/package=dbplyr)
[](https://github.com/tidyverse/dbplyr/actions)
[](https://codecov.io/gh/tidyverse/dbplyr?branch=master)
## Overview
dbplyr is the database backend for [dplyr](https://dplyr.tidyverse.org).
It allows you to use remote database tables as if they are in-memory
data frames by automatically converting dplyr code into SQL.
To learn more about why you might use dbplyr instead of writing SQL, see
`vignette("sql")`. To learn more about the details of the SQL
translation, see `vignette("translation-verb")` and
`vignette("translation-function")`.
## Installation
``` r
# The easiest way to get dbplyr is to install the whole tidyverse:
install.packages("tidyverse")
# Alternatively, install just dbplyr:
install.packages("dbplyr")
# Or the development version from GitHub:
# install.packages("devtools")
devtools::install_github("tidyverse/dbplyr")
```
## Usage
dbplyr is designed to work with database tables as if they were local
data frames. To demonstrate this I’ll first create an in-memory SQLite
database and copy over a dataset:
``` r
library(dplyr, warn.conflicts = FALSE)
con <- DBI::dbConnect(RSQLite::SQLite(), ":memory:")
copy_to(con, mtcars)
```
Note that you don’t actually need to load dbplyr with `library(dbplyr)`;
dplyr automatically loads it for you when it sees you working with a
database. Database connections are coordinated by the DBI package. Learn
more at <https://dbi.r-dbi.org/>.
Now you can retrieve a table using `tbl()` (see `?tbl_dbi` for more
details). Printing it just retrieves the first few rows:
``` r
mtcars2 <- tbl(con, "mtcars")
mtcars2
#> # Source: table [?? x 11]
#> # Database: sqlite 3.34.1 [:memory:]
#> mpg cyl disp hp drat wt qsec vs am gear carb
#>
#> 1 21 6 160 110 3.9 2.62 16.5 0 1 4 4
#> 2 21 6 160 110 3.9 2.88 17.0 0 1 4 4
#> 3 22.8 4 108 93 3.85 2.32 18.6 1 1 4 1
#> 4 21.4 6 258 110 3.08 3.22 19.4 1 0 3 1
#> 5 18.7 8 360 175 3.15 3.44 17.0 0 0 3 2
#> 6 18.1 6 225 105 2.76 3.46 20.2 1 0 3 1
#> 7 14.3 8 360 245 3.21 3.57 15.8 0 0 3 4
#> 8 24.4 4 147. 62 3.69 3.19 20 1 0 4 2
#> 9 22.8 4 141. 95 3.92 3.15 22.9 1 0 4 2
#> 10 19.2 6 168. 123 3.92 3.44 18.3 1 0 4 4
#> # … with more rows
```
All dplyr calls are evaluated lazily, generating SQL that is only sent
to the database when you request the data.
``` r
# lazily generates query
summary <- mtcars2 %>%
group_by(cyl) %>%
summarise(mpg = mean(mpg, na.rm = TRUE)) %>%
arrange(desc(mpg))
# see query
summary %>% show_query()
#> <SQL>
#> SELECT `cyl`, AVG(`mpg`) AS `mpg`
#> FROM `mtcars`
#> GROUP BY `cyl`
#> ORDER BY `mpg` DESC
# execute query and retrieve results
summary %>% collect()
#> # A tibble: 3 x 2
#> cyl mpg
#>
#> 1 4 26.7
#> 2 6 19.7
#> 3 8 15.1
```
## Code of Conduct
Please note that the dbplyr project is released with a [Contributor Code
of Conduct](https://dbplyr.tidyverse.org/CODE_OF_CONDUCT.html). By
contributing to this project, you agree to abide by its terms.
dbplyr/man/ 0000755 0001762 0000144 00000000000 14031447261 012323 5 ustar ligges users dbplyr/man/do.tbl_sql.Rd 0000644 0001762 0000144 00000001500 13575012774 014660 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-do.R
\name{do.tbl_sql}
\alias{do.tbl_sql}
\title{Perform arbitrary computation on remote backend}
\usage{
\method{do}{tbl_sql}(.data, ..., .chunk_size = 10000L)
}
\arguments{
\item{.data}{a tbl}
\item{...}{Expressions to apply to each group. If named, results will be
stored in a new column. If unnamed, should return a data frame. You can
use \code{.} to refer to the current group. You can not mix named and
unnamed arguments.}
\item{.chunk_size}{The size of each chunk to pull into R. If this number is
too big, the process will be slow because R has to allocate and free a lot
of memory. If it's too small, it will be slow, because of the overhead of
talking to the database.}
}
\description{
Perform arbitrary computation on remote backend
}
dbplyr/man/sql_expr.Rd 0000644 0001762 0000144 00000002551 13734215523 014455 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sql-expr.R
\name{sql_expr}
\alias{sql_expr}
\alias{sql_call2}
\title{Generate SQL from R expressions}
\usage{
sql_expr(x, con = sql_current_con())
sql_call2(.fn, ..., con = sql_current_con())
}
\arguments{
\item{x}{A quasiquoted expression}
\item{con}{Connection to use for escaping. Will be set automatically when
called from a function translation.}
\item{.fn}{Function name (as string, call, or symbol)}
\item{...}{Arguments to function}
}
\description{
Low-level building block for generating SQL from R expressions.
Strings are escaped; names become bare SQL identifiers. User infix
functions have \verb{\%} stripped.
}
\details{
Using \code{sql_expr()} in package will require use of \code{\link[=globalVariables]{globalVariables()}}
to avoid \verb{R CMD check} NOTES. This is a small amount of additional pain,
which I think is worthwhile because it leads to more readable translation
code.
}
\examples{
con <- simulate_dbi() # not necessary when writing translations
sql_expr(f(x + 1), con = con)
sql_expr(f("x", "y"), con = con)
sql_expr(f(x, y), con = con)
x <- ident("x")
sql_expr(f(!!x, y), con = con)
sql_expr(cast("x" \%as\% DECIMAL), con = con)
sql_expr(round(x) \%::\% numeric, con = con)
sql_call2("+", quote(x), 1, con = con)
sql_call2("+", "x", 1, con = con)
}
\keyword{internal}
dbplyr/man/testing.Rd 0000644 0001762 0000144 00000001566 14002647450 014277 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test-frame.R
\name{testing}
\alias{testing}
\alias{test_register_src}
\alias{test_register_con}
\alias{src_test}
\alias{test_load}
\alias{test_frame}
\title{Infrastructure for testing dplyr}
\usage{
test_register_src(name, src)
test_register_con(name, ...)
src_test(name)
test_load(
df,
name = unique_table_name(),
srcs = test_srcs$get(),
ignore = character()
)
test_frame(..., srcs = test_srcs$get(), ignore = character())
}
\description{
Register testing sources, then use \code{test_load()} to load an existing
data frame into each source. To create a new table in each source,
use \code{test_frame()}.
}
\examples{
\dontrun{
test_register_src("sqlite", {
DBI::dbConnect(RSQLite::SQLite(), ":memory:", create = TRUE)
})
test_frame(x = 1:3, y = 3:1)
test_load(mtcars)
}
}
\keyword{internal}
dbplyr/man/pivot_longer.tbl_lazy.Rd 0000644 0001762 0000144 00000006011 14004012136 017123 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-pivot-longer.R
\name{pivot_longer.tbl_lazy}
\alias{pivot_longer.tbl_lazy}
\title{Pivot data from wide to long}
\usage{
pivot_longer.tbl_lazy(
data,
cols,
names_to = "name",
names_prefix = NULL,
names_sep = NULL,
names_pattern = NULL,
names_ptypes = list(),
names_transform = list(),
names_repair = "check_unique",
values_to = "value",
values_drop_na = FALSE,
values_ptypes,
values_transform = list(),
...
)
}
\arguments{
\item{data}{A data frame to pivot.}
\item{cols}{Columns to pivot into longer format.}
\item{names_to}{A string specifying the name of the column to create
from the data stored in the column names of \code{data}.}
\item{names_prefix}{A regular expression used to remove matching text
from the start of each variable name.}
\item{names_sep, names_pattern}{If \code{names_to} contains multiple values,
these arguments control how the column name is broken up.}
\item{names_ptypes}{A list of column name-prototype pairs.}
\item{names_transform, values_transform}{A list of column name-function pairs.}
\item{names_repair}{What happens if the output has invalid column names?}
\item{values_to}{A string specifying the name of the column to create
from the data stored in cell values. If \code{names_to} is a character
containing the special \code{.value} sentinel, this value will be ignored,
and the name of the value column will be derived from part of the
existing column names.}
\item{values_drop_na}{If \code{TRUE}, will drop rows that contain only \code{NA}s
in the \code{values_to} column.}
\item{values_ptypes}{Not supported.}
\item{...}{Additional arguments passed on to methods.}
}
\description{
\code{pivot_longer()} "lengthens" data, increasing the number of rows and
decreasing the number of columns. The inverse transformation is
\code{tidyr::pivot_wider()}.
Learn more in \code{vignette("pivot", "tidyr")}.
While most functionality is identical there are some differences to
\code{pivot_longer()} on local data frames:
\itemize{
\item the output is sorted differently/not explicitly,
\item the coercion of mixed column types is left to the database,
\item \code{values_ptypes} NOT supported.
}
Note that \code{build_longer_spec()} and \code{pivot_longer_spec()} do not work with
remote tables.
}
\details{
The SQL translation basically works as follows:
\enumerate{
\item split the specification by its key columns i.e. by variables crammed
into the column names.
\item for each part in the split specification \code{transmute()} \code{data} into the
following columns
}
\itemize{
\item id columns i.e. columns that are not pivotted
\item key columns
\item value columns i.e. columns that are pivotted
}
\enumerate{
\item combine all the parts with \code{union_all()}
}
}
\examples{
# See vignette("pivot") for examples and explanation
# Simplest case where column names are character data
if (require("tidyr", quietly = TRUE)) {
memdb_frame(
id = c("a", "b"),
x = 1:2,
y = 3:4
) \%>\%
pivot_longer(-id)
}
}
dbplyr/man/filter.tbl_lazy.Rd 0000644 0001762 0000144 00000002077 14015732330 015720 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-filter.R
\name{filter.tbl_lazy}
\alias{filter.tbl_lazy}
\title{Subset rows using column values}
\usage{
\method{filter}{tbl_lazy}(.data, ..., .preserve = FALSE)
}
\arguments{
\item{.data}{A lazy data frame backed by a database query.}
\item{...}{<\code{\link[dplyr:dplyr_data_masking]{data-masking}}> Variables, or functions of
variables. Use \code{\link[dplyr:desc]{desc()}} to sort a variable in descending order.}
\item{.preserve}{Not supported by this method.}
}
\value{
Another \code{tbl_lazy}. Use \code{\link[=show_query]{show_query()}} to see the generated
query, and use \code{\link[=collect.tbl_sql]{collect()}} to execute the query
and return data to R.
}
\description{
This is a method for the dplyr \code{\link[=filter]{filter()}} generic. It generates the
\code{WHERE} clause of the SQL query.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
db <- memdb_frame(x = c(2, NA, 5, NA, 10), y = 1:5)
db \%>\% filter(x < 5) \%>\% show_query()
db \%>\% filter(is.na(x)) \%>\% show_query()
}
dbplyr/man/backend-odbc.Rd 0000644 0001762 0000144 00000001375 14002647450 015114 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/backend-odbc.R
\name{backend-odbc}
\alias{simulate_odbc}
\title{Backend: ODBC}
\usage{
simulate_odbc()
}
\description{
See \code{vignette("translate-function")} and \code{vignette("translate-verb")} for
details of overall translation technology. Key differences for this backend
are minor translations for common data types.
Use \code{simulate_odbc()} with \code{lazy_frame()} to see simulated SQL without
converting to live access database.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
lf <- lazy_frame(a = TRUE, b = 1, d = 2, c = "z", con = simulate_odbc())
lf \%>\% transmute(x = as.numeric(b))
lf \%>\% transmute(x = as.integer(b))
lf \%>\% transmute(x = as.character(b))
}
dbplyr/man/collapse.tbl_sql.Rd 0000644 0001762 0000144 00000003435 14002647450 016060 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-compute.R
\name{collapse.tbl_sql}
\alias{collapse.tbl_sql}
\alias{compute.tbl_sql}
\alias{collect.tbl_sql}
\title{Compute results of a query}
\usage{
\method{collapse}{tbl_sql}(x, ...)
\method{compute}{tbl_sql}(
x,
name = unique_table_name(),
temporary = TRUE,
unique_indexes = list(),
indexes = list(),
analyze = TRUE,
...
)
\method{collect}{tbl_sql}(x, ..., n = Inf, warn_incomplete = TRUE)
}
\arguments{
\item{x}{A lazy data frame backed by a database query.}
\item{...}{other parameters passed to methods.}
\item{name}{Table name in remote database.}
\item{temporary}{Should the table be temporary (\code{TRUE}, the default) or persistent (\code{FALSE})?}
\item{unique_indexes}{a list of character vectors. Each element of the list
will create a new unique index over the specified column(s). Duplicate rows
will result in failure.}
\item{indexes}{a list of character vectors. Each element of the list
will create a new index.}
\item{analyze}{if \code{TRUE} (the default), will automatically ANALYZE the
new table so that the query optimiser has useful information.}
\item{n}{Number of rows to fetch. Defaults to \code{Inf}, meaning all rows.}
\item{warn_incomplete}{Warn if \code{n} is less than the number of result rows?}
}
\description{
These are methods for the dplyr generics \code{\link[=collapse]{collapse()}}, \code{\link[=compute]{compute()}},
and \code{\link[=collect]{collect()}}. \code{collapse()} creates a subquery, \code{compute()} stores
the results in a remote table, and \code{collect()} executes the query and
downloads the data into R.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
db <- memdb_frame(a = c(3, 4, 1, 2), b = c(5, 1, 2, NA))
db \%>\% filter(a <= 2) \%>\% collect()
}
dbplyr/man/select.tbl_lazy.Rd 0000644 0001762 0000144 00000003720 14015732330 015706 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-select.R
\name{select.tbl_lazy}
\alias{select.tbl_lazy}
\alias{rename.tbl_lazy}
\alias{rename_with.tbl_lazy}
\alias{relocate.tbl_lazy}
\title{Subset, rename, and reorder columns using their names}
\usage{
\method{select}{tbl_lazy}(.data, ...)
\method{rename}{tbl_lazy}(.data, ...)
\method{rename_with}{tbl_lazy}(.data, .fn, .cols = everything(), ...)
\method{relocate}{tbl_lazy}(.data, ..., .before = NULL, .after = NULL)
}
\arguments{
\item{.data}{A lazy data frame backed by a database query.}
\item{...}{<\code{\link[dplyr:dplyr_data_masking]{data-masking}}> Variables, or functions of
variables. Use \code{\link[dplyr:desc]{desc()}} to sort a variable in descending order.}
\item{.fn}{A function used to transform the selected \code{.cols}. Should
return a character vector the same length as the input.}
\item{.cols}{<\code{\link[dplyr:dplyr_tidy_select]{tidy-select}}> Columns to rename;
defaults to all columns.}
\item{.before}{<\code{\link[dplyr:dplyr_tidy_select]{tidy-select}}> Destination of
columns selected by \code{...}. Supplying neither will move columns to the
left-hand side; specifying both is an error.}
\item{.after}{<\code{\link[dplyr:dplyr_tidy_select]{tidy-select}}> Destination of
columns selected by \code{...}. Supplying neither will move columns to the
left-hand side; specifying both is an error.}
}
\description{
These are methods for the dplyr \code{\link[=select]{select()}}, \code{\link[=rename]{rename()}}, and \code{\link[=relocate]{relocate()}}
generics. They generate the \code{SELECT} clause of the SQL query.
These functions do not support predicate functions, i.e. you can
not use \code{where(is.numeric)} to select all numeric variables.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
db <- memdb_frame(x = 1, y = 2, z = 3)
db \%>\% select(-y) \%>\% show_query()
db \%>\% relocate(z) \%>\% show_query()
db \%>\% rename(first = x, last = z) \%>\% show_query()
}
dbplyr/man/backend-impala.Rd 0000644 0001762 0000144 00000001305 14002647450 015441 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/backend-impala.R
\name{backend-impala}
\title{Backend: Impala}
\description{
See \code{vignette("translate-function")} and \code{vignette("translate-verb")} for
details of overall translation technology. Key differences for this backend
are a scattering of custom translations provided by users, mostly focussed
on bitwise operations.
Use \code{simulate_impala()} with \code{lazy_frame()} to see simulated SQL without
converting to live access database.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
lf <- lazy_frame(a = TRUE, b = 1, c = 2, d = "z", con = simulate_impala())
lf \%>\% transmute(X = bitwNot(bitwOr(b, c)))
}
dbplyr/man/replace_na.tbl_lazy.Rd 0000644 0001762 0000144 00000001703 14004012136 016510 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-expand.R
\name{replace_na.tbl_lazy}
\alias{replace_na.tbl_lazy}
\title{Replace NAs with specified values}
\usage{
replace_na.tbl_lazy(data, replace = list(), ...)
}
\arguments{
\item{data}{A lazy data frame backed by a database query.}
\item{replace}{A named list of values, with one value for each column that
has NA values to be replaced.}
\item{...}{Unused; included for compatibility with generic.}
}
\value{
Another \code{tbl_lazy}. Use \code{\link[=show_query]{show_query()}} to see the generated
query, and use \code{\link[=collect.tbl_sql]{collect()}} to execute the query
and return data to R.
}
\description{
This is a method for the \code{\link[tidyr:replace_na]{tidyr::replace_na()}} generic.
}
\examples{
if (require("tidyr", quietly = TRUE)) {
df <- memdb_frame(x = c(1, 2, NA), y = c("a", NA, "b"))
df \%>\% replace_na(list(x = 0, y = "unknown"))
}
}
dbplyr/man/backend-hana.Rd 0000644 0001762 0000144 00000001772 14002647450 015115 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/backend-hana.R
\name{backend-hana}
\alias{simulate_hana}
\title{Backend: SAP HANA}
\usage{
simulate_hana()
}
\description{
See \code{vignette("translate-function")} and \code{vignette("translate-verb")} for
details of overall translation technology. Key differences for this backend
are:
\itemize{
\item Temporary tables get \verb{#} prefix and use \verb{LOCAL TEMPORARY COLUMN}.
\item No table analysis performed in \code{\link[=copy_to]{copy_to()}}.
\item \code{paste()} uses \code{||}
\item Note that you can't create new boolean columns from logical expressions;
you need to wrap with explicit \code{ifelse}: \code{ifelse(x > y, TRUE, FALSE)}.
}
Use \code{simulate_hana()} with \code{lazy_frame()} to see simulated SQL without
converting to a live database.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
lf <- lazy_frame(a = TRUE, b = 1, c = 2, d = "z", con = simulate_hana())
lf \%>\% transmute(x = paste0(d, " times"))
}
dbplyr/man/head.tbl_lazy.Rd 0000644 0001762 0000144 00000002557 14002647450 015343 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-head.R
\name{head.tbl_lazy}
\alias{head.tbl_lazy}
\title{Subset the first rows}
\usage{
\method{head}{tbl_lazy}(x, n = 6L, ...)
}
\arguments{
\item{x}{A lazy data frame backed by a database query.}
\item{n}{Number of rows to return}
\item{...}{Not used.}
}
\value{
Another \code{tbl_lazy}. Use \code{\link[=show_query]{show_query()}} to see the generated
query, and use \code{\link[=collect.tbl_sql]{collect()}} to execute the query
and return data to R.
}
\description{
This is a method for the \code{\link[=head]{head()}} generic. It is usually translated to the
\code{LIMIT} clause of the SQL query. Because \code{LIMIT} is not an official part of
the SQL specification, some databases use other clauses like \code{TOP} or
\verb{FETCH ROWS}.
Note that databases don't really have a sense of row order, so what "first"
means is subject to interpretation. Most databases will respect ordering
performed with \code{arrange()}, but it's not guaranteed. \code{tail()} is not
supported at all because the situation is even murkier for the "last" rows.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
db <- memdb_frame(x = 1:100)
db \%>\% head() \%>\% show_query()
# Pretend we have data in a SQL server database
db2 <- lazy_frame(x = 1:100, con = simulate_mssql())
db2 \%>\% head() \%>\% show_query()
}
dbplyr/man/db-misc.Rd 0000644 0001762 0000144 00000002341 14002647450 014130 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db.R
\name{db-misc}
\alias{db_connection_describe}
\alias{sql_join_suffix}
\alias{db_sql_render}
\alias{dbplyr_edition}
\title{Miscellaneous database generics}
\usage{
db_connection_describe(con)
sql_join_suffix(con, ...)
db_sql_render(con, sql, ...)
dbplyr_edition(con)
}
\description{
\itemize{
\item \code{db_connection_describe()} provides a short string describing the
database connection, helping users tell which database a table comes
from. It should be a single line, and ideally less than 60 characters wide.
}
}
\details{
\itemize{
\item \code{dbplyr_edition()} declares which version of the dbplyr API you want.
See below for more details.
}
}
\section{dplyr 2.0.0}{
dplyr 2.0.0 renamed a number of generics so that they could be cleanly moved
from dplyr to dbplyr. If you have an existing backend, you'll need to rename
the following methods.
\itemize{
\item \code{dplyr::db_desc()} -> \code{dbplyr::db_connection_describe()} (also note that
the argument name changed from \code{x} to \code{con}).
}
}
\seealso{
Other generic:
\code{\link{db-sql}},
\code{\link{db_copy_to}()},
\code{\link{sql_escape_logical}()}
}
\concept{generic}
\keyword{internal}
dbplyr/man/sql_build.Rd 0000644 0001762 0000144 00000004503 14002647450 014572 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query-join.R, R/query-select.R,
% R/query-semi-join.R, R/query-set-op.R, R/sql-build.R
\name{join_query}
\alias{join_query}
\alias{select_query}
\alias{semi_join_query}
\alias{set_op_query}
\alias{sql_build}
\alias{sql_render}
\alias{sql_optimise}
\title{Build and render SQL from a sequence of lazy operations}
\usage{
join_query(
x,
y,
vars,
type = "inner",
by = NULL,
suffix = c(".x", ".y"),
na_matches = FALSE
)
select_query(
from,
select = sql("*"),
where = character(),
group_by = character(),
having = character(),
order_by = character(),
limit = NULL,
distinct = FALSE
)
semi_join_query(x, y, anti = FALSE, by = NULL, na_matches = FALSE)
set_op_query(x, y, type = type, all = FALSE)
sql_build(op, con = NULL, ...)
sql_render(query, con = NULL, ..., subquery = FALSE)
sql_optimise(x, con = NULL, ..., subquery = FALSE)
}
\arguments{
\item{op}{A sequence of lazy operations}
\item{con}{A database connection. The default \code{NULL} uses a set of
rules that should be very similar to ANSI 92, and allows for testing
without an active database connection.}
\item{...}{Other arguments passed on to the methods. Not currently used.}
\item{subquery}{Is this SQL going to be used in a subquery?
This is important because you can place a bare table name in a subquery
and ORDER BY does not work in subqueries.}
}
\description{
\code{sql_build()} creates a \code{select_query} S3 object, that is rendered
to a SQL string by \code{sql_render()}. The output from \code{sql_build()} is
designed to be easy to test, as it's database agnostic, and has
a hierarchical structure. Outside of testing, however, you should
always call \code{sql_render()}.
}
\details{
\code{sql_build()} is generic over the lazy operations, \link{lazy_ops},
and generates an S3 object that represents the query. \code{sql_render()}
takes a query object and then calls a function that is generic
over the database. For example, \code{sql_build.op_mutate()} generates
a \code{select_query}, and \code{sql_render.select_query()} calls
\code{sql_select()}, which has different methods for different databases.
The default methods should generate ANSI 92 SQL where possible, so
backends only need to override the methods if the backend is not ANSI
compliant.
}
\keyword{internal}
dbplyr/man/dbplyr-package.Rd 0000644 0001762 0000144 00000002022 14004012136 015460 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dbplyr.R
\docType{package}
\name{dbplyr-package}
\alias{dbplyr}
\alias{dbplyr-package}
\title{dbplyr: A 'dplyr' Back End for Databases}
\description{
\if{html}{\figure{logo.png}{options: align='right' alt='logo' width='120'}}
A 'dplyr' back end for databases that allows you to
work with remote database tables as if they are in-memory data frames.
Basic features work with any database that has a 'DBI' back end; more
advanced features require 'SQL' translation to be provided by the
package author.
}
\seealso{
Useful links:
\itemize{
\item \url{https://dbplyr.tidyverse.org/}
\item \url{https://github.com/tidyverse/dbplyr}
\item Report bugs at \url{https://github.com/tidyverse/dbplyr/issues}
}
}
\author{
\strong{Maintainer}: Hadley Wickham \email{hadley@rstudio.com}
Authors:
\itemize{
\item Maximilian Girlich
\item Edgar Ruiz
}
Other contributors:
\itemize{
\item RStudio [copyright holder, funder]
}
}
\keyword{internal}
dbplyr/man/db-sql.Rd 0000644 0001762 0000144 00000010460 14002647450 013775 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db-sql.R
\name{db-sql}
\alias{db-sql}
\alias{sql_expr_matches}
\alias{sql_translation}
\alias{sql_table_analyze}
\alias{sql_table_index}
\alias{sql_query_explain}
\alias{sql_query_fields}
\alias{sql_query_save}
\alias{sql_query_wrap}
\alias{sql_query_rows}
\alias{sql_query_select}
\alias{sql_query_join}
\alias{sql_query_semi_join}
\alias{sql_query_set_op}
\title{SQL generation generics}
\usage{
sql_expr_matches(con, x, y)
sql_translation(con)
sql_table_analyze(con, table, ...)
sql_table_index(con, table, columns, name = NULL, unique = FALSE, ...)
sql_query_explain(con, sql, ...)
sql_query_fields(con, sql, ...)
sql_query_save(con, sql, name, temporary = TRUE, ...)
sql_query_wrap(con, from, name = unique_subquery_name(), ...)
sql_query_rows(con, sql, ...)
sql_query_select(
con,
select,
from,
where = NULL,
group_by = NULL,
having = NULL,
order_by = NULL,
limit = NULL,
distinct = FALSE,
...,
subquery = FALSE
)
sql_query_join(
con,
x,
y,
vars,
type = "inner",
by = NULL,
na_matches = FALSE,
...
)
sql_query_semi_join(con, x, y, anti = FALSE, by = NULL, ...)
sql_query_set_op(con, x, y, method, ..., all = FALSE)
}
\description{
SQL translation:
\itemize{
\item \code{sql_expr_matches(con, x, y)} generates an alternative to \code{x = y} when a
pair of \code{NULL}s should match. The default translation uses a \verb{CASE WHEN}
as described in \url{https://modern-sql.com/feature/is-distinct-from}.
\item \code{sql_translation(con)} generates a SQL translation environment.
}
Tables:
\itemize{
\item \code{sql_table_analyze(con, table)} generates SQL that "analyzes" the table,
ensuring that the database has up-to-date statistics for use in the query
planner. It is called from \code{\link[=copy_to]{copy_to()}} when \code{analyze = TRUE}.
\item \code{sql_table_index()} generates SQL for adding an index to a table.
}
Query manipulation:
\itemize{
\item \code{sql_query_explain(con, sql)} generates SQL that "explains" a query,
i.e. generates a query plan describing what indexes etc that the
database will use.
\item \code{sql_query_fields()} generates SQL for a 0-row result that is used to
capture field names in \code{\link[=tbl_sql]{tbl_sql()}}
\item \code{sql_query_save(con, sql)} generates SQL for saving a query into a
(temporary) table.
\item \code{sql_query_wrap(con, from)} generates SQL for wrapping a query into a
subquery.
}
Query generation:
\itemize{
\item \code{sql_query_select()} generate SQL for a \code{SELECT} query
\item \code{sql_query_join()} generate SQL for joins
\item \code{sql_query_semi_join()} generate SQL for semi- and anti-joins
\item \code{sql_query_set_op()} generate SQL for \code{UNION}, \code{INTERSECT}, and \code{EXCEPT}
queries.
}
}
\section{dbplyr 2.0.0}{
Many \verb{dplyr::db_*} generics have been replaced by \verb{dbplyr::sql_*} generics.
To update your backend, you'll need to extract the SQL generation out of your
existing code, and place it in a new method for a dbplyr \code{sql_} generic.
\itemize{
\item \code{dplyr::db_analyze()} is replaced by \code{dbplyr::sql_table_analyze()}
\item \code{dplyr::db_explain()} is replaced by \code{dbplyr::sql_query_explain()}
\item \code{dplyr::db_create_index()} is replaced by \code{dbplyr::sql_table_index()}
\item \code{dplyr::db_query_fields()} is replaced by \code{dbplyr::sql_query_fields()}
\item \code{dplyr::db_query_rows()} is no longer used; you can delete it
\item \code{dplyr::db_save_query()} is replaced by \code{dbplyr::sql_query_save()}
}
The query generating functions have also changed names. Their behaviour is
unchanged, so you just need to rename the generic and import from dbplyr
instead of dplyr.
\itemize{
\item \code{dplyr::sql_select()} is replaced by \code{dbplyr::sql_query_select()}
\item \code{dplyr::sql_join()} is replaced by \code{dbplyr::sql_query_join()}
\item \code{dplyr::sql_semi_join()} is replaced by \code{dbplyr::sql_query_semi_join()}
\item \code{dplyr::sql_set_op()} is replaced by \code{dbplyr::sql_query_set_op()}
\item \code{dplyr::sql_subquery()} is replaced by \code{dbplyr::sql_query_wrap()}
}
Learn more in \code{vignette("backend-2.0")}
}
\seealso{
Other generic:
\code{\link{db_connection_describe}()},
\code{\link{db_copy_to}()},
\code{\link{sql_escape_logical}()}
}
\concept{generic}
\keyword{internal}
dbplyr/man/ident_q.Rd 0000644 0001762 0000144 00000000441 14002647450 014234 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/schema.R
\name{ident_q}
\alias{ident_q}
\title{Declare an identifier as being pre-quoted.}
\usage{
ident_q(...)
}
\description{
No longer needed; please use \code{\link[=sql]{sql()}} instead.
}
\keyword{internal}
dbplyr/man/mutate.tbl_lazy.Rd 0000644 0001762 0000144 00000002252 14015732330 015725 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-mutate.R
\name{mutate.tbl_lazy}
\alias{mutate.tbl_lazy}
\title{Create, modify, and delete columns}
\usage{
\method{mutate}{tbl_lazy}(.data, ...)
}
\arguments{
\item{.data}{A lazy data frame backed by a database query.}
\item{...}{<\code{\link[dplyr:dplyr_data_masking]{data-masking}}> Name-value pairs.
The name gives the name of the column in the output; the value is the
expression used to compute it.}
}
\value{
Another \code{tbl_lazy}. Use \code{\link[=show_query]{show_query()}} to see the generated
query, and use \code{\link[=collect.tbl_sql]{collect()}} to execute the query
and return data to R.
}
\description{
These are methods for the dplyr \code{\link[=mutate]{mutate()}} and \code{\link[=transmute]{transmute()}} generics.
They are translated to computed expressions in the \code{SELECT} clause of
the SQL query.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
db <- memdb_frame(x = 1:5, y = 5:1)
db \%>\%
mutate(a = (x + y) / 2, b = sqrt(x^2L + y^2L)) \%>\%
show_query()
# dbplyr automatically creates subqueries as needed
db \%>\%
mutate(x1 = x + 1, x2 = x1 * 2) \%>\%
show_query()
}
dbplyr/man/nycflights13.Rd 0000644 0001762 0000144 00000001535 14002647450 015134 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-nycflights13.R
\name{nycflights13}
\alias{nycflights13}
\alias{nycflights13_sqlite}
\alias{nycflights13_postgres}
\alias{has_nycflights13}
\alias{copy_nycflights13}
\title{Database versions of the nycflights13 data}
\usage{
nycflights13_sqlite(path = NULL)
nycflights13_postgres(dbname = "nycflights13", ...)
has_nycflights13(type = c("sqlite", "postgresql"), ...)
copy_nycflights13(con, ...)
}
\arguments{
\item{path}{location of SQLite database file}
\item{dbname, ...}{Arguments passed on to \code{\link[=src_postgres]{src_postgres()}}}
}
\description{
These functions cache the data from the \code{nycflights13} database in
a local database, for use in examples and vignettes. Indexes are created
to making joining tables on natural keys efficient.
}
\keyword{internal}
dbplyr/man/pivot_wider.tbl_lazy.Rd 0000644 0001762 0000144 00000006307 14004012136 016757 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-pivot-wider.R
\name{pivot_wider.tbl_lazy}
\alias{pivot_wider.tbl_lazy}
\title{Pivot data from long to wide}
\usage{
pivot_wider.tbl_lazy(
data,
id_cols = NULL,
names_from = name,
names_prefix = "",
names_sep = "_",
names_glue = NULL,
names_sort = FALSE,
names_repair = "check_unique",
values_from = value,
values_fill = NULL,
values_fn = max,
...
)
}
\arguments{
\item{data}{A lazy data frame backed by a database query.}
\item{id_cols}{A set of columns that uniquely identifies each observation.}
\item{names_from, values_from}{A pair of
arguments describing which column (or columns) to get the name of the
output column (\code{names_from}), and which column (or columns) to get the
cell values from (\code{values_from}).
If \code{values_from} contains multiple values, the value will be added to the
front of the output column.}
\item{names_prefix}{String added to the start of every variable name.}
\item{names_sep}{If \code{names_from} or \code{values_from} contains multiple
variables, this will be used to join their values together into a single
string to use as a column name.}
\item{names_glue}{Instead of \code{names_sep} and \code{names_prefix}, you can supply
a glue specification that uses the \code{names_from} columns (and special
\code{.value}) to create custom column names.}
\item{names_sort}{Should the column names be sorted? If \code{FALSE}, the default,
column names are ordered by first appearance.}
\item{names_repair}{What happens if the output has invalid column names?}
\item{values_fill}{Optionally, a (scalar) value that specifies what each
\code{value} should be filled in with when missing.}
\item{values_fn}{A function, the default is \code{max()}, applied to the \code{value}
in each cell in the output. In contrast to local data frames it must not be
\code{NULL}.}
\item{...}{Unused; included for compatibility with generic.}
}
\description{
\code{pivot_wider()} "widens" data, increasing the number of columns and
decreasing the number of rows. The inverse transformation is
\code{pivot_longer()}.
Learn more in \code{vignette("pivot", "tidyr")}.
}
\details{
The big difference to \code{pivot_wider()} for local data frames is that
\code{values_fn} must not be \code{NULL}. By default it is \code{max()} which yields
the same results as for local data frames if the combination of \code{id_cols}
and \code{value} column uniquely identify an observation.
Mind that you also do not get a warning if an observation is not uniquely
identified.
The translation to SQL code basically works as follows:
\enumerate{
\item Get unique keys in \code{names_from} column.
\item For each key value generate an expression of the form:\if{html}{\out{
}}\preformatted{value_fn(
CASE WHEN (`names from column` == `key value`)
THEN (`value column`)
END
) AS `output column`
}\if{html}{\out{
}}
\item Group data by id columns.
\item Summarise the grouped data with the expressions from step 2.
}
}
\examples{
if (require("tidyr", quietly = TRUE)) {
memdb_frame(
id = 1,
key = c("x", "y"),
value = 1:2
) \%>\%
tidyr::pivot_wider(
id_cols = id,
names_from = key,
values_from = value
)
}
}
dbplyr/man/backend-hive.Rd 0000644 0001762 0000144 00000001502 14002647450 015130 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/backend-hive.R
\name{backend-hive}
\title{Backend: Hive}
\description{
See \code{vignette("translate-function")} and \code{vignette("translate-verb")} for
details of overall translation technology. Key differences for this backend
are a scattering of custom translations provided by users.
Use \code{simulate_hive()} with \code{lazy_frame()} to see simulated SQL without
converting to a live database.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
lf <- lazy_frame(a = TRUE, b = 1, d = 2, c = "z", con = simulate_hive())
lf \%>\% transmute(x = cot(b))
lf \%>\% transmute(x = bitwShiftL(c, 1L))
lf \%>\% transmute(x = str_replace_all(c, "a", "b"))
lf \%>\% summarise(x = median(d, na.rm = TRUE))
lf \%>\% summarise(x = var(c, na.rm = TRUE))
}
dbplyr/man/sql.Rd 0000644 0001762 0000144 00000001240 14002647450 013406 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sql.R
\name{sql}
\alias{sql}
\alias{is.sql}
\alias{as.sql}
\title{SQL escaping.}
\usage{
sql(...)
is.sql(x)
as.sql(x, con)
}
\arguments{
\item{...}{Character vectors that will be combined into a single SQL
expression.}
\item{x}{Object to coerce}
\item{con}{Needed when \code{x} is directly supplied from the user so that
schema specifications can be quoted using the correct identifiers.}
}
\description{
These functions are critical when writing functions that translate R
functions to sql functions. Typically a conversion function should escape
all its inputs and return an sql object.
}
dbplyr/man/tbl_sql.Rd 0000644 0001762 0000144 00000001263 14002647450 014254 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tbl-sql.R
\name{tbl_sql}
\alias{tbl_sql}
\title{Create an SQL tbl (abstract)}
\usage{
tbl_sql(subclass, src, from, ..., vars = NULL)
}
\arguments{
\item{subclass}{name of subclass}
\item{...}{needed for agreement with generic. Not otherwise used.}
\item{vars}{Provide column names as a character vector
to avoid retrieving them from the database.
Mainly useful for better performance when creating
multiple \code{tbl} objects.}
}
\description{
Generally, you should no longer need to provide a custom \code{tbl()}
method.
The default \code{tbl.DBIConnection} method should work in most cases.
}
\keyword{internal}
dbplyr/man/count.tbl_lazy.Rd 0000644 0001762 0000144 00000003340 14015732330 015555 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-count.R
\name{count.tbl_lazy}
\alias{count.tbl_lazy}
\alias{tally.tbl_lazy}
\title{Count observations by group}
\usage{
\method{count}{tbl_lazy}(x, ..., wt = NULL, sort = FALSE, name = NULL)
\method{tally}{tbl_lazy}(x, wt = NULL, sort = FALSE, name = NULL)
}
\arguments{
\item{x}{A data frame, data frame extension (e.g. a tibble), or a
lazy data frame (e.g. from dbplyr or dtplyr).}
\item{...}{<\code{\link[dplyr:dplyr_data_masking]{data-masking}}> Variables to group
by.}
\item{wt}{<\code{\link[dplyr:dplyr_data_masking]{data-masking}}> Frequency weights.
Can be \code{NULL} or a variable:
\itemize{
\item If \code{NULL} (the default), counts the number of rows in each group.
\item If a variable, computes \code{sum(wt)} for each group.
}}
\item{sort}{If \code{TRUE}, will show the largest groups at the top.}
\item{name}{The name of the new column in the output.
If omitted, it will default to \code{n}. If there's already a column called \code{n},
it will error, and require you to specify the name.}
}
\description{
These are methods for the dplyr \code{\link[=count]{count()}} and \code{\link[=tally]{tally()}} generics. They
wrap up \code{\link[=group_by.tbl_lazy]{group_by.tbl_lazy()}}, \code{\link[=summarise.tbl_lazy]{summarise.tbl_lazy()}} and, optionally,
\code{\link[=arrange.tbl_lazy]{arrange.tbl_lazy()}}.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
db <- memdb_frame(g = c(1, 1, 1, 2, 2), x = c(4, 3, 6, 9, 2))
db \%>\% count(g) \%>\% show_query()
db \%>\% count(g, wt = x) \%>\% show_query()
db \%>\% count(g, wt = x, sort = TRUE) \%>\% show_query()
}
dbplyr/man/backend-mssql.Rd 0000644 0001762 0000144 00000004617 14002647450 015346 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/backend-mssql.R
\name{backend-mssql}
\title{Backend: SQL server}
\arguments{
\item{version}{Version of MS SQL to simulate. Currently the only difference is
that 15.0 and above will use \code{TRY_CAST()} instead of \code{CAST()}.}
}
\description{
See \code{vignette("translate-function")} and \code{vignette("translate-verb")} for
details of overall translation technology. Key differences for this backend
are:
\itemize{
\item \code{SELECT} uses \code{TOP} not \code{LIMIT}
\item Automatically prefixes \verb{#} to create temporary tables. Add the prefix
yourself to avoid the message.
\item String basics: \code{paste()}, \code{substr()}, \code{nchar()}
\item Custom types for \verb{as.*} functions
\item Lubridate extraction functions, \code{year()}, \code{month()}, \code{day()} etc
\item Semi-automated bit <-> boolean translation (see below)
}
Use \code{simulate_mssql()} with \code{lazy_frame()} to see simulated SQL without
converting to a live database.
}
\section{Bit vs boolean}{
SQL server uses two incompatible types to represent \code{TRUE} and \code{FALSE}
values:
\itemize{
\item The \code{BOOLEAN} type is the result of logical comparisons (e.g. \code{x > y})
and can be used in \code{WHERE} but not to create new columns in \code{SELECT}.
\url{https://docs.microsoft.com/en-us/sql/t-sql/language-elements/comparison-operators-transact-sql}
\item The \code{BIT} type is a special type of numeric column used to store
\code{TRUE} and \code{FALSE} values, but can't be used in \code{WHERE} clauses.
\url{https://docs.microsoft.com/en-us/sql/t-sql/data-types/bit-transact-sql?view=sql-server-ver15}
}
dbplyr does its best to automatically create the correct type when needed,
but can't do it 100\% correctly because it does not have a full type
inference system. This means that you may need to manually do conversions
from time to time.
\itemize{
\item To convert from bit to boolean use \code{x == 1}
\item To convert from boolean to bit use \verb{as.logical(if(x, 0, 1))}
}
}
\examples{
library(dplyr, warn.conflicts = FALSE)
lf <- lazy_frame(a = TRUE, b = 1, c = 2, d = "z", con = simulate_mssql())
lf \%>\% head()
lf \%>\% transmute(x = paste(b, c, d))
# Can use boolean as is:
lf \%>\% filter(c > d)
# Need to convert from boolean to bit:
lf \%>\% transmute(x = c > d)
# Can use boolean as is:
lf \%>\% transmute(x = ifelse(c > d, "c", "d"))
}
dbplyr/man/arrange.tbl_lazy.Rd 0000644 0001762 0000144 00000003263 14015732330 016050 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-arrange.R
\name{arrange.tbl_lazy}
\alias{arrange.tbl_lazy}
\title{Arrange rows by column values}
\usage{
\method{arrange}{tbl_lazy}(.data, ..., .by_group = FALSE)
}
\arguments{
\item{.data}{A lazy data frame backed by a database query.}
\item{...}{<\code{\link[dplyr:dplyr_data_masking]{data-masking}}> Variables, or functions of
variables. Use \code{\link[dplyr:desc]{desc()}} to sort a variable in descending order.}
\item{.by_group}{If \code{TRUE}, will sort first by grouping variable. Applies to
grouped data frames only.}
}
\value{
Another \code{tbl_lazy}. Use \code{\link[=show_query]{show_query()}} to see the generated
query, and use \code{\link[=collect.tbl_sql]{collect()}} to execute the query
and return data to R.
}
\description{
This is a method for the dplyr \code{\link[=arrange]{arrange()}} generic. It generates
the \verb{ORDER BY} clause of the SQL query. It also affects the
\code{\link[=window_order]{window_order()}} of windowed expressions in \code{\link[=mutate.tbl_lazy]{mutate.tbl_lazy()}}.
Note that \verb{ORDER BY} clauses can not generally appear in subqueries, which
means that you should \code{arrange()} as late as possible in your pipelines.
}
\section{Missing values}{
Unlike R, most databases sort \code{NA} (\code{NULL}s) at the front. You can
override this behaviour by explicitly sorting on \code{is.na(x)}.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
db <- memdb_frame(a = c(3, 4, 1, 2), b = c(5, 1, 2, NA))
db \%>\% arrange(a) \%>\% show_query()
# Note that NAs are sorted first
db \%>\% arrange(b)
# override by sorting on is.na() first
db \%>\% arrange(is.na(b), b)
}
dbplyr/man/sql_quote.Rd 0000644 0001762 0000144 00000001034 13416416356 014633 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/escape.R
\name{sql_quote}
\alias{sql_quote}
\title{Helper function for quoting sql elements.}
\usage{
sql_quote(x, quote)
}
\arguments{
\item{x}{Character vector to escape.}
\item{quote}{Single quoting character.}
}
\description{
If the quote character is present in the string, it will be doubled.
\code{NA}s will be replaced with NULL.
}
\examples{
sql_quote("abc", "'")
sql_quote("I've had a good day", "'")
sql_quote(c("abc", NA), "'")
}
\keyword{internal}
dbplyr/man/db-io.Rd 0000644 0001762 0000144 00000004273 14002647450 013612 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db-io.R
\name{db-io}
\alias{db_copy_to}
\alias{db_compute}
\alias{db_collect}
\alias{db_table_temporary}
\title{Database I/O generics}
\usage{
db_copy_to(
con,
table,
values,
overwrite = FALSE,
types = NULL,
temporary = TRUE,
unique_indexes = NULL,
indexes = NULL,
analyze = TRUE,
...,
in_transaction = TRUE
)
db_compute(
con,
table,
sql,
temporary = TRUE,
unique_indexes = list(),
indexes = list(),
analyze = TRUE,
...
)
db_collect(con, sql, n = -1, warn_incomplete = TRUE, ...)
db_table_temporary(con, table, temporary)
}
\description{
These generics are responsible for getting data into and out of the
database. They should be used a last resort - only use them when you can't
make a backend work by providing methods for DBI generics, or for dbplyr's
SQL generation generics. They tend to be most needed when a backend has
special handling of temporary tables.
\itemize{
\item \code{db_copy_to()} implements \code{\link[=copy_to.src_sql]{copy_to.src_sql()}} by calling
\code{db_write_table()} (which calls \code{\link[DBI:dbWriteTable]{DBI::dbWriteTable()}}) to transfer the
data, then optionally adds indexes (via \code{\link[=sql_table_index]{sql_table_index()}}) and
analyses (via \code{\link[=sql_table_analyze]{sql_table_analyze()}}).
\item \code{db_compute()} implements \code{\link[=compute.tbl_sql]{compute.tbl_sql()}} by calling
\code{\link[=sql_query_save]{sql_query_save()}} to create the table, then optionally adds indexes
(via \code{\link[=sql_table_index]{sql_table_index()}}) and analyses (via \code{\link[=sql_table_analyze]{sql_table_analyze()}}).
\item \code{db_collect()} implements \code{\link[=collect.tbl_sql]{collect.tbl_sql()}} using \code{\link[DBI:dbSendQuery]{DBI::dbSendQuery()}}
and \code{\link[DBI:dbFetch]{DBI::dbFetch()}}.
\item \code{db_table_temporary()} is used for databases that have special naming
schemes for temporary tables (e.g. SQL server and SAP HANA require
temporary tables to start with \verb{#})
}
}
\seealso{
Other generic:
\code{\link{db-sql}},
\code{\link{db_connection_describe}()},
\code{\link{sql_escape_logical}()}
}
\concept{generic}
\keyword{internal}
dbplyr/man/distinct.tbl_lazy.Rd 0000644 0001762 0000144 00000002253 14015732330 016250 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-distinct.R
\name{distinct.tbl_lazy}
\alias{distinct.tbl_lazy}
\title{Subset distinct/unique rows}
\usage{
\method{distinct}{tbl_lazy}(.data, ..., .keep_all = FALSE)
}
\arguments{
\item{.data}{A lazy data frame backed by a database query.}
\item{...}{<\code{\link[dplyr:dplyr_data_masking]{data-masking}}> Optional variables
to use when determining uniqueness.}
\item{.keep_all}{If \code{TRUE}, keep all variables in \code{.data}.
If a combination of \code{...} is not distinct, this keeps the
first row of values.}
}
\value{
Another \code{tbl_lazy}. Use \code{\link[=show_query]{show_query()}} to see the generated
query, and use \code{\link[=collect.tbl_sql]{collect()}} to execute the query
and return data to R.
}
\description{
This is a method for the dplyr \code{\link[=distinct]{distinct()}} generic. It adds the
\code{DISTINCT} clause to the SQL query.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
db <- memdb_frame(x = c(1, 1, 2, 2), y = c(1, 2, 1, 1))
db \%>\% distinct() \%>\% show_query()
db \%>\% distinct(x) \%>\% show_query()
}
dbplyr/man/remote_name.Rd 0000644 0001762 0000144 00000002121 13415745770 015114 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/remote.R
\name{remote_name}
\alias{remote_name}
\alias{remote_src}
\alias{remote_con}
\alias{remote_query}
\alias{remote_query_plan}
\title{Metadata about a remote table}
\usage{
remote_name(x)
remote_src(x)
remote_con(x)
remote_query(x)
remote_query_plan(x)
}
\arguments{
\item{x}{Remote table, currently must be a \link{tbl_sql}.}
}
\value{
The value, or \code{NULL} if it is not a remote table, or not applicable.
For example, computed queries do not have a "name"
}
\description{
\code{remote_name()} gives the name remote table, or \code{NULL} if it's a query.
\code{remote_query()} gives the text of the query, and \code{remote_query_plan()}
the query plan (as computed by the remote database). \code{remote_src()} and
\code{remote_con()} give the dplyr source and DBI connection respectively.
}
\examples{
mf <- memdb_frame(x = 1:5, y = 5:1, .name = "blorp")
remote_name(mf)
remote_src(mf)
remote_con(mf)
remote_query(mf)
mf2 <- dplyr::filter(mf, x > 3)
remote_name(mf2)
remote_src(mf2)
remote_con(mf2)
remote_query(mf2)
}
dbplyr/man/join.tbl_sql.Rd 0000644 0001762 0000144 00000012716 14002647450 015217 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-joins.R
\name{join.tbl_sql}
\alias{join.tbl_sql}
\alias{inner_join.tbl_lazy}
\alias{left_join.tbl_lazy}
\alias{right_join.tbl_lazy}
\alias{full_join.tbl_lazy}
\alias{semi_join.tbl_lazy}
\alias{anti_join.tbl_lazy}
\title{Join SQL tables}
\usage{
\method{inner_join}{tbl_lazy}(
x,
y,
by = NULL,
copy = FALSE,
suffix = NULL,
auto_index = FALSE,
...,
sql_on = NULL,
na_matches = c("never", "na")
)
\method{left_join}{tbl_lazy}(
x,
y,
by = NULL,
copy = FALSE,
suffix = NULL,
auto_index = FALSE,
...,
sql_on = NULL,
na_matches = c("never", "na")
)
\method{right_join}{tbl_lazy}(
x,
y,
by = NULL,
copy = FALSE,
suffix = NULL,
auto_index = FALSE,
...,
sql_on = NULL,
na_matches = c("never", "na")
)
\method{full_join}{tbl_lazy}(
x,
y,
by = NULL,
copy = FALSE,
suffix = NULL,
auto_index = FALSE,
...,
sql_on = NULL,
na_matches = c("never", "na")
)
\method{semi_join}{tbl_lazy}(
x,
y,
by = NULL,
copy = FALSE,
auto_index = FALSE,
...,
sql_on = NULL,
na_matches = c("never", "na")
)
\method{anti_join}{tbl_lazy}(
x,
y,
by = NULL,
copy = FALSE,
auto_index = FALSE,
...,
sql_on = NULL,
na_matches = c("never", "na")
)
}
\arguments{
\item{x, y}{A pair of lazy data frames backed by database queries.}
\item{by}{A character vector of variables to join by.
If \code{NULL}, the default, \verb{*_join()} will perform a natural join, using all
variables in common across \code{x} and \code{y}. A message lists the variables so that you
can check they're correct; suppress the message by supplying \code{by} explicitly.
To join by different variables on \code{x} and \code{y}, use a named vector.
For example, \code{by = c("a" = "b")} will match \code{x$a} to \code{y$b}.
To join by multiple variables, use a vector with length > 1.
For example, \code{by = c("a", "b")} will match \code{x$a} to \code{y$a} and \code{x$b} to
\code{y$b}. Use a named vector to match different variables in \code{x} and \code{y}.
For example, \code{by = c("a" = "b", "c" = "d")} will match \code{x$a} to \code{y$b} and
\code{x$c} to \code{y$d}.
To perform a cross-join, generating all combinations of \code{x} and \code{y},
use \code{by = character()}.}
\item{copy}{If \code{x} and \code{y} are not from the same data source,
and \code{copy} is \code{TRUE}, then \code{y} will be copied into a
temporary table in the same database as \code{x}. \verb{*_join()} will automatically
run \code{ANALYZE} on the created table in the hope that this will make
your queries as efficient as possible by giving more data to the query
planner.
This allows you to join tables across srcs, but it's a potentially expensive
operation so you must opt into it.}
\item{suffix}{If there are non-joined duplicate variables in \code{x} and
\code{y}, these suffixes will be added to the output to disambiguate them.
Should be a character vector of length 2.}
\item{auto_index}{if \code{copy} is \code{TRUE}, automatically create
indices for the variables in \code{by}. This may speed up the join if
there are matching indexes in \code{x}.}
\item{...}{Other parameters passed onto methods.}
\item{sql_on}{A custom join predicate as an SQL expression.
Usually joins use column equality, but you can perform more complex
queries by supplying \code{sql_on} which should be a SQL expression that
uses \code{LHS} and \code{RHS} aliases to refer to the left-hand side or
right-hand side of the join respectively.}
\item{na_matches}{Should NA (NULL) values match one another?
The default, "never", is how databases usually work. \code{"na"} makes
the joins behave like the dplyr join functions, \code{\link[=merge]{merge()}}, \code{\link[=match]{match()}},
and \code{\%in\%}.}
}
\value{
Another \code{tbl_lazy}. Use \code{\link[=show_query]{show_query()}} to see the generated
query, and use \code{\link[=collect.tbl_sql]{collect()}} to execute the query
and return data to R.
}
\description{
These are methods for the dplyr \link{join} generics. They are translated
to the following SQL queries:
\itemize{
\item \code{inner_join(x, y)}: \verb{SELECT * FROM x JOIN y ON x.a = y.a}
\item \code{left_join(x, y)}: \verb{SELECT * FROM x LEFT JOIN y ON x.a = y.a}
\item \code{right_join(x, y)}: \verb{SELECT * FROM x RIGHT JOIN y ON x.a = y.a}
\item \code{full_join(x, y)}: \verb{SELECT * FROM x FULL JOIN y ON x.a = y.a}
\item \code{semi_join(x, y)}: \verb{SELECT * FROM x WHERE EXISTS (SELECT 1 FROM y WHERE x.a = y.a)}
\item \code{anti_join(x, y)}: \verb{SELECT * FROM x WHERE NOT EXISTS (SELECT 1 FROM y WHERE x.a = y.a)}
}
}
\examples{
library(dplyr, warn.conflicts = FALSE)
band_db <- tbl_memdb(dplyr::band_members)
instrument_db <- tbl_memdb(dplyr::band_instruments)
band_db \%>\% left_join(instrument_db) \%>\% show_query()
# Can join with local data frames by setting copy = TRUE
band_db \%>\%
left_join(dplyr::band_instruments, copy = TRUE)
# Unlike R, joins in SQL don't usually match NAs (NULLs)
db <- memdb_frame(x = c(1, 2, NA))
label <- memdb_frame(x = c(1, NA), label = c("one", "missing"))
db \%>\% left_join(label, by = "x")
# But you can activate R's usual behaviour with the na_matches argument
db \%>\% left_join(label, by = "x", na_matches = "na")
# By default, joins are equijoins, but you can use `sql_on` to
# express richer relationships
db1 <- memdb_frame(x = 1:5)
db2 <- memdb_frame(x = 1:3, y = letters[1:3])
db1 \%>\% left_join(db2) \%>\% show_query()
db1 \%>\% left_join(db2, sql_on = "LHS.x < RHS.x") \%>\% show_query()
}
dbplyr/man/tbl_lazy.Rd 0000644 0001762 0000144 00000001247 14002647450 014436 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tbl-lazy.R
\name{tbl_lazy}
\alias{tbl_lazy}
\alias{lazy_frame}
\title{Create a local lazy tibble}
\usage{
tbl_lazy(df, con = NULL, src = NULL)
lazy_frame(..., con = NULL, src = NULL)
}
\description{
These functions are useful for testing SQL generation without having to
have an active database connection. See \code{\link[=simulate_dbi]{simulate_dbi()}} for a list
of available database simulations.
}
\examples{
library(dplyr)
df <- data.frame(x = 1, y = 2)
df_sqlite <- tbl_lazy(df, con = simulate_sqlite())
df_sqlite \%>\% summarise(x = sd(x, na.rm = TRUE)) \%>\% show_query()
}
\keyword{internal}
dbplyr/man/lahman.Rd 0000644 0001762 0000144 00000003255 14002647450 014057 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-lahman.R
\name{lahman}
\alias{lahman}
\alias{lahman_sqlite}
\alias{lahman_postgres}
\alias{lahman_mysql}
\alias{copy_lahman}
\alias{has_lahman}
\alias{lahman_srcs}
\title{Cache and retrieve an \code{src_sqlite} of the Lahman baseball database.}
\usage{
lahman_sqlite(path = NULL)
lahman_postgres(dbname = "lahman", host = "localhost", ...)
lahman_mysql(dbname = "lahman", ...)
copy_lahman(con, ...)
has_lahman(type, ...)
lahman_srcs(..., quiet = NULL)
}
\arguments{
\item{...}{Other arguments passed to \code{src} on first
load. For MySQL and PostgreSQL, the defaults assume you have a local
server with \code{lahman} database already created.
For \code{lahman_srcs()}, character vector of names giving srcs to generate.}
\item{type}{src type.}
\item{quiet}{if \code{TRUE}, suppress messages about databases failing to
connect.}
}
\description{
This creates an interesting database using data from the Lahman baseball
data source, provided by Sean Lahman at
\url{http://www.seanlahman.com/baseball-archive/statistics/}, and
made easily available in R through the \pkg{Lahman} package by
Michael Friendly, Dennis Murphy and Martin Monkman. See the documentation
for that package for documentation of the individual tables.
}
\examples{
# Connect to a local sqlite database, if already created
\donttest{
library(dplyr)
if (has_lahman("sqlite")) {
lahman_sqlite()
batting <- tbl(lahman_sqlite(), "Batting")
batting
}
# Connect to a local postgres database with lahman database, if available
if (has_lahman("postgres")) {
lahman_postgres()
batting <- tbl(lahman_postgres(), "Batting")
}
}
}
\keyword{internal}
dbplyr/man/src_dbi.Rd 0000644 0001762 0000144 00000001751 14015732330 014217 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/src_dbi.R
\name{src_dbi}
\alias{src_dbi}
\title{Database src}
\usage{
src_dbi(con, auto_disconnect = FALSE)
}
\arguments{
\item{con}{An object that inherits from \link[DBI:DBIConnection-class]{DBI::DBIConnection},
typically generated by \link[DBI:dbConnect]{DBI::dbConnect}}
\item{auto_disconnect}{Should the connection be automatically closed when
the src is deleted? Set to \code{TRUE} if you initialize the connection in
the call to \code{src_dbi()}. Pass \code{NA} to auto-disconnect but print a message
when this happens.}
}
\value{
An S3 object with class \code{src_dbi}, \code{src_sql}, \code{src}.
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}}
Since you can generate a \code{tbl()} directly from a DBI connection we no longer
recommend using \code{src_dbi()}.
}
\keyword{internal}
dbplyr/man/sql_variant.Rd 0000644 0001762 0000144 00000007327 14006567571 015157 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/translate-sql-string.R,
% R/translate-sql-paste.R, R/translate-sql-helpers.R, R/backend-.R,
% R/backend-odbc.R
\docType{data}
\name{sql_substr}
\alias{sql_substr}
\alias{sql_str_sub}
\alias{sql_paste}
\alias{sql_paste_infix}
\alias{sql_variant}
\alias{sql_translator}
\alias{sql_infix}
\alias{sql_prefix}
\alias{sql_aggregate}
\alias{sql_aggregate_2}
\alias{sql_aggregate_n}
\alias{sql_not_supported}
\alias{sql_cast}
\alias{sql_try_cast}
\alias{sql_log}
\alias{sql_cot}
\alias{base_scalar}
\alias{base_agg}
\alias{base_win}
\alias{base_no_win}
\alias{base_odbc_scalar}
\alias{base_odbc_agg}
\alias{base_odbc_win}
\title{Create an sql translator}
\usage{
sql_substr(f = "SUBSTR")
sql_str_sub(subset_f = "SUBSTR", length_f = "LENGTH", optional_length = TRUE)
sql_paste(default_sep, f = "CONCAT_WS")
sql_paste_infix(default_sep, op, cast)
sql_variant(
scalar = sql_translator(),
aggregate = sql_translator(),
window = sql_translator()
)
sql_translator(..., .funs = list(), .parent = new.env(parent = emptyenv()))
sql_infix(f, pad = TRUE)
sql_prefix(f, n = NULL)
sql_aggregate(f, f_r = f)
sql_aggregate_2(f)
sql_aggregate_n(f, f_r = f)
sql_not_supported(f)
sql_cast(type)
sql_try_cast(type)
sql_log()
sql_cot()
base_scalar
base_agg
base_win
base_no_win
base_odbc_scalar
base_odbc_agg
base_odbc_win
}
\arguments{
\item{f}{the name of the sql function as a string}
\item{scalar, aggregate, window}{The three families of functions that an
SQL variant can supply.}
\item{..., .funs}{named functions, used to add custom converters from standard
R functions to sql functions. Specify individually in \code{...}, or
provide a list of \code{.funs}}
\item{.parent}{the sql variant that this variant should inherit from.
Defaults to \code{base_agg} which provides a standard set of
mappings for the most common operators and functions.}
\item{pad}{If \code{TRUE}, the default, pad the infix operator with spaces.}
\item{n}{for \code{sql_prefix()}, an optional number of arguments to expect.
Will signal error if not correct.}
\item{f_r}{the name of the r function being translated as a string}
}
\description{
When creating a package that maps to a new SQL based src, you'll often
want to provide some additional mappings from common R commands to the
commands that your tbl provides. These three functions make that
easy.
}
\section{Helper functions}{
\code{sql_infix()} and \code{sql_prefix()} create default SQL infix and prefix
functions given the name of the SQL function. They don't perform any input
checking, but do correctly escape their input, and are useful for
quickly providing default wrappers for a new SQL variant.
}
\examples{
# An example of adding some mappings for the statistical functions that
# postgresql provides: http://bit.ly/K5EdTn
postgres_agg <- sql_translator(.parent = base_agg,
cor = sql_aggregate_2("CORR"),
cov = sql_aggregate_2("COVAR_SAMP"),
sd = sql_aggregate("STDDEV_SAMP", "sd"),
var = sql_aggregate("VAR_SAMP", "var")
)
# Next we have to simulate a connection that uses this variant
con <- simulate_dbi("TestCon")
sql_translation.TestCon <- function(x) {
sql_variant(
base_scalar,
postgres_agg,
base_no_win
)
}
translate_sql(cor(x, y), con = con, window = FALSE)
translate_sql(sd(income / years), con = con, window = FALSE)
# Any functions not explicitly listed in the converter will be translated
# to sql as is, so you don't need to convert all functions.
translate_sql(regr_intercept(y, x), con = con)
}
\seealso{
\code{\link[=win_over]{win_over()}} for helper functions for window functions.
\code{\link[=sql]{sql()}} for an example of a more customised sql
conversion function.
}
\keyword{datasets}
\keyword{internal}
dbplyr/man/fill.tbl_lazy.Rd 0000644 0001762 0000144 00000003300 14004012136 015340 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-fill.R
\name{fill.tbl_lazy}
\alias{fill.tbl_lazy}
\title{Fill in missing values with previous or next value}
\usage{
fill.tbl_lazy(.data, ..., .direction = c("down", "up"))
}
\arguments{
\item{.data}{A lazy data frame backed by a database query.}
\item{...}{Columns to fill.}
\item{.direction}{Direction in which to fill missing values. Currently
either "down" (the default) or "up". Note that "up" does not work when
\code{.data} is sorted by non-numeric columns. As a workaround reverse the order
yourself beforehand; for example replace \code{arrange(x, desc(y))} by
\code{arrange(desc(x), y)}.}
}
\description{
Fill in missing values with previous or next value
}
\examples{
squirrels <- tibble::tribble(
~group, ~name, ~role, ~n_squirrels, ~ n_squirrels2,
1, "Sam", "Observer", NA, 1,
1, "Mara", "Scorekeeper", 8, NA,
1, "Jesse", "Observer", NA, NA,
1, "Tom", "Observer", NA, 4,
2, "Mike", "Observer", NA, NA,
2, "Rachael", "Observer", NA, 6,
2, "Sydekea", "Scorekeeper", 14, NA,
2, "Gabriela", "Observer", NA, NA,
3, "Derrick", "Observer", NA, NA,
3, "Kara", "Scorekeeper", 9, 10,
3, "Emily", "Observer", NA, NA,
3, "Danielle", "Observer", NA, NA
)
squirrels$id <- 1:12
if (require("tidyr", quietly = TRUE)) {
tbl_memdb(squirrels) \%>\%
window_order(id) \%>\%
tidyr::fill(
n_squirrels,
n_squirrels2,
)
}
}
dbplyr/man/expand.tbl_lazy.Rd 0000644 0001762 0000144 00000004174 14004012136 015703 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-expand.R
\name{expand.tbl_lazy}
\alias{expand.tbl_lazy}
\title{Expand SQL tables to include all possible combinations of values}
\usage{
expand.tbl_lazy(data, ..., .name_repair = "check_unique")
}
\arguments{
\item{data}{A lazy data frame backed by a database query.}
\item{...}{Specification of columns to expand. See \link[tidyr:expand]{tidyr::expand} for
more details.}
\item{.name_repair}{Treatment of problematic column names:
\itemize{
\item \code{"minimal"}: No name repair or checks, beyond basic existence,
\item \code{"unique"}: Make sure names are unique and not empty,
\item \code{"check_unique"}: (default value), no name repair, but check they are
\code{unique},
\item \code{"universal"}: Make the names \code{unique} and syntactic
\item a function: apply custom name repair (e.g., \code{.name_repair = make.names}
for names in the style of base R).
\item A purrr-style anonymous function, see \code{\link[rlang:as_function]{rlang::as_function()}}
}
This argument is passed on as \code{repair} to \code{\link[vctrs:vec_as_names]{vctrs::vec_as_names()}}.
See there for more details on these terms and the strategies used
to enforce them.}
}
\value{
Another \code{tbl_lazy}. Use \code{\link[=show_query]{show_query()}} to see the generated
query, and use \code{\link[=collect.tbl_sql]{collect()}} to execute the query
and return data to R.
}
\description{
This is a method for the \link[tidyr:expand]{tidyr::expand} generics. It doesn't sort the
result explicitly, so the order might be different to what \code{expand()}
returns for data frames.
}
\examples{
if (require("tidyr", quietly = TRUE)) {
fruits <- memdb_frame(
type = c("apple", "orange", "apple", "orange", "orange", "orange"),
year = c(2010, 2010, 2012, 2010, 2010, 2012),
size = c("XS", "S", "M", "S", "S", "M"),
weights = rnorm(6)
)
# All possible combinations ---------------------------------------
fruits \%>\% expand(type)
fruits \%>\% expand(type, size)
# Only combinations that already appear in the data ---------------
fruits \%>\% expand(nesting(type, size))
}
}
dbplyr/man/summarise.tbl_lazy.Rd 0000644 0001762 0000144 00000003343 14015732330 016435 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-summarise.R
\name{summarise.tbl_lazy}
\alias{summarise.tbl_lazy}
\title{Summarise each group to one row}
\usage{
\method{summarise}{tbl_lazy}(.data, ..., .groups = NULL)
}
\arguments{
\item{.data}{A lazy data frame backed by a database query.}
\item{...}{<\code{\link[dplyr:dplyr_data_masking]{data-masking}}> Variables, or functions of
variables. Use \code{\link[dplyr:desc]{desc()}} to sort a variable in descending order.}
\item{.groups}{\Sexpr[results=rd]{lifecycle::badge("experimental")} Grouping structure of the result.
\itemize{
\item "drop_last": dropping the last level of grouping. This was the
only supported option before version 1.0.0.
\item "drop": All levels of grouping are dropped.
\item "keep": Same grouping structure as \code{.data}.
}
When \code{.groups} is not specified, it defaults to "drop_last".
In addition, a message informs you of that choice, unless the result is ungrouped,
the option "dplyr.summarise.inform" is set to \code{FALSE},
or when \code{summarise()} is called from a function in a package.}
}
\value{
Another \code{tbl_lazy}. Use \code{\link[=show_query]{show_query()}} to see the generated
query, and use \code{\link[=collect.tbl_sql]{collect()}} to execute the query
and return data to R.
}
\description{
This is a method for the dplyr \code{\link[=summarise]{summarise()}} generic. It generates the
\code{SELECT} clause of the SQL query, and generally needs to be combined with
\code{group_by()}.
}
\examples{
library(dplyr, warn.conflicts = FALSE)
db <- memdb_frame(g = c(1, 1, 1, 2, 2), x = c(4, 3, 6, 9, 2))
db \%>\%
summarise(n()) \%>\%
show_query()
db \%>\%
group_by(g) \%>\%
summarise(n()) \%>\%
show_query()
}
dbplyr/man/win_over.Rd 0000644 0001762 0000144 00000003156 13734215523 014452 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/translate-sql-window.R
\name{win_over}
\alias{win_over}
\alias{win_rank}
\alias{win_aggregate}
\alias{win_aggregate_2}
\alias{win_recycled}
\alias{win_cumulative}
\alias{win_absent}
\alias{win_current_group}
\alias{win_current_order}
\alias{win_current_frame}
\title{Generate SQL expression for window functions}
\usage{
win_over(
expr,
partition = NULL,
order = NULL,
frame = NULL,
con = sql_current_con()
)
win_rank(f)
win_aggregate(f)
win_aggregate_2(f)
win_cumulative(f)
win_absent(f)
win_current_group()
win_current_order()
win_current_frame()
}
\arguments{
\item{expr}{The window expression}
\item{order}{Variables to order by}
\item{frame}{A numeric vector of length two defining the frame.}
\item{f}{The name of an sql function as a string}
\item{partition}{Variables to partition over}
}
\description{
\code{win_over()} makes it easy to generate the window function specification.
\code{win_absent()}, \code{win_rank()}, \code{win_aggregate()}, and \code{win_cumulative()}
provide helpers for constructing common types of window functions.
\code{win_current_group()} and \code{win_current_order()} allow you to access
the grouping and order context set up by \code{\link[=group_by]{group_by()}} and \code{\link[=arrange]{arrange()}}.
}
\examples{
con <- simulate_dbi()
win_over(sql("avg(x)"), con = con)
win_over(sql("avg(x)"), "y", con = con)
win_over(sql("avg(x)"), order = "y", con = con)
win_over(sql("avg(x)"), order = c("x", "y"), con = con)
win_over(sql("avg(x)"), frame = c(-Inf, 0), order = "y", con = con)
}
\keyword{internal}
dbplyr/man/partial_eval.Rd 0000644 0001762 0000144 00000004337 13734215523 015267 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/partial-eval.R
\name{partial_eval}
\alias{partial_eval}
\title{Partially evaluate an expression.}
\usage{
partial_eval(call, vars = character(), env = caller_env())
}
\arguments{
\item{call}{an unevaluated expression, as produced by \code{\link[=quote]{quote()}}}
\item{vars}{character vector of variable names.}
\item{env}{environment in which to search for local values}
}
\description{
This function partially evaluates an expression, using information from
the tbl to determine whether names refer to local expressions
or remote variables. This simplifies SQL translation because expressions
don't need to carry around their environment - all relevant information
is incorporated into the expression.
}
\section{Symbol substitution}{
\code{partial_eval()} needs to guess if you're referring to a variable on the
server (remote), or in the current environment (local). It's not possible to
do this 100\% perfectly. \code{partial_eval()} uses the following heuristic:
\itemize{
\item If the tbl variables are known, and the symbol matches a tbl
variable, then remote.
\item If the symbol is defined locally, local.
\item Otherwise, remote.
}
You can override the guesses using \code{local()} and \code{remote()} to force
computation, or by using the \code{.data} and \code{.env} pronouns of tidy evaluation.
}
\examples{
vars <- c("year", "id")
partial_eval(quote(year > 1980), vars = vars)
ids <- c("ansonca01", "forceda01", "mathebo01")
partial_eval(quote(id \%in\% ids), vars = vars)
# cf.
partial_eval(quote(id == .data$ids), vars = vars)
# You can use local() or .env to disambiguate between local and remote
# variables: otherwise remote is always preferred
year <- 1980
partial_eval(quote(year > year), vars = vars)
partial_eval(quote(year > local(year)), vars = vars)
partial_eval(quote(year > .env$year), vars = vars)
# Functions are always assumed to be remote. Use local to force evaluation
# in R.
f <- function(x) x + 1
partial_eval(quote(year > f(1980)), vars = vars)
partial_eval(quote(year > local(f(1980))), vars = vars)
# For testing you can also use it with the tbl omitted
partial_eval(quote(1 + 2 * 3))
x <- 1
partial_eval(quote(x ^ y))
}
\keyword{internal}
dbplyr/man/dbplyr_uncount.Rd 0000644 0001762 0000144 00000001767 14004012136 015661 0 ustar ligges users % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/verb-uncount.R
\name{dbplyr_uncount}
\alias{dbplyr_uncount}
\title{"Uncount" a database table}
\usage{
dbplyr_uncount(data, weights, .remove = TRUE, .id = NULL)
}
\arguments{
\item{data}{A lazy data frame backed by a database query.}
\item{weights}{A vector of weights. Evaluated in the context of \code{data};
supports quasiquotation.}
\item{.remove}{If \code{TRUE}, and \code{weights} is the name of a column in \code{data},
then this column is removed.}
\item{.id}{Supply a string to create a new variable which gives a unique
identifier for each created row.}
}
\description{
This is a method for the tidyr \code{uncount()} generic. It uses a temporary
table, so your database user needs permissions to create one.
}
\examples{
df <- memdb_frame(x = c("a", "b"), n = c(1, 2))
dbplyr_uncount(df, n)
dbplyr_uncount(df, n, .id = "id")
# You can also use constants
dbplyr_uncount(df, 2)
# Or expressions
dbplyr_uncount(df, 2 / n)
}
dbplyr/man/figures/ 0000755 0001762 0000144 00000000000 14002647450 013767 5 ustar ligges users dbplyr/man/figures/lifecycle-defunct.svg 0000644 0001762 0000144 00000001704 14002647450 020077 0 ustar ligges users