diff --git a/R/spark_read_bigquery.R b/R/spark_read_bigquery.R
index c2ffd62..025deae 100644
--- a/R/spark_read_bigquery.R
+++ b/R/spark_read_bigquery.R
@@ -28,7 +28,7 @@
 #' the service account will be used to interact with BigQuery and Google Cloud Storage (GCS).
 #' Defaults to \code{\link{default_service_account_key_file}}.
 #' @param additionalParameters
-#' \href{https://github.com/GoogleCloudDataproc/spark-bigquery-connector?tab=readme-ov-file#properties}{Additional Spark BigQuery connector options}.
+#' \href{https://github.com/GoogleCloudDataproc/spark-bigquery-connector?tab=readme-ov-file#properties}{List of additional Spark BigQuery connector options}.
 #' @param memory \code{logical} specifying whether data should be loaded eagerly into
 #' memory, i.e. whether the table should be cached. Note that eagerly caching prevents
 #' predicate pushdown (e.g. in conjunction with \code{\link[dplyr]{filter}}) and therefore
diff --git a/R/spark_write_bigquery.R b/R/spark_write_bigquery.R
index 9a292de..6d52c53 100644
--- a/R/spark_write_bigquery.R
+++ b/R/spark_write_bigquery.R
@@ -12,7 +12,7 @@
 #' with Google Cloud services. The use of service accounts is highly recommended. Specifically,
 #' the service account will be used to interact with BigQuery and Google Cloud Storage (GCS).
 #' @param additionalParameters
-#' \href{https://github.com/GoogleCloudDataproc/spark-bigquery-connector?tab=readme-ov-file#properties}{Additional Spark BigQuery connector options}.
+#' \href{https://github.com/GoogleCloudDataproc/spark-bigquery-connector?tab=readme-ov-file#properties}{List of additional Spark BigQuery connector options}.
 #' @param mode Specifies the behavior when data or table already exist. One of "overwrite",
 #' "append", "ignore" or "error" (default).
 #' @param ... Additional arguments passed to \code{\link[sparklyr]{spark_write_source}}.
diff --git a/man/spark_read_bigquery.Rd b/man/spark_read_bigquery.Rd
index d08c9a2..0f87e78 100644
--- a/man/spark_read_bigquery.Rd
+++ b/man/spark_read_bigquery.Rd
@@ -56,7 +56,7 @@ with Google Cloud services. The use of service accounts is highly recommended. S
 the service account will be used to interact with BigQuery and Google Cloud Storage (GCS).
 Defaults to \code{\link{default_service_account_key_file}}.}
 
-\item{additionalParameters}{\href{https://github.com/GoogleCloudDataproc/spark-bigquery-connector?tab=readme-ov-file#properties}{Additional Spark BigQuery connector options}.}
+\item{additionalParameters}{\href{https://github.com/GoogleCloudDataproc/spark-bigquery-connector?tab=readme-ov-file#properties}{List of additional Spark BigQuery connector options}.}
 
 \item{memory}{\code{logical} specifying whether data should be loaded eagerly into
 memory, i.e. whether the table should be cached. Note that eagerly caching prevents
diff --git a/man/spark_write_bigquery.Rd b/man/spark_write_bigquery.Rd
index 22f7712..2f52dac 100644
--- a/man/spark_write_bigquery.Rd
+++ b/man/spark_write_bigquery.Rd
@@ -29,7 +29,7 @@ Defaults to \code{default_project_id()}.}
 with Google Cloud services. The use of service accounts is highly recommended. Specifically,
 the service account will be used to interact with BigQuery and Google Cloud Storage (GCS).}
 
-\item{additionalParameters}{\href{https://github.com/GoogleCloudDataproc/spark-bigquery-connector?tab=readme-ov-file#properties}{Additional Spark BigQuery connector options}.}
+\item{additionalParameters}{\href{https://github.com/GoogleCloudDataproc/spark-bigquery-connector?tab=readme-ov-file#properties}{List of additional Spark BigQuery connector options}.}
 
 \item{mode}{Specifies the behavior when data or table already exist. One of "overwrite",
 "append", "ignore" or "error" (default).}
diff --git a/tests/testthat/test-read.R b/tests/testthat/test-read.R
index b1f87ef..6617b56 100644
--- a/tests/testthat/test-read.R
+++ b/tests/testthat/test-read.R
@@ -8,7 +8,8 @@ test_that("reading BigQuery tables works", {
     name = "shakespeare",
     projectId = "bigquery-public-data",
     datasetId = "samples",
-    tableId = "shakespeare"
+    tableId = "shakespeare",
+    additionalParameters = list(parentProject = default_project_id())
   )
 
   expect_equal(shakespeare %>% sparklyr::sdf_nrow(), 164656)
@@ -22,7 +23,8 @@ test_that("executing SQL queries works", {
   shakespeare <- spark_read_bigquery(
     sc,
     name = "shakespeare",
-    sqlQuery = "SELECT * FROM bigquery-public-data.samples.shakespeare"
+    sqlQuery = "SELECT * FROM bigquery-public-data.samples.shakespeare",
+    additionalParameters = list(parentProject = default_project_id())
   )
 
   expect_equal(shakespeare %>% sparklyr::sdf_nrow(), 164656)
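For reference, a minimal usage sketch (not part of the diff) of the pattern the updated tests exercise: passing Spark BigQuery connector properties to spark_read_bigquery() and spark_write_bigquery() as a named list via additionalParameters. The connection setup and the write-side target names ("my_dataset", "shakespeare_copy") are illustrative assumptions; parentProject and the read arguments mirror the tests above.

library(sparklyr)

# Assumes a Spark session with the spark-bigquery-connector on the classpath
# and GCP credentials available (e.g. via a service account key file).
sc <- spark_connect(master = "local")

# Read a public table; parentProject tells the connector which project to
# bill for the read, as in the updated tests.
shakespeare <- spark_read_bigquery(
  sc,
  name = "shakespeare",
  projectId = "bigquery-public-data",
  datasetId = "samples",
  tableId = "shakespeare",
  additionalParameters = list(parentProject = default_project_id())
)

# Write the result back; the datasetId/tableId targets are hypothetical, and
# mode = "overwrite" replaces any existing table (default is "error").
spark_write_bigquery(
  shakespeare,
  datasetId = "my_dataset",
  tableId = "shakespeare_copy",
  additionalParameters = list(parentProject = default_project_id()),
  mode = "overwrite"
)

Any property from the connector's documented list can be supplied this way; additionalParameters entries are forwarded to the underlying Spark data source as options.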