From dc6ede3db1ea88f1115eb8d67d1d8ca0824816c1 Mon Sep 17 00:00:00 2001 From: Raphael Taylor-Davies Date: Fri, 27 Oct 2023 17:37:58 +0100 Subject: [PATCH] Implement DynamoDBLock (#4880) --- .github/workflows/object_store.yml | 2 + object_store/src/aws/builder.rs | 24 +- object_store/src/aws/client.rs | 99 +++---- object_store/src/aws/dynamo.rs | 416 +++++++++++++++++++++++++++ object_store/src/aws/mod.rs | 46 ++- object_store/src/aws/precondition.rs | 11 + 6 files changed, 508 insertions(+), 90 deletions(-) create mode 100644 object_store/src/aws/dynamo.rs diff --git a/.github/workflows/object_store.yml b/.github/workflows/object_store.yml index 1b991e33c097..06ba203c55b0 100644 --- a/.github/workflows/object_store.yml +++ b/.github/workflows/object_store.yml @@ -112,6 +112,7 @@ jobs: AWS_SECRET_ACCESS_KEY: test AWS_ENDPOINT: http://localhost:4566 AWS_ALLOW_HTTP: true + AWS_COPY_IF_NOT_EXISTS: dynamo:test-table HTTP_URL: "http://localhost:8080" GOOGLE_BUCKET: test-bucket GOOGLE_SERVICE_ACCOUNT: "/tmp/gcs.json" @@ -136,6 +137,7 @@ jobs: docker run -d -p 4566:4566 localstack/localstack:2.0 docker run -d -p 1338:1338 amazon/amazon-ec2-metadata-mock:v1.9.2 --imdsv2 aws --endpoint-url=http://localhost:4566 s3 mb s3://test-bucket + aws --endpoint-url=http://localhost:4566 dynamodb create-table --table-name test-table --key-schema AttributeName=key,KeyType=HASH --attribute-definitions AttributeName=key,AttributeType=S --provisioned-throughput ReadCapacityUnits=5,WriteCapacityUnits=5 - name: Configure Azurite (Azure emulation) # the magical connection string is from diff --git a/object_store/src/aws/builder.rs b/object_store/src/aws/builder.rs index cf9490d96eae..8798150196a8 100644 --- a/object_store/src/aws/builder.rs +++ b/object_store/src/aws/builder.rs @@ -844,27 +844,23 @@ impl AmazonS3Builder { )) as _ }; - let endpoint: String; - let bucket_endpoint: String; - // If `endpoint` is provided then its assumed to be consistent with // `virtual_hosted_style_request`. i.e. if `virtual_hosted_style_request` is true then // `endpoint` should have bucket name included. - if self.virtual_hosted_style_request.get()? { - endpoint = self - .endpoint - .unwrap_or_else(|| format!("https://{bucket}.s3.{region}.amazonaws.com")); - bucket_endpoint = endpoint.clone(); + let bucket_endpoint = if self.virtual_hosted_style_request.get()? 
{
+            self.endpoint
+                .clone()
+                .unwrap_or_else(|| format!("https://{bucket}.s3.{region}.amazonaws.com"))
         } else {
-            endpoint = self
-                .endpoint
-                .unwrap_or_else(|| format!("https://s3.{region}.amazonaws.com"));
-            bucket_endpoint = format!("{endpoint}/{bucket}");
-        }
+            match &self.endpoint {
+                None => format!("https://s3.{region}.amazonaws.com/{bucket}"),
+                Some(endpoint) => format!("{endpoint}/{bucket}"),
+            }
+        };
 
         let config = S3Config {
             region,
-            endpoint,
+            endpoint: self.endpoint,
             bucket,
             bucket_endpoint,
             credentials,
diff --git a/object_store/src/aws/client.rs b/object_store/src/aws/client.rs
index 3e47abd4bcc5..203d03d19910 100644
--- a/object_store/src/aws/client.rs
+++ b/object_store/src/aws/client.rs
@@ -21,7 +21,7 @@ use crate::aws::{
     AwsCredentialProvider, S3ConditionalPut, S3CopyIfNotExists, STORE, STRICT_PATH_ENCODE_SET,
 };
 use crate::client::get::GetClient;
-use crate::client::header::HeaderConfig;
+use crate::client::header::{get_etag, HeaderConfig};
 use crate::client::header::{get_put_result, get_version};
 use crate::client::list::ListClient;
 use crate::client::retry::RetryExt;
@@ -39,13 +39,14 @@ use async_trait::async_trait;
 use base64::prelude::BASE64_STANDARD;
 use base64::Engine;
 use bytes::{Buf, Bytes};
+use hyper::http;
 use hyper::http::HeaderName;
 use itertools::Itertools;
 use percent_encoding::{utf8_percent_encode, PercentEncode};
 use quick_xml::events::{self as xml_events};
 use reqwest::{
     header::{CONTENT_LENGTH, CONTENT_TYPE},
-    Client as ReqwestClient, Method, RequestBuilder, Response, StatusCode,
+    Client as ReqwestClient, Method, RequestBuilder, Response,
 };
 use serde::{Deserialize, Serialize};
 use snafu::{ResultExt, Snafu};
@@ -196,7 +197,7 @@ impl From<Error> for crate::Error {
 #[derive(Debug)]
 pub struct S3Config {
     pub region: String,
-    pub endpoint: String,
+    pub endpoint: Option<String>,
     pub bucket: String,
     pub bucket_endpoint: String,
     pub credentials: AwsCredentialProvider,
@@ -215,7 +216,7 @@ impl S3Config {
         format!("{}/{}", self.bucket_endpoint, encode_path(path))
     }
 
-    async fn get_credential(&self) -> Result<Option<Arc<AwsCredential>>> {
+    pub(crate) async fn get_credential(&self) -> Result<Option<Arc<AwsCredential>>> {
         Ok(match self.skip_signature {
             false => Some(self.credentials.get_credential().await?),
             true => None,
@@ -223,26 +224,30 @@ impl S3Config {
     }
 }
 
-/// A builder for a put request allowing customisation of the headers and query string
-pub(crate) struct PutRequest<'a> {
+/// A builder for a request allowing customisation of the headers and query string
+pub(crate) struct Request<'a> {
     path: &'a Path,
     config: &'a S3Config,
     builder: RequestBuilder,
     payload_sha256: Option<Vec<u8>>,
 }
 
-impl<'a> PutRequest<'a> {
+impl<'a> Request<'a> {
     pub fn query<T: Serialize + ?Sized + Sync>(self, query: &T) -> Self {
         let builder = self.builder.query(query);
         Self { builder, ..self }
     }
 
-    pub fn header(self, k: &HeaderName, v: &str) -> Self {
+    pub fn header<K>(self, k: K, v: &str) -> Self
+    where
+        HeaderName: TryFrom<K>,
+        <HeaderName as TryFrom<K>>::Error: Into<http::Error>,
+    {
         let builder = self.builder.header(k, v);
         Self { builder, ..self }
     }
 
-    pub async fn send(self) -> Result<PutResult> {
+    pub async fn send(self) -> Result<Response> {
         let credential = self.config.get_credential().await?;
 
         let response = self
@@ -260,14 +265,19 @@ impl<'a> PutRequest<'a> {
             path: self.path.as_ref(),
         })?;
 
+        Ok(response)
+    }
+
+    pub async fn do_put(self) -> Result<PutResult> {
+        let response = self.send().await?;
         Ok(get_put_result(response.headers(), VERSION_HEADER).context(MetadataSnafu)?)
    }
 }
 
 #[derive(Debug)]
 pub(crate) struct S3Client {
-    config: S3Config,
-    client: ReqwestClient,
+    pub config: S3Config,
+    pub client: ReqwestClient,
 }
 
 impl S3Client {
@@ -276,20 +286,15 @@ impl S3Client {
         Ok(Self { config, client })
     }
 
-    /// Returns the config
-    pub fn config(&self) -> &S3Config {
-        &self.config
-    }
-
     /// Make an S3 PUT request
     ///
     /// Returns the ETag
-    pub fn put_request<'a>(&'a self, path: &'a Path, bytes: Bytes) -> PutRequest<'a> {
+    pub fn put_request<'a>(&'a self, path: &'a Path, bytes: Bytes) -> Request<'a> {
         let url = self.config.path_url(path);
         let mut builder = self.client.request(Method::PUT, url);
         let mut payload_sha256 = None;
 
-        if let Some(checksum) = self.config().checksum {
+        if let Some(checksum) = self.config.checksum {
             let digest = checksum.digest(&bytes);
             builder = builder.header(checksum.header_name(), BASE64_STANDARD.encode(&digest));
             if checksum == Checksum::SHA256 {
@@ -302,11 +307,11 @@ impl S3Client {
             false => builder.body(bytes),
         };
 
-        if let Some(value) = self.config().client_options.get_content_type(path) {
+        if let Some(value) = self.config.client_options.get_content_type(path) {
             builder = builder.header(CONTENT_TYPE, value);
         }
 
-        PutRequest {
+        Request {
             path,
             builder,
             payload_sha256,
@@ -400,7 +405,7 @@ impl S3Client {
 
         // Compute checksum - S3 *requires* this for DeleteObjects requests, so we default to
        // their algorithm if the user hasn't specified one.
-        let checksum = self.config().checksum.unwrap_or(Checksum::SHA256);
+        let checksum = self.config.checksum.unwrap_or(Checksum::SHA256);
         let digest = checksum.digest(&body);
         builder = builder.header(checksum.header_name(), BASE64_STANDARD.encode(&digest));
         let payload_sha256 = if checksum == Checksum::SHA256 {
@@ -451,52 +456,21 @@ impl S3Client {
     }
 
     /// Make an S3 Copy request
-    pub async fn copy_request(&self, from: &Path, to: &Path, overwrite: bool) -> Result<()> {
-        let credential = self.config.get_credential().await?;
+    pub fn copy_request<'a>(&'a self, from: &Path, to: &'a Path) -> Request<'a> {
         let url = self.config.path_url(to);
         let source = format!("{}/{}", self.config.bucket, encode_path(from));
 
-        let mut builder = self
+        let builder = self
             .client
             .request(Method::PUT, url)
             .header("x-amz-copy-source", source);
 
-        if !overwrite {
-            match &self.config.copy_if_not_exists {
-                Some(S3CopyIfNotExists::Header(k, v)) => {
-                    builder = builder.header(k, v);
-                }
-                None => {
-                    return Err(crate::Error::NotSupported {
-                        source: "S3 does not support copy-if-not-exists".to_string().into(),
-                    })
-                }
-            }
+        Request {
+            builder,
+            path: to,
+            config: &self.config,
+            payload_sha256: None,
         }
-
-        builder
-            .with_aws_sigv4(
-                credential.as_deref(),
-                &self.config.region,
-                "s3",
-                self.config.sign_payload,
-                None,
-            )
-            .send_retry(&self.config.retry_config)
-            .await
-            .map_err(|source| match source.status() {
-                Some(StatusCode::PRECONDITION_FAILED) => crate::Error::AlreadyExists {
-                    source: Box::new(source),
-                    path: to.to_string(),
-                },
-                _ => Error::CopyRequest {
-                    source,
-                    path: from.to_string(),
-                }
-                .into(),
-            })?;
-
-        Ok(())
     }
 
     pub async fn create_multipart(&self, location: &Path) -> Result<MultipartId> {
@@ -535,15 +509,14 @@
     ) -> Result<PartId> {
         let part = (part_idx + 1).to_string();
 
-        let result = self
+        let response = self
             .put_request(path, data)
             .query(&[("partNumber", &part), ("uploadId", upload_id)])
             .send()
             .await?;
 
-        Ok(PartId {
-            content_id: result.e_tag.unwrap(),
-        })
+        let content_id = get_etag(response.headers()).context(MetadataSnafu)?;
+        Ok(PartId { content_id })
     }
 
     pub async fn complete_multipart(
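The reworked `Request` builder splits transport from interpretation: `send` now returns the raw `Response`, `do_put` layers `PutResult` parsing on top, and `copy_request` reuses the same builder. A hypothetical call site, mirroring how `put_opts` uses it further below (a sketch, not part of this commit):

    // Conditionally create an object via the reworked builder (illustrative):
    let result = client
        .put_request(&path, bytes)
        .header(&IF_NONE_MATCH, "*")
        .do_put()
        .await?;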
diff --git a/object_store/src/aws/dynamo.rs b/object_store/src/aws/dynamo.rs
new file mode 100644
index 000000000000..cb6a7b2c225e
--- /dev/null
+++ b/object_store/src/aws/dynamo.rs
@@ -0,0 +1,416 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! A DynamoDB-based lock system
+
+use crate::aws::client::S3Client;
+use crate::aws::credential::CredentialExt;
+use crate::client::get::GetClientExt;
+use crate::client::retry::Error as RetryError;
+use crate::client::retry::RetryExt;
+use crate::path::Path;
+use crate::{Error, GetOptions, Result};
+use chrono::Utc;
+use reqwest::StatusCode;
+use serde::ser::SerializeMap;
+use serde::{Deserialize, Serialize, Serializer};
+use std::collections::HashMap;
+use std::time::{Duration, Instant};
+
+/// The exception returned by DynamoDB on conflict
+const CONFLICT: &str = "com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException";
+
+/// A DynamoDB-based commit protocol, used to provide conditional write support for S3
+///
+/// ## Limitations
+///
+/// Only conditional operations, e.g. `copy_if_not_exists`, will be synchronized, and they can
+/// therefore race with non-conditional operations, e.g. `put` or `copy`, or with conditional
+/// operations performed by writers not configured to synchronize with DynamoDB.
+///
+/// Workloads making use of this mechanism **must** ensure:
+///
+/// * Conditional and non-conditional operations are not performed on the same paths
+/// * Conditional operations are only performed via similarly configured clients
+///
+/// Additionally, as the locking mechanism relies on timeouts to detect stale locks,
+/// performance will be poor for systems that frequently delete and then re-create
+/// objects at the same path; it is instead optimised for systems that primarily create
+/// files at paths never used before, or that perform conditional updates to existing files.
+///
+/// ## Commit Protocol
+///
+/// The DynamoDB schema is as follows:
+///
+/// * A string hash key named `"key"`
+/// * A numeric [TTL] attribute named `"ttl"`
+/// * A numeric attribute named `"generation"`
+/// * A numeric attribute named `"timeout"`
+///
+/// To perform a conditional operation on an object with a given `path` and `etag` (if any),
+/// the commit protocol is as follows:
+///
+/// 1. Perform HEAD request on `path` and error on precondition mismatch
+/// 2. Create record in DynamoDB with key `{path}#{etag}` with the configured timeout
+///     1. On Success: Perform operation with the configured timeout
+///     2. On Conflict:
+///         1. Periodically re-perform HEAD request on `path` and error on precondition mismatch
+///         2. If `timeout * max_clock_skew_rate` has passed, replace the record, incrementing `"generation"`
+///             1. On Success: GOTO 2.1
+///             2. On Conflict: GOTO 2.2
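+///
+/// As a worked example (illustrative values only): committing to `path = data/_latest`
+/// whose current etag is `"123"` creates the DynamoDB item keyed `data/_latest#123`; a
+/// concurrent writer racing on the same commit fails step 2, then polls the object until
+/// either its etag changes (the race was lost) or the lease times out and can be taken
+/// over by incrementing `"generation"`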
+///
+/// Provided no writer modifies an object with a given `path` and `etag` without first adding a
+/// corresponding record to DynamoDB, we are guaranteed that only one writer will ever commit.
+///
+/// This is inspired by the [DynamoDB Lock Client] but simplified for the more limited
+/// requirements of synchronizing object storage. The major changes are:
+///
+/// * Uses a monotonic generation count instead of a UUID rvn (record version number), as this is:
+///     * Cheaper to generate, serialize and compare
+///     * Guaranteed not to collide
+///     * More human readable / interpretable
+/// * Relies on [TTL] to eventually clean up old locks
+///
+/// It also draws inspiration from the DeltaLake [S3 Multi-Cluster] commit protocol, but
+/// generalised to not make assumptions about the workload and not rely on first writing
+/// to a temporary path.
+///
+/// [TTL]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/howitworks-ttl.html
+/// [DynamoDB Lock Client]: https://aws.amazon.com/blogs/database/building-distributed-locks-with-the-dynamodb-lock-client/
+/// [S3 Multi-Cluster]: https://docs.google.com/document/d/1Gs4ZsTH19lMxth4BSdwlWjUNR-XhKHicDvBjd2RqNd8/edit#heading=h.mjjuxw9mcz9h
+#[derive(Debug, Clone)]
+pub struct DynamoCommit {
+    table_name: String,
+    /// The number of seconds a lease is valid for
+    timeout: usize,
+    /// The maximum clock skew rate tolerated by the system
+    max_clock_skew_rate: u32,
+    /// The length of time a record will be retained in DynamoDB before being cleaned up
+    ///
+    /// This is purely an optimisation to avoid indefinite growth of the DynamoDB table
+    /// and does not impact how long clients may wait to acquire a lock
+    ttl: Duration,
+    /// The backoff duration before retesting a condition
+    test_interval: Duration,
+}
+
+impl DynamoCommit {
+    /// Create a new [`DynamoCommit`] with a given table name
+    pub fn new(table_name: String) -> Self {
+        Self {
+            table_name,
+            timeout: 20,
+            max_clock_skew_rate: 3,
+            ttl: Duration::from_secs(60 * 60),
+            test_interval: Duration::from_millis(100),
+        }
+    }
+
+    /// Returns the name of the DynamoDB table
+    pub fn table_name(&self) -> &str {
+        &self.table_name
+    }
+
+    pub(crate) async fn copy_if_not_exists(
+        &self,
+        client: &S3Client,
+        from: &Path,
+        to: &Path,
+    ) -> Result<()> {
+        check_not_exists(client, to).await?;
+
+        let mut previous_lease = None;
+
+        loop {
+            let existing = previous_lease.as_ref();
+            match self.try_lock(client, to.as_ref(), existing).await? {
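+                // On success we hold the lease and must finish the copy before it
+                // expires; on conflict we poll until the destination is created
+                // (another writer won) or the existing lease has timed out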
+                TryLockResult::Ok(lease) => {
+                    let fut = client.copy_request(from, to).send();
+                    let expiry = lease.acquire + lease.timeout;
+                    return match tokio::time::timeout_at(expiry.into(), fut).await {
+                        Ok(Ok(_)) => Ok(()),
+                        Ok(Err(e)) => Err(e),
+                        Err(_) => Err(Error::Generic {
+                            store: "DynamoDB",
+                            source: format!(
+                                "Failed to perform copy operation in {} seconds",
+                                self.timeout
+                            )
+                            .into(),
+                        }),
+                    };
+                }
+                TryLockResult::Conflict(conflict) => {
+                    let mut interval = tokio::time::interval(self.test_interval);
+                    let expiry = conflict.timeout * self.max_clock_skew_rate;
+                    loop {
+                        interval.tick().await;
+                        check_not_exists(client, to).await?;
+                        if conflict.acquire.elapsed() > expiry {
+                            previous_lease = Some(conflict);
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    async fn try_lock(
+        &self,
+        s3: &S3Client,
+        key: &str,
+        existing: Option<&Lease>,
+    ) -> Result<TryLockResult> {
+        let attributes;
+        let (next_gen, condition_expression, expression_attribute_values) = match existing {
+            None => (0_usize, "attribute_not_exists(#pk)", Map(&[])),
+            Some(existing) => {
+                attributes = [(":g", AttributeValue::Number(existing.generation))];
+                (
+                    existing.generation.checked_add(1).unwrap(),
+                    "attribute_exists(#pk) AND generation = :g",
+                    Map(attributes.as_slice()),
+                )
+            }
+        };
+
+        let ttl = (Utc::now() + self.ttl).timestamp();
+        let items = [
+            ("key", AttributeValue::String(key)),
+            ("generation", AttributeValue::Number(next_gen)),
+            ("timeout", AttributeValue::Number(self.timeout)),
+            ("ttl", AttributeValue::Number(ttl as _)),
+        ];
+        let names = [("#pk", "key")];
+
+        let req = PutItem {
+            table_name: &self.table_name,
+            condition_expression,
+            expression_attribute_values,
+            expression_attribute_names: Map(&names),
+            item: Map(&items),
+            return_values: None,
+            return_values_on_condition_check_failure: Some(ReturnValues::AllOld),
+        };
+
+        let credential = s3.config.get_credential().await?;
+
+        let acquire = Instant::now();
+        let region = &s3.config.region;
+
+        let builder = match &s3.config.endpoint {
+            Some(e) => s3.client.post(e),
+            None => {
+                let url = format!("https://dynamodb.{region}.amazonaws.com");
+                s3.client.post(url)
+            }
+        };
+
+        let response = builder
+            .json(&req)
+            .header("X-Amz-Target", "DynamoDB_20120810.PutItem")
+            .with_aws_sigv4(credential.as_deref(), region, "dynamodb", true, None)
+            .send_retry(&s3.config.retry_config)
+            .await;
+
+        match response {
+            Ok(_) => Ok(TryLockResult::Ok(Lease {
+                acquire,
+                generation: next_gen,
+                timeout: Duration::from_secs(self.timeout as _),
+            })),
+            Err(e) => match try_extract_lease(&e) {
+                Some(lease) => Ok(TryLockResult::Conflict(lease)),
+                None => Err(Error::Generic {
+                    store: "DynamoDB",
+                    source: Box::new(e),
+                }),
+            },
+        }
+    }
+}
+
+#[derive(Debug)]
+enum TryLockResult {
+    /// Successfully acquired a lease
+    Ok(Lease),
+    /// An existing lease was found
+    Conflict(Lease),
+}
+
+/// Returns an [`Error::AlreadyExists`] if `path` exists
+async fn check_not_exists(client: &S3Client, path: &Path) -> Result<()> {
+    let options = GetOptions {
+        head: true,
+        ..Default::default()
+    };
+    match client.get_opts(path, options).await {
+        Ok(_) => Err(Error::AlreadyExists {
+            path: path.to_string(),
+            source: "Already Exists".to_string().into(),
+        }),
+        Err(Error::NotFound { .. }) => Ok(()),
+        Err(e) => Err(e),
+    }
+}
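+
+// For reference, an illustrative (assumed) conflict body that `try_extract_lease`
+// below can recover a lease from; DynamoDB includes `Item` because the PutItem
+// request sets `ReturnValuesOnConditionCheckFailure: ALL_OLD`:
+//
+// {
+//   "__type": "com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException",
+//   "Item": {"generation": {"N": "3"}, "timeout": {"N": "20"}}
+// }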
+
+/// If [`RetryError`] corresponds to [`CONFLICT`] extracts the pre-existing [`Lease`]
+fn try_extract_lease(e: &RetryError) -> Option<Lease> {
+    match e {
+        RetryError::Client {
+            status: StatusCode::BAD_REQUEST,
+            body: Some(b),
+        } => {
+            let resp: ErrorResponse<'_> = serde_json::from_str(b).ok()?;
+            if resp.error != CONFLICT {
+                return None;
+            }
+
+            let generation = match resp.item.get("generation") {
+                Some(AttributeValue::Number(generation)) => generation,
+                _ => return None,
+            };
+
+            let timeout = match resp.item.get("timeout") {
+                Some(AttributeValue::Number(timeout)) => *timeout,
+                _ => return None,
+            };
+
+            Some(Lease {
+                acquire: Instant::now(),
+                generation: *generation,
+                timeout: Duration::from_secs(timeout as _),
+            })
+        }
+        _ => None,
+    }
+}
+
+/// A lock lease
+#[derive(Debug, Clone)]
+struct Lease {
+    acquire: Instant,
+    generation: usize,
+    timeout: Duration,
+}
+
+/// A DynamoDB [PutItem] payload
+///
+/// [PutItem]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_PutItem.html
+#[derive(Serialize)]
+#[serde(rename_all = "PascalCase")]
+struct PutItem<'a> {
+    /// The table name
+    table_name: &'a str,
+
+    /// A condition that must be satisfied in order for a conditional PutItem operation to succeed.
+    condition_expression: &'a str,
+
+    /// One or more substitution tokens for attribute names in an expression
+    expression_attribute_names: Map<'a, &'a str, &'a str>,
+
+    /// One or more values that can be substituted in an expression
+    expression_attribute_values: Map<'a, &'a str, AttributeValue<'a>>,
+
+    /// A map of attribute name/value pairs, one for each attribute
+    item: Map<'a, &'a str, AttributeValue<'a>>,
+
+    /// Use ReturnValues if you want to get the item attributes as they appeared
+    /// before they were updated with the PutItem request.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    return_values: Option<ReturnValues>,
+
+    /// An optional parameter that returns the item attributes for a PutItem operation
+    /// that failed a condition check.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    return_values_on_condition_check_failure: Option<ReturnValues>,
+}
+
+#[derive(Deserialize)]
+struct ErrorResponse<'a> {
+    #[serde(rename = "__type")]
+    error: &'a str,
+
+    #[serde(borrow, default, rename = "Item")]
+    item: HashMap<&'a str, AttributeValue<'a>>,
+}
+
+#[derive(Serialize)]
+#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
+enum ReturnValues {
+    AllOld,
+}
+
+/// A collection of key value pairs
+///
+/// This provides cheap, ordered serialization of maps
+struct Map<'a, K, V>(&'a [(K, V)]);
+
+impl<'a, K: Serialize, V: Serialize> Serialize for Map<'a, K, V> {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        if self.0.is_empty() {
+            return serializer.serialize_none();
+        }
+        let mut map = serializer.serialize_map(Some(self.0.len()))?;
+        for (k, v) in self.0 {
+            map.serialize_entry(k, v)?;
+        }
+        map.end()
+    }
+}
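+
+// For illustration (assumed output, consistent with `test_attribute_serde` below):
+// serializing `Map(&[("key", AttributeValue::String("a"))])` yields
+// `{"key":{"S":"a"}}`, the attribute encoding DynamoDB expects.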
+
+/// A DynamoDB [AttributeValue]
+///
+/// [AttributeValue]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_AttributeValue.html
+#[derive(Debug, Serialize, Deserialize)]
+enum AttributeValue<'a> {
+    #[serde(rename = "S")]
+    String(&'a str),
+    #[serde(rename = "N", with = "number")]
+    Number(usize),
+}
+
+/// Numbers are serialized as strings
+mod number {
+    use serde::{Deserialize, Deserializer, Serializer};
+
+    pub fn serialize<S: Serializer>(v: &usize, s: S) -> Result<S::Ok, S::Error> {
+        s.serialize_str(&v.to_string())
+    }
+
+    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<usize, D::Error> {
+        let v: &str = Deserialize::deserialize(d)?;
+        v.parse().map_err(serde::de::Error::custom)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_attribute_serde() {
+        let serde = serde_json::to_string(&AttributeValue::Number(23)).unwrap();
+        assert_eq!(serde, "{\"N\":\"23\"}");
+        let back: AttributeValue<'_> = serde_json::from_str(&serde).unwrap();
+        assert!(matches!(back, AttributeValue::Number(23)));
+    }
+}
diff --git a/object_store/src/aws/mod.rs b/object_store/src/aws/mod.rs
index cbb3cffdf494..6f4f1e35b949 100644
--- a/object_store/src/aws/mod.rs
+++ b/object_store/src/aws/mod.rs
@@ -58,11 +58,13 @@ mod builder;
 mod checksum;
 mod client;
 mod credential;
+mod dynamo;
 mod precondition;
 mod resolve;
 
 pub use builder::{AmazonS3Builder, AmazonS3ConfigKey};
 pub use checksum::Checksum;
+pub use dynamo::DynamoCommit;
 pub use precondition::{S3ConditionalPut, S3CopyIfNotExists};
 pub use resolve::resolve_bucket_region;
 
@@ -93,19 +95,19 @@ pub struct AmazonS3 {
 impl std::fmt::Display for AmazonS3 {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "AmazonS3({})", self.client.config().bucket)
+        write!(f, "AmazonS3({})", self.client.config.bucket)
     }
 }
 
 impl AmazonS3 {
     /// Returns the [`AwsCredentialProvider`] used by [`AmazonS3`]
     pub fn credentials(&self) -> &AwsCredentialProvider {
-        &self.client.config().credentials
+        &self.client.config.credentials
     }
 
     /// Create a full URL to the resource specified by `path` with this instance's configuration.
     fn path_url(&self, path: &Path) -> String {
-        self.client.config().path_url(path)
+        self.client.config.path_url(path)
     }
 }
 
@@ -145,7 +147,7 @@ impl Signer for AmazonS3 {
     /// ```
     async fn signed_url(&self, method: Method, path: &Path, expires_in: Duration) -> Result<Url> {
         let credential = self.credentials().get_credential().await?;
-        let authorizer = AwsAuthorizer::new(&credential, "s3", &self.client.config().region);
+        let authorizer = AwsAuthorizer::new(&credential, "s3", &self.client.config.region);
 
         let path_url = self.path_url(path);
         let mut url = Url::parse(&path_url).map_err(|e| crate::Error::Generic {
@@ -164,15 +166,15 @@ impl ObjectStore for AmazonS3 {
     async fn put_opts(&self, location: &Path, bytes: Bytes, opts: PutOptions) -> Result<PutResult> {
         let mut request = self.client.put_request(location, bytes);
         let tags = opts.tags.encoded();
-        if !tags.is_empty() && !self.client.config().disable_tagging {
+        if !tags.is_empty() && !self.client.config.disable_tagging {
             request = request.header(&TAGS_HEADER, tags);
         }
 
-        match (opts.mode, &self.client.config().conditional_put) {
-            (PutMode::Overwrite, _) => request.send().await,
+        match (opts.mode, &self.client.config.conditional_put) {
+            (PutMode::Overwrite, _) => request.do_put().await,
             (PutMode::Create | PutMode::Update(_), None) => Err(Error::NotImplemented),
             (PutMode::Create, Some(S3ConditionalPut::ETagMatch)) => {
-                match request.header(&IF_NONE_MATCH, "*").send().await {
+                match request.header(&IF_NONE_MATCH, "*").do_put().await {
                     // Technically If-None-Match should return NotModified but some stores,
                     // such as R2, instead return PreconditionFailed
                     // https://developers.cloudflare.com/r2/api/s3/extensions/#conditional-operations-in-putobject
@@ -190,7 +192,7 @@ impl ObjectStore for AmazonS3 {
                         store: STORE,
                         source: "ETag required for conditional put".to_string().into(),
                     })?;
-                    request.header(&IF_MATCH, etag.as_str()).send().await
+                    request.header(&IF_MATCH, etag.as_str()).do_put().await
                 }
             }
         }
    }
@@ -261,11 +263,29 @@ impl ObjectStore for AmazonS3 {
     }
 
     async fn copy(&self, from: &Path, to: &Path) -> Result<()> {
-        self.client.copy_request(from, to, true).await
+        self.client.copy_request(from, to).send().await?;
+        Ok(())
     }
 
     async fn copy_if_not_exists(&self, from: &Path, to: &Path) -> Result<()> {
-        self.client.copy_request(from, to, false).await
+        match &self.client.config.copy_if_not_exists {
+            Some(S3CopyIfNotExists::Header(k, v)) => {
+                let req = self.client.copy_request(from, to);
+                match req.header(k, v).send().await {
+                    Err(Error::Precondition { path, source }) => {
+                        Err(Error::AlreadyExists { path, source })
+                    }
+                    Err(e) => Err(e),
+                    Ok(_) => Ok(()),
+                }
+            }
+            Some(S3CopyIfNotExists::Dynamo(lock)) => {
+                lock.copy_if_not_exists(&self.client, from, to).await
+            }
+            None => Err(Error::NotSupported {
+                source: "S3 does not support copy-if-not-exists".to_string().into(),
+            }),
+        }
     }
 }
 
@@ -335,8 +355,8 @@ mod tests {
         let config = AmazonS3Builder::from_env();
         let integration = config.build().unwrap();
-        let config = integration.client.config();
-        let is_local = config.endpoint.starts_with("http://");
+        let config = &integration.client.config;
+        let is_local = matches!(&config.endpoint, Some(e) if e.starts_with("http://"));
         let test_not_exists = config.copy_if_not_exists.is_some();
         let test_conditional_put = config.conditional_put.is_some();
 
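As the test above shows, `AmazonS3Builder::from_env` picks the setting up from `AWS_COPY_IF_NOT_EXISTS` (set to `dynamo:test-table` in the CI workflow). An equivalent explicit configuration might look like this (a sketch, assuming the pre-existing `with_copy_if_not_exists` setter):

    // Wire the DynamoDB commit protocol up by hand (illustrative):
    let s3 = AmazonS3Builder::from_env()
        .with_bucket_name("test-bucket")
        .with_copy_if_not_exists(S3CopyIfNotExists::Dynamo(DynamoCommit::new(
            "test-table".to_string(),
        )))
        .build()?;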
diff --git a/object_store/src/aws/precondition.rs b/object_store/src/aws/precondition.rs
index a50b57fe23f7..b59699c4f2cf 100644
--- a/object_store/src/aws/precondition.rs
+++ b/object_store/src/aws/precondition.rs
@@ -15,6 +15,7 @@
 // specific language governing permissions and limitations
 // under the License.
 
+use crate::aws::dynamo::DynamoCommit;
 use crate::config::Parse;
 
 /// Configure how to provide [`ObjectStore::copy_if_not_exists`] for [`AmazonS3`].
@@ -38,12 +39,21 @@ pub enum S3CopyIfNotExists {
     ///
     /// [`ObjectStore::copy_if_not_exists`]: crate::ObjectStore::copy_if_not_exists
     Header(String, String),
+    /// The name of a DynamoDB table to use for coordination
+    ///
+    /// Encoded as `dynamo:<table_name>` ignoring whitespace
+    ///
+    /// See [`DynamoCommit`] for more information
+    ///
+    /// This will use the same region, credentials and endpoint as configured for S3
+    Dynamo(DynamoCommit),
 }
 
 impl std::fmt::Display for S3CopyIfNotExists {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
             Self::Header(k, v) => write!(f, "header: {}: {}", k, v),
+            Self::Dynamo(lock) => write!(f, "dynamo: {}", lock.table_name()),
         }
     }
 }
@@ -56,6 +66,7 @@ impl S3CopyIfNotExists {
                 let (k, v) = value.split_once(':')?;
                 Some(Self::Header(k.trim().to_string(), v.trim().to_string()))
             }
+            "dynamo" => Some(Self::Dynamo(DynamoCommit::new(value.trim().to_string()))),
             _ => None,
         }
    }
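The accepted string form and the `Display` impl mirror one another; inside this module one could verify the round-trip like so (a sketch; `from_str` is module-private and the table name is trimmed):

    let v = S3CopyIfNotExists::from_str("dynamo: test-table").unwrap();
    assert_eq!(v.to_string(), "dynamo: test-table");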