From 34b15793b40a8e4f9414c50a3113e60db3bc460d Mon Sep 17 00:00:00 2001
From: Isabel Atkinson
Date: Thu, 2 Nov 2023 12:21:54 -0600
Subject: [PATCH 01/75] api shell

---
 src/client.rs               |   1 +
 src/client/bulk_write.rs    | 111 ++++++++++++++++++++++++++++++++++++
 src/operation.rs            |   1 +
 src/operation/bulk_write.rs |  89 +++++++++++++++++++++++++++++
 src/results.rs              |   6 +-
 5 files changed, 205 insertions(+), 3 deletions(-)
 create mode 100644 src/client/bulk_write.rs
 create mode 100644 src/operation/bulk_write.rs

diff --git a/src/client.rs b/src/client.rs
index a240f97a0..4ab439eb5 100644
--- a/src/client.rs
+++ b/src/client.rs
@@ -1,5 +1,6 @@
 pub mod action;
 pub mod auth;
+pub(crate) mod bulk_write;
 #[cfg(feature = "in-use-encryption-unstable")]
 pub(crate) mod csfle;
 mod executor;
diff --git a/src/client/bulk_write.rs b/src/client/bulk_write.rs
new file mode 100644
index 000000000..2c12bd099
--- /dev/null
+++ b/src/client/bulk_write.rs
@@ -0,0 +1,111 @@
+#![allow(missing_docs, unused_variables, dead_code)]
+
+use crate::{
+    bson::Document,
+    error::{Error, WriteError},
+    options::UpdateModifications,
+    results::{DeleteResult, InsertOneResult, UpdateResult},
+    Client,
+    Namespace,
+};
+use bson::{Array, Bson};
+use std::collections::HashMap;
+
+impl Client {
+    pub async fn bulk_write(
+        models: impl IntoIterator,
+        options: impl Into<Option<BulkWriteOptions>>,
+    ) -> Result<BulkWriteResult, BulkWriteError> {
+        todo!()
+    }
+}
+
+#[derive(Clone, Debug)]
+#[non_exhaustive]
+pub enum BulkWriteModel {
+    InsertOne {
+        namespace: Namespace,
+        document: Document,
+    },
+    DeleteOne {
+        namespace: Namespace,
+        filter: Document,
+        collation: Option<Document>,
+        hint: Option<Bson>,
+    },
+    DeleteMany {
+        namespace: Namespace,
+        filter: Document,
+        collation: Option<Document>,
+        hint: Option<Bson>,
+    },
+    ReplaceOne {
+        namespace: Namespace,
+        filter: Document,
+        replacement: Document,
+        collation: Option<Document>,
+        hint: Option<Bson>,
+        upsert: Option<bool>,
+        let_vars: Option<Document>,
+    },
+    UpdateOne {
+        namespace: Namespace,
+        filter: Document,
+        update: UpdateModifications,
+        array_filters: Option<Array>,
+        collation: Option<Document>,
+        hint: Option<Bson>,
+        upsert: Option<bool>,
+        let_vars: Option<Document>,
+    },
+    UpdateMany {
+        namespace: Namespace,
+        filter: Document,
+        update: UpdateModifications,
+        array_filters: Option<Array>,
+        collation: Option<Document>,
+        hint: Option<Bson>,
+        upsert: Option<bool>,
+    },
+}
+
+#[derive(Clone, Debug)]
+#[non_exhaustive]
+pub struct BulkWriteOptions {
+    pub ordered: Option<bool>,
+    pub bypass_document_validation: Option<bool>,
+    pub comment: Option<Bson>,
+    pub let_vars: Option<Document>,
+    pub verbose_results: Option<bool>,
+}
+
+#[derive(Clone, Debug)]
+#[non_exhaustive]
+pub struct BulkWriteResult {
+    pub inserted_count: i32,
+    pub upserted_count: i32,
+    pub matched_count: i32,
+    pub modified_count: i32,
+    pub deleted_count: i32,
+    pub insert_results: HashMap<usize, InsertOneResult>,
+    pub update_results: HashMap<usize, UpdateResult>,
+    pub delete_results: HashMap<usize, DeleteResult>,
+}
+
+#[derive(Clone, Debug)]
+#[non_exhaustive]
+pub struct BulkWriteError {
+    error: Option<Error>,
+    write_errors: Vec<BulkWriteOperationError>,
+    write_result: Option<BulkWriteResult>,
+    processed_requests: Vec<BulkWriteModel>,
+    unprocessed_requests: Vec<BulkWriteModel>,
+}
+
+#[derive(Clone, Debug)]
+#[non_exhaustive]
+pub struct BulkWriteOperationError {
+    index: i32,
+    request: BulkWriteModel,
+    error: WriteError,
+}
diff --git a/src/operation.rs b/src/operation.rs
index 1042010d8..7f5511638 100644
--- a/src/operation.rs
+++ b/src/operation.rs
@@ -1,5 +1,6 @@
 mod abort_transaction;
 pub(crate) mod aggregate;
+mod bulk_write;
 mod commit_transaction;
 pub(crate) mod count;
 pub(crate) mod count_documents;
diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs
new file mode 100644
index 000000000..812eab71b
--- /dev/null
+++ b/src/operation/bulk_write.rs
@@ -0,0 +1,89 @@
+#![allow(unused_variables, dead_code)]
+
+use crate::{
+    bson::RawDocumentBuf,
+    client::bulk_write::{BulkWriteModel, BulkWriteOptions, BulkWriteResult},
+    operation::OperationWithDefaults,
+};
+
+pub(crate) struct BulkWrite {
+    models: Vec<BulkWriteModel>,
+    options: Option<BulkWriteOptions>,
+}
+
+impl OperationWithDefaults for BulkWrite {
+    type O = BulkWriteResult;
+
+    type Command = RawDocumentBuf;
+
+    const NAME: &'static str = "bulkWrite";
+
+    fn build(
+        &mut self,
+        description: &crate::cmap::StreamDescription,
+    ) -> crate::error::Result<crate::cmap::Command<Self::Command>> {
+        todo!()
+    }
+
+    fn serialize_command(
+        &mut self,
+        cmd: crate::cmap::Command<Self::Command>,
+    ) -> crate::error::Result<Vec<u8>> {
+        todo!()
+    }
+
+    fn extract_at_cluster_time(
+        &self,
+        _response: &bson::RawDocument,
+    ) -> crate::error::Result<Option<bson::Timestamp>> {
+        todo!()
+    }
+
+    fn handle_response(
+        &self,
+        response: crate::cmap::RawCommandResponse,
+        description: &crate::cmap::StreamDescription,
+    ) -> crate::error::Result<Self::O> {
+        todo!()
+    }
+
+    fn handle_error(&self, error: crate::error::Error) -> crate::error::Result<Self::O> {
+        todo!()
+    }
+
+    fn selection_criteria(&self) -> Option<&crate::selection_criteria::SelectionCriteria> {
+        todo!()
+    }
+
+    fn is_acknowledged(&self) -> bool {
+        todo!()
+    }
+
+    fn write_concern(&self) -> Option<&crate::options::WriteConcern> {
+        todo!()
+    }
+
+    fn supports_read_concern(&self, _description: &crate::cmap::StreamDescription) -> bool {
+        todo!()
+    }
+
+    fn supports_sessions(&self) -> bool {
+        todo!()
+    }
+
+    fn retryability(&self) -> super::Retryability {
+        todo!()
+    }
+
+    fn update_for_retry(&mut self) {
+        todo!()
+    }
+
+    fn pinned_connection(&self) -> Option<&crate::cmap::conn::PinnedConnectionHandle> {
+        todo!()
+    }
+
+    fn name(&self) -> &str {
+        todo!()
+    }
+}
diff --git a/src/results.rs b/src/results.rs
index f15e5c852..865a6b154 100644
--- a/src/results.rs
+++ b/src/results.rs
@@ -15,7 +15,7 @@ use serde::{Deserialize, Serialize};
 
 /// The result of a [`Collection::insert_one`](../struct.Collection.html#method.insert_one)
 /// operation.
-#[derive(Debug, Serialize)]
+#[derive(Clone, Debug, Serialize)]
 #[serde(rename_all = "camelCase")]
 #[non_exhaustive]
 pub struct InsertOneResult {
@@ -51,7 +51,7 @@ impl InsertManyResult {
 
 /// The result of a [`Collection::update_one`](../struct.Collection.html#method.update_one) or
 /// [`Collection::update_many`](../struct.Collection.html#method.update_many) operation.
-#[derive(Debug, Serialize)]
+#[derive(Clone, Debug, Serialize)]
 #[serde(rename_all = "camelCase")]
 #[non_exhaustive]
 pub struct UpdateResult {
@@ -69,7 +69,7 @@ pub struct UpdateResult {
 
 /// The result of a [`Collection::delete_one`](../struct.Collection.html#method.delete_one) or
 /// [`Collection::delete_many`](../struct.Collection.html#method.delete_many) operation.
-#[derive(Debug, Serialize)] +#[derive(Clone, Debug, Serialize)] #[serde(rename_all = "camelCase")] #[non_exhaustive] pub struct DeleteResult { From 8e387ab3074aa62ae117ece703986de44656b787 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 10 Nov 2023 12:41:30 -0700 Subject: [PATCH 02/75] basic command building and outcome testing --- src/client/bulk_write.rs | 123 +++++----------- src/client/bulk_write/models.rs | 115 +++++++++++++++ src/coll.rs | 24 +++- src/hello.rs | 2 +- src/operation.rs | 1 + src/operation/bulk_write.rs | 131 +++++++++--------- src/test.rs | 1 + src/test/bulk_write.rs | 6 + .../new-bulk-write/deleteMany-basic.json | 86 ++++++++++++ .../new-bulk-write/deleteOne-basic.json | 81 +++++++++++ .../new-bulk-write/insertOne-basic.json | 95 +++++++++++++ .../new-bulk-write/replaceOne-basic.json | 93 +++++++++++++ .../new-bulk-write/updateMany-basic.json | 103 ++++++++++++++ .../new-bulk-write/updateOne-basic.json | 95 +++++++++++++ src/test/spec/unified_runner/operation.rs | 5 +- .../unified_runner/operation/bulk_write.rs | 95 +++++++++++++ src/test/spec/unified_runner/test_runner.rs | 1 - 17 files changed, 898 insertions(+), 159 deletions(-) create mode 100644 src/client/bulk_write/models.rs create mode 100644 src/test/bulk_write.rs create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/deleteMany-basic.json create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/deleteOne-basic.json create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/insertOne-basic.json create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/replaceOne-basic.json create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/updateMany-basic.json create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/updateOne-basic.json create mode 100644 src/test/spec/unified_runner/operation/bulk_write.rs diff --git a/src/client/bulk_write.rs b/src/client/bulk_write.rs index 2c12bd099..32a4ab40e 100644 --- a/src/client/bulk_write.rs +++ b/src/client/bulk_write.rs @@ -1,111 +1,56 @@ #![allow(missing_docs, unused_variables, dead_code)] +pub(crate) mod models; + +use serde::{Deserialize, Serialize, Serializer}; +use serde_with::skip_serializing_none; + use crate::{ - bson::Document, - error::{Error, WriteError}, - options::UpdateModifications, - results::{DeleteResult, InsertOneResult, UpdateResult}, + bson::{Bson, Document}, + error::Result, + operation::BulkWrite, Client, - Namespace, }; -use bson::{Array, Bson}; -use std::collections::HashMap; + +use models::{add_ids_to_insert_one_models, WriteModel}; impl Client { pub async fn bulk_write( - models: impl IntoIterator, + &self, + models: impl IntoIterator, options: impl Into>, - ) -> Result { - todo!() - } -} + ) -> Result<()> { + let mut models: Vec<_> = models.into_iter().collect(); + let inserted_ids = add_ids_to_insert_one_models(&mut models)?; -#[derive(Clone, Debug)] -#[non_exhaustive] -pub enum BulkWriteModel { - InsertOne { - namespace: Namespace, - document: Document, - }, - DeleteOne { - namespace: Namespace, - filter: Document, - collation: Option, - hint: Option, - }, - DeleteMany { - namespace: Namespace, - filter: Document, - collation: Option, - hint: Option, - }, - ReplaceOne { - namespace: Namespace, - filter: Document, - replacement: Document, - collation: Option, - hint: Option, - upsert: Option, - let_vars: Option, - }, - UpdateOne { - namespace: Namespace, - filter: Document, - update: UpdateModifications, - array_filters: Option, - collation: Option, - hint: Option, - upsert: Option, 
- let_vars: Option, - }, - UpdateMany { - namespace: Namespace, - filter: Document, - update: UpdateModifications, - array_filters: Option, - collation: Option, - hint: Option, - upsert: Option, - }, + let bulk_write = BulkWrite { + models: models, + options: options.into(), + client: self.clone(), + }; + self.execute_operation(bulk_write, None).await.map(|_| ()) + } } -#[derive(Clone, Debug)] +#[skip_serializing_none] +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] #[non_exhaustive] pub struct BulkWriteOptions { pub ordered: Option, pub bypass_document_validation: Option, pub comment: Option, + #[serde(rename = "let")] pub let_vars: Option, + #[serialize_always] + #[serde(rename = "errorsOnly", serialize_with = "serialize_opposite_bool")] pub verbose_results: Option, } -#[derive(Clone, Debug)] -#[non_exhaustive] -pub struct BulkWriteResult { - pub inserted_count: i32, - pub upserted_count: i32, - pub matched_count: i32, - pub modified_count: i32, - pub deleted_count: i32, - pub insert_results: HashMap, - pub update_results: HashMap, - pub delete_results: HashMap, -} - -#[derive(Clone, Debug)] -#[non_exhaustive] -pub struct BulkWriteError { - error: Option, - write_errors: Vec, - write_result: Option, - processed_requests: Vec, - unprocessed_requests: Vec, -} - -#[derive(Clone, Debug)] -#[non_exhaustive] -pub struct BulkWriteOperationError { - index: i32, - request: BulkWriteModel, - error: WriteError, +fn serialize_opposite_bool( + val: &Option, + serializer: S, +) -> std::result::Result { + let val = !val.unwrap_or(false); + serializer.serialize_bool(val) } diff --git a/src/client/bulk_write/models.rs b/src/client/bulk_write/models.rs new file mode 100644 index 000000000..60aee5e06 --- /dev/null +++ b/src/client/bulk_write/models.rs @@ -0,0 +1,115 @@ +use std::collections::HashMap; + +use serde::Serialize; +use serde_with::skip_serializing_none; + +use crate::{ + bson::{oid::ObjectId, Bson, Document, RawDocumentBuf}, + error::Result, + options::UpdateModifications, + Namespace, +}; + +#[skip_serializing_none] +#[derive(Clone, Debug, Serialize)] +#[serde(untagged, rename_all = "camelCase")] +#[non_exhaustive] +pub enum WriteModel { + #[non_exhaustive] + InsertOne { + #[serde(skip)] + namespace: Namespace, + document: Document, + }, + #[non_exhaustive] + UpdateOne { + #[serde(skip)] + namespace: Namespace, + filter: Document, + #[serde(rename = "updateMods")] + update: UpdateModifications, + }, + #[non_exhaustive] + UpdateMany { + #[serde(skip)] + namespace: Namespace, + filter: Document, + #[serde(rename = "updateMods")] + update: UpdateModifications, + }, + #[non_exhaustive] + ReplaceOne { + #[serde(skip)] + namespace: Namespace, + filter: Document, + #[serde(rename = "updateMods")] + replacement: Document, + }, + #[non_exhaustive] + DeleteOne { + #[serde(skip)] + namespace: Namespace, + filter: Document, + }, + #[non_exhaustive] + DeleteMany { + #[serde(skip)] + namespace: Namespace, + filter: Document, + }, +} + +impl WriteModel { + pub(crate) fn namespace(&self) -> &Namespace { + match self { + Self::InsertOne { namespace, .. } => namespace, + Self::UpdateOne { namespace, .. } => namespace, + Self::UpdateMany { namespace, .. } => namespace, + Self::ReplaceOne { namespace, .. } => namespace, + Self::DeleteOne { namespace, .. } => namespace, + Self::DeleteMany { namespace, .. } => namespace, + } + } + + pub(crate) fn operation_name(&self) -> &'static str { + match self { + Self::DeleteOne { .. } | Self::DeleteMany { .. 
} => "delete", + Self::InsertOne { .. } => "insert", + Self::ReplaceOne { .. } | Self::UpdateOne { .. } | Self::UpdateMany { .. } => "update", + } + } + + pub(crate) fn to_raw_doc(&self) -> Result { + let mut doc = bson::to_raw_document_buf(&self)?; + match self { + Self::UpdateOne { .. } | Self::ReplaceOne { .. } | Self::DeleteOne { .. } => { + doc.append("multi", false); + } + Self::UpdateMany { .. } | Self::DeleteMany { .. } => { + doc.append("multi", true); + } + _ => {} + } + Ok(doc) + } +} + +pub(crate) fn add_ids_to_insert_one_models( + models: &mut [WriteModel], +) -> Result> { + let mut ids = HashMap::new(); + for (i, model) in models.iter_mut().enumerate() { + if let WriteModel::InsertOne { document, .. } = model { + let id = match document.get("_id") { + Some(id) => id.clone(), + None => { + let id = ObjectId::new(); + document.insert("_id", id); + Bson::ObjectId(id) + } + }; + ids.insert(i, id); + } + } + Ok(ids) +} diff --git a/src/coll.rs b/src/coll.rs index 001484866..8e2fad232 100644 --- a/src/coll.rs +++ b/src/coll.rs @@ -740,7 +740,7 @@ where } /// A struct modeling the canonical name for a collection in MongoDB. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Namespace { /// The name of the database associated with this namespace. pub db: String, @@ -793,9 +793,25 @@ impl<'de> Deserialize<'de> for Namespace { where D: Deserializer<'de>, { - let s: String = Deserialize::deserialize(deserializer)?; - Self::from_str(&s) - .ok_or_else(|| D::Error::custom("Missing one or more fields in namespace")) + #[derive(Deserialize)] + struct NamespaceHelper { + db: String, + coll: String, + } + #[derive(Deserialize)] + #[serde(untagged)] + enum NamespaceOptions { + String(String), + Object(NamespaceHelper), + } + match NamespaceOptions::deserialize(deserializer)? { + NamespaceOptions::String(string) => Self::from_str(&string) + .ok_or_else(|| D::Error::custom("Missing one or more fields in namespace")), + NamespaceOptions::Object(object) => Ok(Self { + db: object.db, + coll: object.coll, + }), + } } } diff --git a/src/hello.rs b/src/hello.rs index 88b547d7e..1b2625fbc 100644 --- a/src/hello.rs +++ b/src/hello.rs @@ -130,8 +130,8 @@ pub(crate) struct HelloCommandResponse { /// Whether the server is an arbiter. pub arbiter_only: Option, - #[serde(rename = "isreplicaset")] + #[serde(rename = "isreplicaset")] /// Whether the server is a replica set. 
pub is_replica_set: Option, diff --git a/src/operation.rs b/src/operation.rs index 7f5511638..011f7f070 100644 --- a/src/operation.rs +++ b/src/operation.rs @@ -53,6 +53,7 @@ use crate::{ }; pub(crate) use abort_transaction::AbortTransaction; +pub(crate) use bulk_write::BulkWrite; pub(crate) use commit_transaction::CommitTransaction; pub(crate) use create_indexes::CreateIndexes; pub(crate) use delete::Delete; diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index 812eab71b..c8409f620 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -1,89 +1,94 @@ #![allow(unused_variables, dead_code)] +use std::collections::HashMap; + use crate::{ - bson::RawDocumentBuf, - client::bulk_write::{BulkWriteModel, BulkWriteOptions, BulkWriteResult}, + bson::{rawdoc, RawArrayBuf, RawDocumentBuf}, + bson_util, + client::bulk_write::{models::WriteModel, BulkWriteOptions}, + cmap::{Command, RawCommandResponse, StreamDescription}, + error::{Error, Result}, operation::OperationWithDefaults, + Client, + Namespace, }; pub(crate) struct BulkWrite { - models: Vec, - options: Option, + pub(crate) models: Vec, + pub(crate) options: Option, + pub(crate) client: Client, +} +/// A helper struct for tracking namespace information. +struct NamespaceInfo<'a> { + namespaces: RawArrayBuf, + // Cache the namespaces and their indexes to avoid traversing the namespaces array each time a + // namespace is looked up or added. + cache: HashMap<&'a Namespace, usize>, } -impl OperationWithDefaults for BulkWrite { - type O = BulkWriteResult; - - type Command = RawDocumentBuf; - - const NAME: &'static str = "bulkWrite"; - - fn build( - &mut self, - description: &crate::cmap::StreamDescription, - ) -> crate::error::Result> { - todo!() - } - - fn serialize_command( - &mut self, - cmd: crate::cmap::Command, - ) -> crate::error::Result> { - todo!() - } - - fn extract_at_cluster_time( - &self, - _response: &bson::RawDocument, - ) -> crate::error::Result> { - todo!() +impl<'a> NamespaceInfo<'a> { + fn new() -> Self { + Self { + namespaces: RawArrayBuf::new(), + cache: HashMap::new(), + } } - fn handle_response( - &self, - response: crate::cmap::RawCommandResponse, - description: &crate::cmap::StreamDescription, - ) -> crate::error::Result { - todo!() + /// Gets the index for the given namespace in the nsInfo list, adding it to the list if it is + /// not already present. + fn get_index(&mut self, namespace: &'a Namespace) -> usize { + match self.cache.get(namespace) { + Some(index) => *index, + None => { + self.namespaces + .push(rawdoc! 
{ "ns": namespace.to_string() }); + let next_index = self.cache.len(); + self.cache.insert(namespace, next_index); + next_index + } + } } +} - fn handle_error(&self, error: crate::error::Error) -> crate::error::Result { - todo!() - } +impl OperationWithDefaults for BulkWrite { + type O = (); - fn selection_criteria(&self) -> Option<&crate::selection_criteria::SelectionCriteria> { - todo!() - } + type Command = RawDocumentBuf; - fn is_acknowledged(&self) -> bool { - todo!() - } + const NAME: &'static str = "bulkWrite"; - fn write_concern(&self) -> Option<&crate::options::WriteConcern> { - todo!() - } + fn build(&mut self, description: &StreamDescription) -> Result> { + let mut namespace_info = NamespaceInfo::new(); + let mut ops = RawArrayBuf::new(); + for model in &self.models { + let namespace_index = namespace_info.get_index(model.namespace()); - fn supports_read_concern(&self, _description: &crate::cmap::StreamDescription) -> bool { - todo!() - } + let mut model_doc = rawdoc! { model.operation_name(): namespace_index as i32 }; + let model_fields = model.to_raw_doc()?; + bson_util::extend_raw_document_buf(&mut model_doc, model_fields)?; - fn supports_sessions(&self) -> bool { - todo!() - } + ops.push(model_doc); + } - fn retryability(&self) -> super::Retryability { - todo!() - } + let command = rawdoc! { + Self::NAME: 1, + "ops": ops, + "nsInfo": namespace_info.namespaces, + }; - fn update_for_retry(&mut self) { - todo!() + Ok(Command::new(Self::NAME, "admin", command)) } - fn pinned_connection(&self) -> Option<&crate::cmap::conn::PinnedConnectionHandle> { - todo!() + fn handle_response( + &self, + response: RawCommandResponse, + description: &StreamDescription, + ) -> Result { + response.body::()?; + Ok(()) } - fn name(&self) -> &str { - todo!() + fn handle_error(&self, error: Error) -> Result { + Err(error) } } diff --git a/src/test.rs b/src/test.rs index 4e1a52c70..9acb760ae 100644 --- a/src/test.rs +++ b/src/test.rs @@ -2,6 +2,7 @@ mod atlas_connectivity; mod atlas_planned_maintenance_testing; #[cfg(feature = "aws-auth")] mod auth_aws; +mod bulk_write; mod change_stream; mod client; mod coll; diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs new file mode 100644 index 000000000..dc37460c7 --- /dev/null +++ b/src/test/bulk_write.rs @@ -0,0 +1,6 @@ +use crate::test::spec::unified_runner::run_unified_tests; + +#[tokio::test] +async fn run_unified() { + run_unified_tests(&["crud", "unified", "new-bulk-write"]).await; +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/deleteMany-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/deleteMany-basic.json new file mode 100644 index 000000000..3f94f8c93 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/deleteMany-basic.json @@ -0,0 +1,86 @@ +{ + "description": "Client bulkWrite deleteMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "Client bulkWrite deleteMany", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { 
+ "deleteMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": { + "$lte": 2 + } + } + } + } + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/deleteOne-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/deleteOne-basic.json new file mode 100644 index 000000000..080fdd2b5 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/deleteOne-basic.json @@ -0,0 +1,81 @@ +{ + "description": "Client bulkWrite deleteOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Client bulkWrite deleteOne", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + } + } + } + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/insertOne-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/insertOne-basic.json new file mode 100644 index 000000000..83b692bbf --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/insertOne-basic.json @@ -0,0 +1,95 @@ +{ + "description": "Client bulkWrite insertOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "Client bulkWrite insertOne", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "document": { + "_id": 2 + } + } + }, + { + "insertOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "document": { + "_id": 3 + } + } + } + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/replaceOne-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/replaceOne-basic.json new file mode 100644 index 000000000..29680a767 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/replaceOne-basic.json @@ -0,0 +1,93 @@ +{ + "description": "Client bulkWrite replaceOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + 
"id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Client bulkWrite replaceOne", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 222 + } + } + } + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 222 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/updateMany-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/updateMany-basic.json new file mode 100644 index 000000000..d4eb9caac --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/updateMany-basic.json @@ -0,0 +1,103 @@ +{ + "description": "Client bulkWrite updateMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Client bulkWrite updateMany", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/updateOne-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/updateOne-basic.json new file mode 100644 index 000000000..ded819189 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/updateOne-basic.json @@ -0,0 +1,95 @@ +{ + "description": "Client bulkWrite updateOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Client bulkWrite updateOne", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + 
"requests": [ + { + "updateOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/unified_runner/operation.rs b/src/test/spec/unified_runner/operation.rs index 143a7b225..e1bf41b02 100644 --- a/src/test/spec/unified_runner/operation.rs +++ b/src/test/spec/unified_runner/operation.rs @@ -1,3 +1,5 @@ +mod bulk_write; + #[cfg(feature = "in-use-encryption-unstable")] mod csfle; #[cfg(feature = "in-use-encryption-unstable")] @@ -77,7 +79,7 @@ use crate::{ runtime, selection_criteria::ReadPreference, serde_util, - test::FailPoint, + test::{spec::unified_runner::operation::bulk_write::BulkWrite, FailPoint}, Collection, Database, IndexModel, @@ -399,6 +401,7 @@ impl<'de> Deserialize<'de> for Operation { "updateSearchIndex" => { deserialize_op::(definition.arguments) } + "bulkWrite" => deserialize_op::(definition.arguments), s => Ok(Box::new(UnimplementedOperation { _name: s.to_string(), }) as Box), diff --git a/src/test/spec/unified_runner/operation/bulk_write.rs b/src/test/spec/unified_runner/operation/bulk_write.rs new file mode 100644 index 000000000..d05efae6b --- /dev/null +++ b/src/test/spec/unified_runner/operation/bulk_write.rs @@ -0,0 +1,95 @@ +use futures_core::future::BoxFuture; +use futures_util::FutureExt; +use serde::Deserialize; + +use crate::{ + bson::Document, + client::bulk_write::models::WriteModel, + coll::options::UpdateModifications, + error::Result, + test::spec::unified_runner::{Entity, TestRunner}, + Namespace, +}; + +use super::TestOperation; + +#[derive(Debug, Deserialize)] +pub(super) struct BulkWrite { + requests: Vec, +} + +impl<'de> Deserialize<'de> for WriteModel { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + use serde::de::Error as DeError; + + #[derive(Deserialize)] + struct WriteModelHelper { + namespace: Namespace, + document: Option, + filter: Option, + update: Option, + replacement: Option, + } + + let model_doc = Document::deserialize(deserializer)?; + let Some((key, value)) = model_doc.into_iter().next() else { + return Err(DeError::custom("empty write model")); + }; + let body: WriteModelHelper = bson::from_bson(value).map_err(DeError::custom)?; + + let model = match key.as_str() { + "insertOne" => WriteModel::InsertOne { + namespace: body.namespace, + document: body.document.unwrap(), + }, + "updateOne" => WriteModel::UpdateOne { + namespace: body.namespace, + filter: body.filter.unwrap(), + update: body.update.unwrap(), + }, + "updateMany" => WriteModel::UpdateMany { + namespace: body.namespace, + filter: body.filter.unwrap(), + update: body.update.unwrap(), + }, + "replaceOne" => WriteModel::ReplaceOne { + namespace: body.namespace, + filter: body.filter.unwrap(), + replacement: body.replacement.unwrap(), + }, + "deleteOne" => WriteModel::DeleteOne { + namespace: body.namespace, + filter: body.filter.unwrap(), + }, + "deleteMany" => WriteModel::DeleteMany { + namespace: body.namespace, + filter: body.filter.unwrap(), + }, + other => { + return Err(DeError::custom(format!( + "unknown bulkWrite operation: {other}" + ))) + } + }; + + Ok(model) + } +} + +impl TestOperation for BulkWrite { + fn execute_entity_operation<'a>( + &'a self, + id: &'a str, + test_runner: &'a 
TestRunner, + ) -> BoxFuture<'a, Result>> { + async move { + let client = test_runner.get_client(id).await; + client.bulk_write(self.requests.clone(), None).await?; + Ok(None) + } + .boxed() + } +} diff --git a/src/test/spec/unified_runner/test_runner.rs b/src/test/spec/unified_runner/test_runner.rs index 2c6db3b5e..c7a609293 100644 --- a/src/test/spec/unified_runner/test_runner.rs +++ b/src/test/spec/unified_runner/test_runner.rs @@ -65,7 +65,6 @@ use crate::test::{ }; const SKIPPED_OPERATIONS: &[&str] = &[ - "bulkWrite", "count", "listCollectionObjects", "listDatabaseObjects", From c4ffe010c318781171101c005170f8b7d885be40 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 7 Dec 2023 13:10:15 -0700 Subject: [PATCH 03/75] add results --- src/client/bulk_write.rs | 24 +-- src/client/bulk_write/actions.rs | 104 ++++++++++++ src/client/bulk_write/results.rs | 107 ++++++++++++ src/operation.rs | 10 +- src/operation/bulk_write.rs | 67 +++++++- src/operation/update.rs | 7 +- src/serde_util.rs | 16 +- .../new-bulk-write/deleteMany-basic.json | 130 +++++++++++++- .../new-bulk-write/deleteOne-basic.json | 126 +++++++++++++- .../new-bulk-write/insertOne-basic.json | 101 ++++++++++- .../new-bulk-write/replaceOne-basic.json | 148 +++++++++++++++- .../new-bulk-write/updateMany-basic.json | 160 +++++++++++++++++- .../new-bulk-write/updateOne-basic.json | 152 ++++++++++++++++- src/test/spec/unified_runner/operation.rs | 6 +- .../unified_runner/operation/bulk_write.rs | 13 +- 15 files changed, 1125 insertions(+), 46 deletions(-) create mode 100644 src/client/bulk_write/actions.rs create mode 100644 src/client/bulk_write/results.rs diff --git a/src/client/bulk_write.rs b/src/client/bulk_write.rs index 32a4ab40e..adcd44be0 100644 --- a/src/client/bulk_write.rs +++ b/src/client/bulk_write.rs @@ -1,39 +1,31 @@ #![allow(missing_docs, unused_variables, dead_code)] +pub(crate) mod actions; pub(crate) mod models; +pub(crate) mod results; use serde::{Deserialize, Serialize, Serializer}; use serde_with::skip_serializing_none; use crate::{ bson::{Bson, Document}, - error::Result, - operation::BulkWrite, Client, }; -use models::{add_ids_to_insert_one_models, WriteModel}; +use actions::SummaryBulkWriteAction; +use models::WriteModel; impl Client { - pub async fn bulk_write( + pub fn bulk_write( &self, models: impl IntoIterator, - options: impl Into>, - ) -> Result<()> { - let mut models: Vec<_> = models.into_iter().collect(); - let inserted_ids = add_ids_to_insert_one_models(&mut models)?; - - let bulk_write = BulkWrite { - models: models, - options: options.into(), - client: self.clone(), - }; - self.execute_operation(bulk_write, None).await.map(|_| ()) + ) -> SummaryBulkWriteAction { + SummaryBulkWriteAction::new(self.clone(), models.into_iter().collect()) } } #[skip_serializing_none] -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Clone, Debug, Default, Deserialize, Serialize)] #[serde(rename_all = "camelCase")] #[non_exhaustive] pub struct BulkWriteOptions { diff --git a/src/client/bulk_write/actions.rs b/src/client/bulk_write/actions.rs new file mode 100644 index 000000000..de48a5487 --- /dev/null +++ b/src/client/bulk_write/actions.rs @@ -0,0 +1,104 @@ +use std::future::IntoFuture; + +use futures_core::future::BoxFuture; +use futures_util::FutureExt; + +use crate::{ + error::{ErrorKind, Result}, + operation::BulkWrite, + Client, +}; + +use super::{ + models::add_ids_to_insert_one_models, + results::{SummaryBulkWriteResult, VerboseBulkWriteResult}, + BulkWriteOptions, + WriteModel, +}; + +pub 
struct VerboseBulkWriteAction { + client: Client, + models: Vec, + options: BulkWriteOptions, +} + +impl IntoFuture for VerboseBulkWriteAction { + type Output = Result; + type IntoFuture = BoxFuture<'static, Self::Output>; + + fn into_future(mut self) -> Self::IntoFuture { + async move { + let inserted_ids = add_ids_to_insert_one_models(&mut self.models)?; + + let bulk_write = BulkWrite { + models: &self.models, + options: self.options, + client: self.client.clone(), + }; + let (mut cursor, summary_info) = + self.client.execute_operation(bulk_write, None).await?; + + let mut results = VerboseBulkWriteResult::new(summary_info, inserted_ids); + while cursor.advance().await? { + let response = cursor.deserialize_current()?; + let model = + self.models + .get(response.index) + .ok_or_else(|| ErrorKind::InvalidResponse { + message: format!( + "unknown index returned from bulkWrite: {}", + response.index + ), + })?; + + match model { + WriteModel::InsertOne { .. } => { + debug_assert!(!response.is_update_result()); + } + WriteModel::UpdateOne { .. } + | WriteModel::UpdateMany { .. } + | WriteModel::ReplaceOne { .. } => { + results.add_update_result(response)?; + } + WriteModel::DeleteOne { .. } | WriteModel::DeleteMany { .. } => { + debug_assert!(!response.is_update_result()); + results.add_delete_result(response); + } + } + } + + Ok(results) + } + .boxed() + } +} + +pub struct SummaryBulkWriteAction { + inner: VerboseBulkWriteAction, +} + +impl SummaryBulkWriteAction { + pub(crate) fn new(client: Client, models: Vec) -> Self { + Self { + inner: VerboseBulkWriteAction { + client, + models, + options: Default::default(), + }, + } + } + + pub fn verbose_results(mut self) -> VerboseBulkWriteAction { + self.inner.options.verbose_results = Some(true); + self.inner + } +} + +impl IntoFuture for SummaryBulkWriteAction { + type Output = Result; + type IntoFuture = BoxFuture<'static, Self::Output>; + + fn into_future(self) -> Self::IntoFuture { + async move { self.inner.await.map(Into::into) }.boxed() + } +} diff --git a/src/client/bulk_write/results.rs b/src/client/bulk_write/results.rs new file mode 100644 index 000000000..e205264d8 --- /dev/null +++ b/src/client/bulk_write/results.rs @@ -0,0 +1,107 @@ +#![allow(missing_docs)] + +use std::collections::HashMap; + +use serde::Serialize; + +use crate::{ + bson::Bson, + error::{Error, ErrorKind, Result}, + operation::{BulkWriteOperationResponse, BulkWriteSummaryInfo}, + results::{DeleteResult, InsertOneResult, UpdateResult}, + serde_util::serialize_indexed_map, +}; + +#[derive(Clone, Debug, Default, Serialize)] +#[serde(rename_all = "camelCase")] +#[non_exhaustive] +pub struct VerboseBulkWriteResult { + pub inserted_count: i64, + pub upserted_count: i64, + pub matched_count: i64, + pub modified_count: i64, + pub deleted_count: i64, + #[serde(serialize_with = "serialize_indexed_map")] + pub insert_results: HashMap, + #[serde(serialize_with = "serialize_indexed_map")] + pub update_results: HashMap, + #[serde(serialize_with = "serialize_indexed_map")] + pub delete_results: HashMap, +} + +impl VerboseBulkWriteResult { + pub(crate) fn new( + summary_info: BulkWriteSummaryInfo, + inserted_ids: HashMap, + ) -> Self { + Self { + inserted_count: summary_info.n_inserted, + upserted_count: summary_info.n_upserted, + matched_count: summary_info.n_matched, + modified_count: summary_info.n_modified, + deleted_count: summary_info.n_deleted, + insert_results: inserted_ids + .into_iter() + .map(|(index, id)| (index, InsertOneResult { inserted_id: id })) + .collect(), + 
update_results: HashMap::new(), + delete_results: HashMap::new(), + } + } + + pub(crate) fn add_update_result(&mut self, response: BulkWriteOperationResponse) -> Result<()> { + self.update_results + .insert(response.index, response.try_into()?); + Ok(()) + } + + pub(crate) fn add_delete_result(&mut self, response: BulkWriteOperationResponse) { + let delete_result = DeleteResult { + deleted_count: response.n, + }; + self.delete_results.insert(response.index, delete_result); + } +} + +impl TryFrom for UpdateResult { + type Error = Error; + + fn try_from(response: BulkWriteOperationResponse) -> Result { + let modified_count = response + .n_modified + .ok_or_else(|| ErrorKind::InvalidResponse { + message: "missing nModified field in update operation response".into(), + })?; + Ok(Self { + matched_count: response.n, + modified_count, + upserted_id: response.upserted, + }) + } +} + +#[derive(Clone, Debug, Serialize)] +#[serde(rename_all = "camelCase")] +#[non_exhaustive] +pub struct SummaryBulkWriteResult { + pub inserted_count: i64, + pub upserted_count: i64, + pub matched_count: i64, + pub modified_count: i64, + pub deleted_count: i64, + #[serde(serialize_with = "serialize_indexed_map")] + pub insert_results: HashMap, +} + +impl From for SummaryBulkWriteResult { + fn from(verbose_result: VerboseBulkWriteResult) -> Self { + Self { + inserted_count: verbose_result.inserted_count, + upserted_count: verbose_result.upserted_count, + matched_count: verbose_result.matched_count, + modified_count: verbose_result.modified_count, + deleted_count: verbose_result.deleted_count, + insert_results: verbose_result.insert_results, + } + } +} diff --git a/src/operation.rs b/src/operation.rs index 011f7f070..5007786cb 100644 --- a/src/operation.rs +++ b/src/operation.rs @@ -53,7 +53,7 @@ use crate::{ }; pub(crate) use abort_transaction::AbortTransaction; -pub(crate) use bulk_write::BulkWrite; +pub(crate) use bulk_write::{BulkWrite, BulkWriteOperationResponse, BulkWriteSummaryInfo}; pub(crate) use commit_transaction::CommitTransaction; pub(crate) use create_indexes::CreateIndexes; pub(crate) use delete::Delete; @@ -244,7 +244,9 @@ pub(crate) fn append_options_to_raw_document( } #[derive(Deserialize, Debug)] -pub(crate) struct EmptyBody {} +pub(crate) struct SingleWriteBody { + n: u64, +} /// Body of a write response that could possibly have a write concern error but not write errors. 
#[derive(Debug, Deserialize, Default, Clone)] @@ -269,12 +271,10 @@ impl WriteConcernOnlyBody { } #[derive(Deserialize, Debug)] -pub(crate) struct WriteResponseBody { +pub(crate) struct WriteResponseBody { #[serde(flatten)] body: T, - n: u64, - #[serde(rename = "writeErrors")] write_errors: Option>, diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index c8409f620..b731a4da0 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -2,20 +2,26 @@ use std::collections::HashMap; +use serde::Deserialize; + use crate::{ - bson::{rawdoc, RawArrayBuf, RawDocumentBuf}, + bson::{rawdoc, Bson, RawArrayBuf, RawDocumentBuf}, bson_util, client::bulk_write::{models::WriteModel, BulkWriteOptions}, cmap::{Command, RawCommandResponse, StreamDescription}, + cursor::CursorSpecification, error::{Error, Result}, operation::OperationWithDefaults, Client, + Cursor, Namespace, }; -pub(crate) struct BulkWrite { - pub(crate) models: Vec, - pub(crate) options: Option, +use super::{CursorInfo, WriteResponseBody}; + +pub(crate) struct BulkWrite<'a> { + pub(crate) models: &'a [WriteModel], + pub(crate) options: BulkWriteOptions, pub(crate) client: Client, } /// A helper struct for tracking namespace information. @@ -50,8 +56,8 @@ impl<'a> NamespaceInfo<'a> { } } -impl OperationWithDefaults for BulkWrite { - type O = (); +impl<'a> OperationWithDefaults for BulkWrite<'a> { + type O = (Cursor, BulkWriteSummaryInfo); type Command = RawDocumentBuf; @@ -60,7 +66,7 @@ impl OperationWithDefaults for BulkWrite { fn build(&mut self, description: &StreamDescription) -> Result> { let mut namespace_info = NamespaceInfo::new(); let mut ops = RawArrayBuf::new(); - for model in &self.models { + for model in self.models { let namespace_index = namespace_info.get_index(model.namespace()); let mut model_doc = rawdoc! 
{ model.operation_name(): namespace_index as i32 }; @@ -84,11 +90,54 @@ impl OperationWithDefaults for BulkWrite { response: RawCommandResponse, description: &StreamDescription, ) -> Result { - response.body::()?; - Ok(()) + let response: WriteResponseBody = response.body()?; + + let specification = CursorSpecification::new( + response.body.cursor, + description.server_address.clone(), + None, + None, + None, + ); + let cursor = Cursor::new(self.client.clone(), specification, None, None); + + Ok((cursor, response.body.summary)) } fn handle_error(&self, error: Error) -> Result { Err(error) } } + +#[derive(Deserialize)] +struct BulkWriteResponse { + cursor: CursorInfo, + #[serde(flatten)] + summary: BulkWriteSummaryInfo, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct BulkWriteSummaryInfo { + pub(crate) n_inserted: i64, + pub(crate) n_matched: i64, + pub(crate) n_modified: i64, + pub(crate) n_upserted: i64, + pub(crate) n_deleted: i64, +} + +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct BulkWriteOperationResponse { + #[serde(rename = "idx")] + pub(crate) index: usize, + pub(crate) n: u64, + pub(crate) n_modified: Option, + pub(crate) upserted: Option, +} + +impl BulkWriteOperationResponse { + pub(crate) fn is_update_result(&self) -> bool { + self.n_modified.is_some() || self.upserted.is_some() + } +} diff --git a/src/operation/update.rs b/src/operation/update.rs index 3d5d34fa0..8750ef774 100644 --- a/src/operation/update.rs +++ b/src/operation/update.rs @@ -198,7 +198,11 @@ impl<'a, T: Serialize> OperationWithDefaults for Update<'a, T> { .and_then(|doc| doc.get("_id")) .cloned(); - let matched_count = if upserted_id.is_some() { 0 } else { response.n }; + let matched_count = if upserted_id.is_some() { + 0 + } else { + response.body.n + }; Ok(UpdateResult { matched_count, @@ -224,6 +228,7 @@ impl<'a, T: Serialize> OperationWithDefaults for Update<'a, T> { #[derive(Deserialize)] pub(crate) struct UpdateBody { + n: u64, #[serde(rename = "nModified")] n_modified: u64, upserted: Option>, diff --git a/src/serde_util.rs b/src/serde_util.rs index 2b7a88951..78bfa80e5 100644 --- a/src/serde_util.rs +++ b/src/serde_util.rs @@ -1,4 +1,7 @@ -use std::time::Duration; +use std::{ + collections::{BTreeMap, HashMap}, + time::Duration, +}; use bson::SerializerOptions; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -191,3 +194,14 @@ where } Ok(Some(vec)) } + +pub(crate) fn serialize_indexed_map( + map: &HashMap, + serializer: S, +) -> std::result::Result { + let string_map: BTreeMap<_, _> = map + .iter() + .map(|(index, result)| (index.to_string(), result)) + .collect(); + string_map.serialize(serializer) +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/deleteMany-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/deleteMany-basic.json index 3f94f8c93..0cf8ca7f9 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/deleteMany-basic.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/deleteMany-basic.json @@ -41,7 +41,121 @@ ], "tests": [ { - "description": "Client bulkWrite deleteMany", + "description": "Client bulkWrite deleteMany verbose", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": { + "$lte": 2 + } + } + } + } + ], + "verboseResults": true + }, + 
"expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 2, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 2 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 3 + } + ] + } + ] + }, + { + "description": "Client bulkWrite deleteMany summary", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": { + "$lte": 2 + } + } + } + } + ], + "verboseResults": false + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 2, + "insertResults": {}, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 3 + } + ] + } + ] + }, + { + "description": "Client bulkWrite deleteMany defaults to summary", "runOnRequirements": [ { "minServerVersion": "7.0" @@ -67,6 +181,20 @@ } } ] + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 2, + "insertResults": {}, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } } } ], diff --git a/src/test/spec/json/crud/unified/new-bulk-write/deleteOne-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/deleteOne-basic.json index 080fdd2b5..477085517 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/deleteOne-basic.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/deleteOne-basic.json @@ -38,7 +38,117 @@ ], "tests": [ { - "description": "Client bulkWrite deleteOne", + "description": "Client bulkWrite deleteOne verbose", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Client bulkWrite deleteOne summary", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + } + } + } + ], + "verboseResults": false + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": {}, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + }, + { + 
"description": "Client bulkWrite deleteOne defaults to summary", "runOnRequirements": [ { "minServerVersion": "7.0" @@ -62,6 +172,20 @@ } } ] + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": {}, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } } } ], diff --git a/src/test/spec/json/crud/unified/new-bulk-write/insertOne-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/insertOne-basic.json index 83b692bbf..cdfc76505 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/insertOne-basic.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/insertOne-basic.json @@ -35,7 +35,7 @@ ], "tests": [ { - "description": "Client bulkWrite insertOne", + "description": "Client bulkWrite insertOne verbose", "runOnRequirements": [ { "minServerVersion": "7.0" @@ -69,7 +69,104 @@ } } } - ] + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 2, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 2 + }, + "1": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + }, + { + "description": "Client bulkWrite insertOne summary", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "document": { + "_id": 2 + } + } + }, + { + "insertOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "document": { + "_id": 3 + } + } + } + ], + "verboseResults": false + }, + "expectResult": { + "insertedCount": 2, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 2 + }, + "1": { + "insertedId": 3 + } + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } } } ], diff --git a/src/test/spec/json/crud/unified/new-bulk-write/replaceOne-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/replaceOne-basic.json index 29680a767..e77d27f87 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/replaceOne-basic.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/replaceOne-basic.json @@ -40,7 +40,139 @@ ], "tests": [ { - "description": "Client bulkWrite replaceOne", + "description": "Client bulkWrite replaceOne verbose", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 222 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": null + } + }, + "deleteResults": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, 
+ "x": 222 + } + ] + } + ] + }, + { + "description": "Client bulkWrite replaceOne summary", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 222 + } + } + } + ], + "verboseResults": false + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 222 + } + ] + } + ] + }, + { + "description": "Client bulkWrite replaceOne defaults to summary", "runOnRequirements": [ { "minServerVersion": "7.0" @@ -69,6 +201,20 @@ } } ] + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } } } ], diff --git a/src/test/spec/json/crud/unified/new-bulk-write/updateMany-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/updateMany-basic.json index d4eb9caac..918450714 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/updateMany-basic.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/updateMany-basic.json @@ -44,7 +44,151 @@ ], "tests": [ { - "description": "Client bulkWrite updateMany", + "description": "Client bulkWrite updateMany verbose", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": null + } + }, + "deleteResults": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + }, + { + "description": "Client bulkWrite updateMany summary", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "verboseResults": false + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 
3, + "x": 34 + } + ] + } + ] + }, + { + "description": "Client bulkWrite updateMany defaults to summary", "runOnRequirements": [ { "minServerVersion": "7.0" @@ -75,6 +219,20 @@ } } ] + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } } } ], diff --git a/src/test/spec/json/crud/unified/new-bulk-write/updateOne-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/updateOne-basic.json index ded819189..d877dac07 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/updateOne-basic.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/updateOne-basic.json @@ -40,7 +40,143 @@ ], "tests": [ { - "description": "Client bulkWrite updateOne", + "description": "Client bulkWrite updateOne verbose", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": null + } + }, + "deleteResults": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + } + ] + } + ] + }, + { + "description": "Client bulkWrite updateOne summary", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "verboseResults": false + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + } + ] + } + ] + }, + { + "description": "Client bulkWrite updateOne defaults to summary", "runOnRequirements": [ { "minServerVersion": "7.0" @@ -71,6 +207,20 @@ } } ] + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } } } ], diff --git a/src/test/spec/unified_runner/operation.rs b/src/test/spec/unified_runner/operation.rs index e1bf41b02..6a92439f7 100644 --- a/src/test/spec/unified_runner/operation.rs +++ b/src/test/spec/unified_runner/operation.rs @@ -270,7 +270,7 @@ impl<'de> Deserialize<'de> for Operation { struct OperationDefinition { pub(crate) name: String, pub(crate) object: OperationObject, - #[serde(default = "default_arguments")] + #[serde(default = "Document::new")] pub(crate) arguments: Document, 
pub(crate) expect_error: Option, pub(crate) expect_result: Option, @@ -278,10 +278,6 @@ impl<'de> Deserialize<'de> for Operation { pub(crate) ignore_result_and_error: Option, } - fn default_arguments() -> Document { - doc! {} - } - let definition = OperationDefinition::deserialize(deserializer)?; let boxed_op = match definition.name.as_str() { "insertOne" => deserialize_op::(definition.arguments), diff --git a/src/test/spec/unified_runner/operation/bulk_write.rs b/src/test/spec/unified_runner/operation/bulk_write.rs index d05efae6b..7792b98ea 100644 --- a/src/test/spec/unified_runner/operation/bulk_write.rs +++ b/src/test/spec/unified_runner/operation/bulk_write.rs @@ -14,8 +14,10 @@ use crate::{ use super::TestOperation; #[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase", deny_unknown_fields)] pub(super) struct BulkWrite { requests: Vec, + verbose_results: Option, } impl<'de> Deserialize<'de> for WriteModel { @@ -87,8 +89,15 @@ impl TestOperation for BulkWrite { ) -> BoxFuture<'a, Result>> { async move { let client = test_runner.get_client(id).await; - client.bulk_write(self.requests.clone(), None).await?; - Ok(None) + let action = client.bulk_write(self.requests.clone()); + let bson = if let Some(true) = self.verbose_results { + let result = action.verbose_results().await?; + bson::to_bson(&result)? + } else { + let result = action.await?; + bson::to_bson(&result)? + }; + Ok(Some(bson.into())) } .boxed() } From fd4232d5dfd9238d81a77af27d6345275e28697f Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Tue, 12 Dec 2023 14:30:55 -0700 Subject: [PATCH 04/75] top level options support and testing --- src/client/bulk_write.rs | 13 +- src/client/bulk_write/actions.rs | 49 ++ src/client/bulk_write/models.rs | 18 +- src/client/bulk_write/results.rs | 2 +- src/operation/bulk_write.rs | 13 +- .../new-bulk-write/bulkWrite-comment.json | 97 +++ .../unified/new-bulk-write/bulkWrite-let.json | 135 ++++ .../bulkWrite-mixed_namespaces.json | 320 +++++++++ ...Many-basic.json => bulkWrite-ordered.json} | 171 +++-- .../new-bulk-write/bulkWrite-results.json | 629 ++++++++++++++++++ .../new-bulk-write/deleteOne-basic.json | 205 ------ .../new-bulk-write/insertOne-basic.json | 192 ------ .../new-bulk-write/replaceOne-basic.json | 239 ------- .../new-bulk-write/updateMany-basic.json | 261 -------- .../new-bulk-write/updateOne-basic.json | 245 ------- .../unified_runner/operation/bulk_write.rs | 191 ++++-- 16 files changed, 1523 insertions(+), 1257 deletions(-) create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-comment.json create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-let.json create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-mixed_namespaces.json rename src/test/spec/json/crud/unified/new-bulk-write/{deleteMany-basic.json => bulkWrite-ordered.json} (54%) create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-results.json delete mode 100644 src/test/spec/json/crud/unified/new-bulk-write/deleteOne-basic.json delete mode 100644 src/test/spec/json/crud/unified/new-bulk-write/insertOne-basic.json delete mode 100644 src/test/spec/json/crud/unified/new-bulk-write/replaceOne-basic.json delete mode 100644 src/test/spec/json/crud/unified/new-bulk-write/updateMany-basic.json delete mode 100644 src/test/spec/json/crud/unified/new-bulk-write/updateOne-basic.json diff --git a/src/client/bulk_write.rs b/src/client/bulk_write.rs index adcd44be0..64eab4782 100644 --- a/src/client/bulk_write.rs +++ 
b/src/client/bulk_write.rs @@ -28,17 +28,26 @@ impl Client { #[derive(Clone, Debug, Default, Deserialize, Serialize)] #[serde(rename_all = "camelCase")] #[non_exhaustive] -pub struct BulkWriteOptions { +pub(crate) struct BulkWriteOptions { + #[serde(default = "some_true")] pub ordered: Option, pub bypass_document_validation: Option, pub comment: Option, #[serde(rename = "let")] pub let_vars: Option, #[serialize_always] - #[serde(rename = "errorsOnly", serialize_with = "serialize_opposite_bool")] + #[serde( + alias = "verboseResults", + rename = "errorsOnly", + serialize_with = "serialize_opposite_bool" + )] pub verbose_results: Option, } +fn some_true() -> Option { + Some(true) +} + fn serialize_opposite_bool( val: &Option, serializer: S, diff --git a/src/client/bulk_write/actions.rs b/src/client/bulk_write/actions.rs index de48a5487..a4ba3448a 100644 --- a/src/client/bulk_write/actions.rs +++ b/src/client/bulk_write/actions.rs @@ -4,6 +4,7 @@ use futures_core::future::BoxFuture; use futures_util::FutureExt; use crate::{ + bson::{Bson, Document}, error::{ErrorKind, Result}, operation::BulkWrite, Client, @@ -22,6 +23,28 @@ pub struct VerboseBulkWriteAction { options: BulkWriteOptions, } +impl VerboseBulkWriteAction { + pub fn ordered(mut self, ordered: bool) -> Self { + self.options.ordered = Some(ordered); + self + } + + pub fn bypass_document_validation(mut self, bypass_document_validation: bool) -> Self { + self.options.bypass_document_validation = Some(bypass_document_validation); + self + } + + pub fn comment(mut self, comment: impl Into) -> Self { + self.options.comment = Some(comment.into()); + self + } + + pub fn let_vars(mut self, let_vars: Document) -> Self { + self.options.let_vars = Some(let_vars); + self + } +} + impl IntoFuture for VerboseBulkWriteAction { type Output = Result; type IntoFuture = BoxFuture<'static, Self::Output>; @@ -88,6 +111,32 @@ impl SummaryBulkWriteAction { } } + pub fn ordered(self, ordered: bool) -> Self { + Self { + inner: self.inner.ordered(ordered), + } + } + + pub fn bypass_document_validation(self, bypass_document_validation: bool) -> Self { + Self { + inner: self + .inner + .bypass_document_validation(bypass_document_validation), + } + } + + pub fn comment(self, comment: impl Into) -> Self { + Self { + inner: self.inner.comment(comment), + } + } + + pub fn let_vars(self, let_vars: Document) -> Self { + Self { + inner: self.inner.let_vars(let_vars), + } + } + pub fn verbose_results(mut self) -> VerboseBulkWriteAction { self.inner.options.verbose_results = Some(true); self.inner diff --git a/src/client/bulk_write/models.rs b/src/client/bulk_write/models.rs index 60aee5e06..9a00cf76d 100644 --- a/src/client/bulk_write/models.rs +++ b/src/client/bulk_write/models.rs @@ -4,7 +4,7 @@ use serde::Serialize; use serde_with::skip_serializing_none; use crate::{ - bson::{oid::ObjectId, Bson, Document, RawDocumentBuf}, + bson::{oid::ObjectId, Array, Bson, Document, RawDocumentBuf}, error::Result, options::UpdateModifications, Namespace, @@ -28,6 +28,10 @@ pub enum WriteModel { filter: Document, #[serde(rename = "updateMods")] update: UpdateModifications, + array_filters: Option, + collation: Option, + hint: Option, + upsert: Option, }, #[non_exhaustive] UpdateMany { @@ -36,6 +40,10 @@ pub enum WriteModel { filter: Document, #[serde(rename = "updateMods")] update: UpdateModifications, + array_filters: Option, + collation: Option, + hint: Option, + upsert: Option, }, #[non_exhaustive] ReplaceOne { @@ -44,18 +52,26 @@ pub enum WriteModel { filter: Document, 
#[serde(rename = "updateMods")] replacement: Document, + array_filters: Option, + collation: Option, + hint: Option, + upsert: Option, }, #[non_exhaustive] DeleteOne { #[serde(skip)] namespace: Namespace, filter: Document, + collation: Option, + hint: Option, }, #[non_exhaustive] DeleteMany { #[serde(skip)] namespace: Namespace, filter: Document, + collation: Option, + hint: Option, }, } diff --git a/src/client/bulk_write/results.rs b/src/client/bulk_write/results.rs index e205264d8..029b6acd5 100644 --- a/src/client/bulk_write/results.rs +++ b/src/client/bulk_write/results.rs @@ -75,7 +75,7 @@ impl TryFrom for UpdateResult { Ok(Self { matched_count: response.n, modified_count, - upserted_id: response.upserted, + upserted_id: response.upserted.map(|upserted| upserted.id), }) } } diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index b731a4da0..6acc144ab 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -76,12 +76,15 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { ops.push(model_doc); } - let command = rawdoc! { + let mut command = rawdoc! { Self::NAME: 1, "ops": ops, "nsInfo": namespace_info.namespaces, }; + let options = bson::to_raw_document_buf(&self.options)?; + bson_util::extend_raw_document_buf(&mut command, options)?; + Ok(Command::new(Self::NAME, "admin", command)) } @@ -133,7 +136,13 @@ pub(crate) struct BulkWriteOperationResponse { pub(crate) index: usize, pub(crate) n: u64, pub(crate) n_modified: Option, - pub(crate) upserted: Option, + pub(crate) upserted: Option, +} + +#[derive(Deserialize)] +pub(crate) struct UpsertedId { + #[serde(rename = "_id")] + pub(crate) id: Bson, } impl BulkWriteOperationResponse { diff --git a/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-comment.json b/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-comment.json new file mode 100644 index 000000000..489582310 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-comment.json @@ -0,0 +1,97 @@ +{ + "description": "client bulkWrite comment", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "bulkWrite comment", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "document": { + "_id": "1" + } + } + } + ], + "comment": { + "bulk": "write" + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "comment": { + "bulk": "write" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": "1" + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-let.json b/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-let.json new file mode 100644 index 000000000..0a720ae1f --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-let.json @@ -0,0 
+1,135 @@ +{ + "description": "client bulkWrite let variables", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "bulkWrite let variables", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "let": { + "id1": 1, + "id2": 2 + }, + "requests": [ + { + "updateOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id1" + ] + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "let": { + "id1": 1, + "id2": 2 + } + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-mixed_namespaces.json b/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-mixed_namespaces.json new file mode 100644 index 000000000..fa60526ac --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-mixed_namespaces.json @@ -0,0 +1,320 @@ +{ + "description": "bulkWrite namespaces", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "bulkWrite-db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "bulkWrite-db1" + } + }, + { + "collection": { + "id": "collection2", + "database": "database1", + "collectionName": "coll2" + } + } + ], + "initialData": [ + { + "databaseName": "bulkWrite-db0", + "collectionName": "coll0", + "documents": [] + }, + { + "databaseName": "bulkWrite-db0", + "collectionName": "coll1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2 + } + ] + }, + { + "databaseName": "bulkWrite-db1", + "collectionName": "coll2", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ], + "tests": [ + { + "description": "bulkWrite mixed namespaces", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "namespace": { + "db": "bulkWrite-db0", + "coll": "coll0" + }, + "document": { + "_id": 1 + } + } + }, + { + "insertOne": { + "namespace": { + "db": "bulkWrite-db0", + "coll": "coll0" + }, + "document": { + "_id": 2 + } + } + }, + { + "updateOne": { + 
"namespace": { + "db": "bulkWrite-db0", + "coll": "coll1" + }, + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": { + "db": "bulkWrite-db1", + "coll": "coll2" + }, + "filter": { + "_id": 3 + } + } + }, + { + "deleteOne": { + "namespace": { + "db": "bulkWrite-db0", + "coll": "coll1" + }, + "filter": { + "_id": 2 + } + } + }, + { + "replaceOne": { + "namespace": { + "db": "bulkWrite-db1", + "coll": "coll2" + }, + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 2, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 2, + "insertResults": { + "0": { + "insertedId": 1 + }, + "1": { + "insertedId": 2 + } + }, + "updateResults": { + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": null + }, + "5": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": null + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + }, + "4": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1 + } + }, + { + "insert": 0, + "document": { + "_id": 2 + } + }, + { + "update": 1, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "delete": 2, + "filter": { + "_id": 3 + }, + "multi": false + }, + { + "delete": 1, + "filter": { + "_id": 2 + }, + "multi": false + }, + { + "update": 2, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "bulkWrite-db0.coll0" + }, + { + "ns": "bulkWrite-db0.coll1" + }, + { + "ns": "bulkWrite-db1.coll2" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "bulkWrite-db0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + { + "databaseName": "bulkWrite-db0", + "collectionName": "coll1", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "databaseName": "bulkWrite-db1", + "collectionName": "coll2", + "documents": [ + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/deleteMany-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-ordered.json similarity index 54% rename from src/test/spec/json/crud/unified/new-bulk-write/deleteMany-basic.json rename to src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-ordered.json index 0cf8ca7f9..48c5a7bd9 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/deleteMany-basic.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-ordered.json @@ -1,10 +1,13 @@ { - "description": "Client bulkWrite deleteMany", + "description": "client bulkWrite ordered option", "schemaVersion": "1.0", "createEntities": [ { "client": { - "id": "client0" + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] } }, { @@ -26,22 +29,12 @@ { "collectionName": "coll0", "databaseName": "crud-tests", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] + "documents": [] } ], "tests": [ { - "description": "Client bulkWrite deleteMany verbose", + "description": "unordered bulkWrite", "runOnRequirements": [ { "minServerVersion": "7.0" @@ -54,51 +47,73 @@ "arguments": { "requests": [ { - "deleteMany": { + "insertOne": { "namespace": { "db": "crud-tests", "coll": "coll0" }, - 
"filter": { - "_id": { - "$lte": 2 - } + "document": { + "_id": 4 } } } ], - "verboseResults": true + "verboseResults": true, + "ordered": false }, "expectResult": { - "insertedCount": 0, + "insertedCount": 1, "upsertedCount": 0, "matchedCount": 0, "modifiedCount": 0, - "deletedCount": 2, - "insertResults": {}, - "updateResults": {}, - "deleteResults": { + "deletedCount": 0, + "insertResults": { "0": { - "deletedCount": 2 + "insertedId": 4 } - } + }, + "updateResults": {}, + "deleteResults": {} } } ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4 + } + } + ], + "ordered": false + } + } + } + ] + } + ], "outcome": [ { "collectionName": "coll0", "databaseName": "crud-tests", "documents": [ { - "_id": 3 + "_id": 4 } ] } ] }, { - "description": "Client bulkWrite deleteMany summary", + "description": "ordered bulkWrite", "runOnRequirements": [ { "minServerVersion": "7.0" @@ -111,51 +126,73 @@ "arguments": { "requests": [ { - "deleteMany": { + "insertOne": { "namespace": { "db": "crud-tests", "coll": "coll0" }, - "filter": { - "_id": { - "$lte": 2 - } + "document": { + "_id": 4 } } } ], - "verboseResults": false + "verboseResults": true, + "ordered": true }, "expectResult": { - "insertedCount": 0, + "insertedCount": 1, "upsertedCount": 0, "matchedCount": 0, "modifiedCount": 0, - "deletedCount": 2, - "insertResults": {}, - "updateResults": { - "$$unsetOrMatches": {} + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 4 + } }, - "deleteResults": { - "$$unsetOrMatches": {} - } + "updateResults": {}, + "deleteResults": {} } } ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4 + } + } + ], + "ordered": true + } + } + } + ] + } + ], "outcome": [ { "collectionName": "coll0", "databaseName": "crud-tests", "documents": [ { - "_id": 3 + "_id": 4 } ] } ] }, { - "description": "Client bulkWrite deleteMany defaults to summary", + "description": "bulkWrite defaults to ordered", "runOnRequirements": [ { "minServerVersion": "7.0" @@ -168,43 +205,57 @@ "arguments": { "requests": [ { - "deleteMany": { + "insertOne": { "namespace": { "db": "crud-tests", "coll": "coll0" }, - "filter": { - "_id": { - "$lte": 2 - } + "document": { + "_id": 4 } } } - ] + ], + "verboseResults": true }, "expectResult": { - "insertedCount": 0, + "insertedCount": 1, "upsertedCount": 0, "matchedCount": 0, "modifiedCount": 0, - "deletedCount": 2, - "insertResults": {}, - "updateResults": { - "$$unsetOrMatches": {} + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 4 + } }, - "deleteResults": { - "$$unsetOrMatches": {} - } + "updateResults": {}, + "deleteResults": {} } } ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ordered": true + } + } + } + ] + } + ], "outcome": [ { "collectionName": "coll0", "databaseName": "crud-tests", "documents": [ { - "_id": 3 + "_id": 4 } ] } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-results.json b/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-results.json new file mode 100644 index 000000000..b20baecf5 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-results.json @@ -0,0 +1,629 @@ +{ + "description": "client bulkWrite results", + "schemaVersion": "1.0", + 
"createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + ], + "tests": [ + { + "description": "verbose bulkWrite returns detailed results", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 4, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": null + }, + "2": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": null + }, + "3": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 4 + } + }, + "deleteResults": { + "4": { + "deletedCount": 1 + }, + "5": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "errorsOnly": false + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "summary bulkWrite omits detailed results", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + 
"filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": false + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 4, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "errorsOnly": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "bulkWrite defaults to summary results", + "runOnRequirements": [ + { + "minServerVersion": "7.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 4, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "errorsOnly": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/deleteOne-basic.json 
b/src/test/spec/json/crud/unified/new-bulk-write/deleteOne-basic.json deleted file mode 100644 index 477085517..000000000 --- a/src/test/spec/json/crud/unified/new-bulk-write/deleteOne-basic.json +++ /dev/null @@ -1,205 +0,0 @@ -{ - "description": "Client bulkWrite deleteOne", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0" - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-tests" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "coll0" - } - } - ], - "initialData": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - ], - "tests": [ - { - "description": "Client bulkWrite deleteOne verbose", - "runOnRequirements": [ - { - "minServerVersion": "7.0" - } - ], - "operations": [ - { - "object": "client0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "filter": { - "_id": 1 - } - } - } - ], - "verboseResults": true - }, - "expectResult": { - "insertedCount": 0, - "upsertedCount": 0, - "matchedCount": 0, - "modifiedCount": 0, - "deletedCount": 1, - "insertResults": {}, - "updateResults": {}, - "deleteResults": { - "0": { - "deletedCount": 1 - } - } - } - } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 2 - } - ] - } - ] - }, - { - "description": "Client bulkWrite deleteOne summary", - "runOnRequirements": [ - { - "minServerVersion": "7.0" - } - ], - "operations": [ - { - "object": "client0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "filter": { - "_id": 1 - } - } - } - ], - "verboseResults": false - }, - "expectResult": { - "insertedCount": 0, - "upsertedCount": 0, - "matchedCount": 0, - "modifiedCount": 0, - "deletedCount": 1, - "insertResults": {}, - "updateResults": { - "$$unsetOrMatches": {} - }, - "deleteResults": { - "$$unsetOrMatches": {} - } - } - } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 2 - } - ] - } - ] - }, - { - "description": "Client bulkWrite deleteOne defaults to summary", - "runOnRequirements": [ - { - "minServerVersion": "7.0" - } - ], - "operations": [ - { - "object": "client0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "filter": { - "_id": 1 - } - } - } - ] - }, - "expectResult": { - "insertedCount": 0, - "upsertedCount": 0, - "matchedCount": 0, - "modifiedCount": 0, - "deletedCount": 1, - "insertResults": {}, - "updateResults": { - "$$unsetOrMatches": {} - }, - "deleteResults": { - "$$unsetOrMatches": {} - } - } - } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 2 - } - ] - } - ] - } - ] -} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/insertOne-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/insertOne-basic.json deleted file mode 100644 index cdfc76505..000000000 --- a/src/test/spec/json/crud/unified/new-bulk-write/insertOne-basic.json +++ /dev/null @@ -1,192 +0,0 @@ -{ - "description": "Client bulkWrite insertOne", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0" - } - }, - { - "database": { - "id": 
"database0", - "client": "client0", - "databaseName": "crud-tests" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "coll0" - } - } - ], - "initialData": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1 - } - ] - } - ], - "tests": [ - { - "description": "Client bulkWrite insertOne verbose", - "runOnRequirements": [ - { - "minServerVersion": "7.0" - } - ], - "operations": [ - { - "object": "client0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "insertOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "document": { - "_id": 2 - } - } - }, - { - "insertOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "document": { - "_id": 3 - } - } - } - ], - "verboseResults": true - }, - "expectResult": { - "insertedCount": 2, - "upsertedCount": 0, - "matchedCount": 0, - "modifiedCount": 0, - "deletedCount": 0, - "insertResults": { - "0": { - "insertedId": 2 - }, - "1": { - "insertedId": 3 - } - }, - "updateResults": {}, - "deleteResults": {} - } - } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - ] - }, - { - "description": "Client bulkWrite insertOne summary", - "runOnRequirements": [ - { - "minServerVersion": "7.0" - } - ], - "operations": [ - { - "object": "client0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "insertOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "document": { - "_id": 2 - } - } - }, - { - "insertOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "document": { - "_id": 3 - } - } - } - ], - "verboseResults": false - }, - "expectResult": { - "insertedCount": 2, - "upsertedCount": 0, - "matchedCount": 0, - "modifiedCount": 0, - "deletedCount": 0, - "insertResults": { - "0": { - "insertedId": 2 - }, - "1": { - "insertedId": 3 - } - }, - "updateResults": { - "$$unsetOrMatches": {} - }, - "deleteResults": { - "$$unsetOrMatches": {} - } - } - } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - ] - } - ] -} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/replaceOne-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/replaceOne-basic.json deleted file mode 100644 index e77d27f87..000000000 --- a/src/test/spec/json/crud/unified/new-bulk-write/replaceOne-basic.json +++ /dev/null @@ -1,239 +0,0 @@ -{ - "description": "Client bulkWrite replaceOne", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0" - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-tests" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "coll0" - } - } - ], - "initialData": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ], - "tests": [ - { - "description": "Client bulkWrite replaceOne verbose", - "runOnRequirements": [ - { - "minServerVersion": "7.0" - } - ], - "operations": [ - { - "object": "client0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "replaceOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "filter": { - "_id": { - "$gt": 1 - } - }, - "replacement": { - "x": 222 - 
} - } - } - ], - "verboseResults": true - }, - "expectResult": { - "insertedCount": 0, - "upsertedCount": 0, - "matchedCount": 1, - "modifiedCount": 1, - "deletedCount": 0, - "insertResults": {}, - "updateResults": { - "0": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedId": null - } - }, - "deleteResults": {} - } - } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 222 - } - ] - } - ] - }, - { - "description": "Client bulkWrite replaceOne summary", - "runOnRequirements": [ - { - "minServerVersion": "7.0" - } - ], - "operations": [ - { - "object": "client0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "replaceOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "filter": { - "_id": { - "$gt": 1 - } - }, - "replacement": { - "x": 222 - } - } - } - ], - "verboseResults": false - }, - "expectResult": { - "insertedCount": 0, - "upsertedCount": 0, - "matchedCount": 1, - "modifiedCount": 1, - "deletedCount": 0, - "insertResults": {}, - "updateResults": { - "$$unsetOrMatches": {} - }, - "deleteResults": { - "$$unsetOrMatches": {} - } - } - } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 222 - } - ] - } - ] - }, - { - "description": "Client bulkWrite replaceOne defaults to summary", - "runOnRequirements": [ - { - "minServerVersion": "7.0" - } - ], - "operations": [ - { - "object": "client0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "replaceOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "filter": { - "_id": { - "$gt": 1 - } - }, - "replacement": { - "x": 222 - } - } - } - ] - }, - "expectResult": { - "insertedCount": 0, - "upsertedCount": 0, - "matchedCount": 1, - "modifiedCount": 1, - "deletedCount": 0, - "insertResults": {}, - "updateResults": { - "$$unsetOrMatches": {} - }, - "deleteResults": { - "$$unsetOrMatches": {} - } - } - } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 222 - } - ] - } - ] - } - ] -} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/updateMany-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/updateMany-basic.json deleted file mode 100644 index 918450714..000000000 --- a/src/test/spec/json/crud/unified/new-bulk-write/updateMany-basic.json +++ /dev/null @@ -1,261 +0,0 @@ -{ - "description": "Client bulkWrite updateMany", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0" - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-tests" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "coll0" - } - } - ], - "initialData": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "tests": [ - { - "description": "Client bulkWrite updateMany verbose", - "runOnRequirements": [ - { - "minServerVersion": "7.0" - } - ], - "operations": [ - { - "object": "client0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "updateMany": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ], 
- "verboseResults": true - }, - "expectResult": { - "insertedCount": 0, - "upsertedCount": 0, - "matchedCount": 2, - "modifiedCount": 2, - "deletedCount": 0, - "insertResults": {}, - "updateResults": { - "0": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedId": null - } - }, - "deleteResults": {} - } - } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 34 - } - ] - } - ] - }, - { - "description": "Client bulkWrite updateMany summary", - "runOnRequirements": [ - { - "minServerVersion": "7.0" - } - ], - "operations": [ - { - "object": "client0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "updateMany": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ], - "verboseResults": false - }, - "expectResult": { - "insertedCount": 0, - "upsertedCount": 0, - "matchedCount": 2, - "modifiedCount": 2, - "deletedCount": 0, - "insertResults": {}, - "updateResults": { - "$$unsetOrMatches": {} - }, - "deleteResults": { - "$$unsetOrMatches": {} - } - } - } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 34 - } - ] - } - ] - }, - { - "description": "Client bulkWrite updateMany defaults to summary", - "runOnRequirements": [ - { - "minServerVersion": "7.0" - } - ], - "operations": [ - { - "object": "client0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "updateMany": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ] - }, - "expectResult": { - "insertedCount": 0, - "upsertedCount": 0, - "matchedCount": 2, - "modifiedCount": 2, - "deletedCount": 0, - "insertResults": {}, - "updateResults": { - "$$unsetOrMatches": {} - }, - "deleteResults": { - "$$unsetOrMatches": {} - } - } - } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 34 - } - ] - } - ] - } - ] -} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/updateOne-basic.json b/src/test/spec/json/crud/unified/new-bulk-write/updateOne-basic.json deleted file mode 100644 index d877dac07..000000000 --- a/src/test/spec/json/crud/unified/new-bulk-write/updateOne-basic.json +++ /dev/null @@ -1,245 +0,0 @@ -{ - "description": "Client bulkWrite updateOne", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0" - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-tests" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "coll0" - } - } - ], - "initialData": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ], - "tests": [ - { - "description": "Client bulkWrite updateOne verbose", - "runOnRequirements": [ - { - "minServerVersion": "7.0" - } - ], - "operations": [ - { - "object": "client0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "updateOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "filter": { - "_id": { - "$gt": 1 - } - }, 
- "update": { - "$inc": { - "x": 1 - } - } - } - } - ], - "verboseResults": true - }, - "expectResult": { - "insertedCount": 0, - "upsertedCount": 0, - "matchedCount": 1, - "modifiedCount": 1, - "deletedCount": 0, - "insertResults": {}, - "updateResults": { - "0": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedId": null - } - }, - "deleteResults": {} - } - } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 23 - } - ] - } - ] - }, - { - "description": "Client bulkWrite updateOne summary", - "runOnRequirements": [ - { - "minServerVersion": "7.0" - } - ], - "operations": [ - { - "object": "client0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "updateOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ], - "verboseResults": false - }, - "expectResult": { - "insertedCount": 0, - "upsertedCount": 0, - "matchedCount": 1, - "modifiedCount": 1, - "deletedCount": 0, - "insertResults": {}, - "updateResults": { - "$$unsetOrMatches": {} - }, - "deleteResults": { - "$$unsetOrMatches": {} - } - } - } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 23 - } - ] - } - ] - }, - { - "description": "Client bulkWrite updateOne defaults to summary", - "runOnRequirements": [ - { - "minServerVersion": "7.0" - } - ], - "operations": [ - { - "object": "client0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "updateOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ] - }, - "expectResult": { - "insertedCount": 0, - "upsertedCount": 0, - "matchedCount": 1, - "modifiedCount": 1, - "deletedCount": 0, - "insertResults": {}, - "updateResults": { - "$$unsetOrMatches": {} - }, - "deleteResults": { - "$$unsetOrMatches": {} - } - } - } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 23 - } - ] - } - ] - } - ] -} diff --git a/src/test/spec/unified_runner/operation/bulk_write.rs b/src/test/spec/unified_runner/operation/bulk_write.rs index 7792b98ea..1b81cf0af 100644 --- a/src/test/spec/unified_runner/operation/bulk_write.rs +++ b/src/test/spec/unified_runner/operation/bulk_write.rs @@ -3,8 +3,8 @@ use futures_util::FutureExt; use serde::Deserialize; use crate::{ - bson::Document, - client::bulk_write::models::WriteModel, + bson::{Array, Bson, Document}, + client::bulk_write::{models::WriteModel, BulkWriteOptions}, coll::options::UpdateModifications, error::Result, test::spec::unified_runner::{Entity, TestRunner}, @@ -17,7 +17,8 @@ use super::TestOperation; #[serde(rename_all = "camelCase", deny_unknown_fields)] pub(super) struct BulkWrite { requests: Vec, - verbose_results: Option, + #[serde(flatten)] + options: BulkWriteOptions, } impl<'de> Deserialize<'de> for WriteModel { @@ -25,56 +26,136 @@ impl<'de> Deserialize<'de> for WriteModel { where D: serde::Deserializer<'de>, { - use serde::de::Error as DeError; - #[derive(Deserialize)] - struct WriteModelHelper { - namespace: Namespace, - document: Option, - filter: Option, - update: Option, - replacement: Option, + #[serde(rename_all = "camelCase")] + enum WriteModelHelper { + InsertOne { + 
namespace: Namespace, + document: Document, + }, + UpdateOne { + namespace: Namespace, + filter: Document, + update: UpdateModifications, + array_filters: Option, + collation: Option, + hint: Option, + upsert: Option, + }, + UpdateMany { + namespace: Namespace, + filter: Document, + update: UpdateModifications, + array_filters: Option, + collation: Option, + hint: Option, + upsert: Option, + }, + ReplaceOne { + namespace: Namespace, + filter: Document, + replacement: Document, + array_filters: Option, + collation: Option, + hint: Option, + upsert: Option, + }, + DeleteOne { + namespace: Namespace, + filter: Document, + collation: Option, + hint: Option, + }, + DeleteMany { + namespace: Namespace, + filter: Document, + collation: Option, + hint: Option, + }, } - let model_doc = Document::deserialize(deserializer)?; - let Some((key, value)) = model_doc.into_iter().next() else { - return Err(DeError::custom("empty write model")); - }; - let body: WriteModelHelper = bson::from_bson(value).map_err(DeError::custom)?; - - let model = match key.as_str() { - "insertOne" => WriteModel::InsertOne { - namespace: body.namespace, - document: body.document.unwrap(), + let helper = WriteModelHelper::deserialize(deserializer)?; + let model = match helper { + WriteModelHelper::InsertOne { + namespace, + document, + } => WriteModel::InsertOne { + namespace, + document, }, - "updateOne" => WriteModel::UpdateOne { - namespace: body.namespace, - filter: body.filter.unwrap(), - update: body.update.unwrap(), + WriteModelHelper::UpdateOne { + namespace, + filter, + update, + array_filters, + collation, + hint, + upsert, + } => WriteModel::UpdateOne { + namespace, + filter, + update, + array_filters, + collation, + hint, + upsert, }, - "updateMany" => WriteModel::UpdateMany { - namespace: body.namespace, - filter: body.filter.unwrap(), - update: body.update.unwrap(), + WriteModelHelper::UpdateMany { + namespace, + filter, + update, + array_filters, + collation, + hint, + upsert, + } => WriteModel::UpdateMany { + namespace, + filter, + update, + array_filters, + collation, + hint, + upsert, }, - "replaceOne" => WriteModel::ReplaceOne { - namespace: body.namespace, - filter: body.filter.unwrap(), - replacement: body.replacement.unwrap(), + WriteModelHelper::ReplaceOne { + namespace, + filter, + replacement, + array_filters, + collation, + hint, + upsert, + } => WriteModel::ReplaceOne { + namespace, + filter, + replacement, + array_filters, + collation, + hint, + upsert, }, - "deleteOne" => WriteModel::DeleteOne { - namespace: body.namespace, - filter: body.filter.unwrap(), + WriteModelHelper::DeleteOne { + namespace, + filter, + collation, + hint, + } => WriteModel::DeleteOne { + namespace, + filter, + collation, + hint, }, - "deleteMany" => WriteModel::DeleteMany { - namespace: body.namespace, - filter: body.filter.unwrap(), + WriteModelHelper::DeleteMany { + namespace, + filter, + collation, + hint, + } => WriteModel::DeleteMany { + namespace, + filter, + collation, + hint, }, - other => { - return Err(DeError::custom(format!( - "unknown bulkWrite operation: {other}" - ))) - } }; Ok(model) @@ -89,14 +170,26 @@ impl TestOperation for BulkWrite { ) -> BoxFuture<'a, Result>> { async move { let client = test_runner.get_client(id).await; - let action = client.bulk_write(self.requests.clone()); - let bson = if let Some(true) = self.verbose_results { + let mut action = client.bulk_write(self.requests.clone()); + if let Some(ordered) = self.options.ordered { + action = action.ordered(ordered); + } + if let 
Some(bypass_document_validation) = self.options.bypass_document_validation { + action = action.bypass_document_validation(bypass_document_validation); + } + if let Some(ref comment) = self.options.comment { + action = action.comment(comment.clone()); + } + if let Some(ref let_vars) = self.options.let_vars { + action = action.let_vars(let_vars.clone()); + } + let bson = if let Some(true) = self.options.verbose_results { let result = action.verbose_results().await?; - bson::to_bson(&result)? + bson::to_bson(&result) } else { let result = action.await?; - bson::to_bson(&result)? - }; + bson::to_bson(&result) + }?; Ok(Some(bson.into())) } .boxed() From 28d1bb60fbe78bdedd6edc3cf511a0c89245a394 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Tue, 12 Dec 2023 15:33:50 -0700 Subject: [PATCH 05/75] impl and refactor --- Cargo.toml | 1 + src/action.rs | 5 +- src/action/bulk_write.rs | 261 +++++ src/action/bulk_write/error.rs | 31 + src/action/bulk_write/results.rs | 52 + .../bulk_write/write_models.rs} | 84 +- src/bson_util.rs | 59 +- src/client.rs | 21 +- src/client/bulk_write.rs | 57 -- src/client/bulk_write/actions.rs | 153 --- src/client/bulk_write/results.rs | 107 --- src/client/executor.rs | 49 +- src/cmap/conn/command.rs | 12 - src/cmap/conn/stream_description.rs | 25 - src/coll.rs | 39 +- src/cursor/common.rs | 15 - src/error.rs | 33 +- src/operation.rs | 64 +- src/operation/abort_transaction.rs | 12 +- src/operation/aggregate.rs | 58 +- src/operation/aggregate/change_stream.rs | 57 +- src/operation/bulk_write.rs | 381 ++++++-- src/operation/commit_transaction.rs | 12 +- src/operation/count.rs | 12 +- src/operation/count_documents.rs | 21 +- src/operation/create.rs | 12 +- src/operation/create_indexes.rs | 31 +- src/operation/create_indexes/test.rs | 85 -- src/operation/delete.rs | 36 +- src/operation/delete/test.rs | 196 ---- src/operation/distinct.rs | 12 +- src/operation/drop_collection.rs | 12 +- src/operation/drop_database.rs | 12 +- src/operation/drop_indexes.rs | 23 +- src/operation/drop_indexes/test.rs | 43 - src/operation/find.rs | 57 +- src/operation/find/test.rs | 80 -- src/operation/find_and_modify.rs | 42 +- src/operation/get_more.rs | 31 +- src/operation/get_more/test.rs | 47 - src/operation/insert.rs | 135 ++- src/operation/list_collections.rs | 24 +- src/operation/list_databases.rs | 12 +- src/operation/list_indexes.rs | 38 +- src/operation/list_indexes/test.rs | 90 -- src/operation/raw_output.rs | 8 +- src/operation/run_command.rs | 11 +- src/operation/run_command/test.rs | 24 - src/operation/run_cursor_command.rs | 36 +- src/operation/search_index.rs | 75 +- src/operation/test.rs | 149 --- src/operation/update.rs | 67 +- src/operation/update/test.rs | 116 --- src/serde_util.rs | 19 + src/test/bulk_write.rs | 68 +- src/test/coll.rs | 55 +- src/test/spec.rs | 7 +- .../new-bulk-write/bulkWrite-comment.json | 97 -- .../unified/new-bulk-write/bulkWrite-let.json | 135 --- .../client-bulkWrite-delete-options.json | 260 +++++ .../client-bulkWrite-errors.json | 437 +++++++++ ...=> client-bulkWrite-mixed-namespaces.json} | 19 +- .../client-bulkWrite-options.json | 322 +++++++ ...red.json => client-bulkWrite-ordered.json} | 48 +- ...lts.json => client-bulkWrite-results.json} | 301 +++++- .../client-bulkWrite-update-options.json | 900 ++++++++++++++++++ .../client-bulkWrite-serverErrors.json | 730 ++++++++++++++ .../unified/client-bulkWrite-serverErrors.yml | 338 +++++++ .../unified/client-bulkWrite.json | 401 ++++++++ src/test/spec/unified_runner/operation.rs | 34 +- 
.../unified_runner/operation/bulk_write.rs | 49 +- src/test/spec/unified_runner/test_file.rs | 80 +- src/test/spec/unified_runner/test_runner.rs | 5 +- 73 files changed, 5217 insertions(+), 2213 deletions(-) create mode 100644 src/action/bulk_write.rs create mode 100644 src/action/bulk_write/error.rs create mode 100644 src/action/bulk_write/results.rs rename src/{client/bulk_write/models.rs => action/bulk_write/write_models.rs} (56%) delete mode 100644 src/client/bulk_write.rs delete mode 100644 src/client/bulk_write/actions.rs delete mode 100644 src/client/bulk_write/results.rs delete mode 100644 src/operation/create_indexes/test.rs delete mode 100644 src/operation/delete/test.rs delete mode 100644 src/operation/drop_indexes/test.rs delete mode 100644 src/operation/find/test.rs delete mode 100644 src/operation/get_more/test.rs delete mode 100644 src/operation/list_indexes/test.rs delete mode 100644 src/operation/run_command/test.rs delete mode 100644 src/operation/test.rs delete mode 100644 src/operation/update/test.rs delete mode 100644 src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-comment.json delete mode 100644 src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-let.json create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.json create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json rename src/test/spec/json/crud/unified/new-bulk-write/{bulkWrite-mixed_namespaces.json => client-bulkWrite-mixed-namespaces.json} (95%) create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json rename src/test/spec/json/crud/unified/new-bulk-write/{bulkWrite-ordered.json => client-bulkWrite-ordered.json} (83%) rename src/test/spec/json/crud/unified/new-bulk-write/{bulkWrite-results.json => client-bulkWrite-results.json} (62%) create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json create mode 100644 src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json create mode 100644 src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.yml create mode 100644 src/test/spec/json/transactions/unified/client-bulkWrite.json diff --git a/Cargo.toml b/Cargo.toml index 2d538a846..9d0ed742b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -168,6 +168,7 @@ tokio = { version = ">= 0.0.0", features = ["fs", "parking_lot"] } tracing-subscriber = "0.3.16" regex = "1.6.0" serde-hex = "0.1.0" +serde_path_to_error = "0.1.15" [package.metadata.docs.rs] rustdoc-args = ["--cfg", "docsrs"] diff --git a/src/action.rs b/src/action.rs index 2e147a8b0..c8ff1d327 100644 --- a/src/action.rs +++ b/src/action.rs @@ -1,6 +1,7 @@ //! Action builder types. 
mod aggregate; +pub(crate) mod bulk_write; mod count; mod create_collection; mod create_index; @@ -22,8 +23,10 @@ mod watch; use std::{marker::PhantomData, ops::Deref}; +use crate::bson::Document; + pub use aggregate::Aggregate; -use bson::Document; +pub use bulk_write::BulkWrite; pub use count::{CountDocuments, EstimatedDocumentCount}; pub use create_collection::CreateCollection; pub use create_index::CreateIndex; diff --git a/src/action/bulk_write.rs b/src/action/bulk_write.rs new file mode 100644 index 000000000..b27ee75a9 --- /dev/null +++ b/src/action/bulk_write.rs @@ -0,0 +1,261 @@ +#![allow(missing_docs)] + +pub(crate) mod error; +pub(crate) mod results; +pub(crate) mod write_models; + +use std::collections::HashMap; + +use serde::{ser::SerializeMap, Deserialize, Serialize}; +use serde_with::skip_serializing_none; + +use crate::{ + bson::{Bson, Document}, + error::{Error, ErrorKind, Result}, + operation::bulk_write::BulkWrite as BulkWriteOperation, + Client, + ClientSession, +}; + +use super::{action_impl, option_setters}; + +use error::BulkWriteError; +use results::BulkWriteResult; +use write_models::WriteModel; + +impl Client { + pub fn bulk_write(&self, models: impl IntoIterator) -> BulkWrite { + BulkWrite::new(self.clone(), models.into_iter().collect()) + } +} + +#[skip_serializing_none] +#[derive(Clone, Debug, Default, Deserialize)] +#[serde(rename_all = "camelCase")] +#[non_exhaustive] +pub struct BulkWriteOptions { + pub ordered: Option, + pub bypass_document_validation: Option, + pub comment: Option, + #[serde(rename = "let")] + pub let_vars: Option, + pub verbose_results: Option, +} + +impl Serialize for BulkWriteOptions { + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + let mut map_serializer = serializer.serialize_map(None)?; + + let ordered = self.ordered.unwrap_or(true); + map_serializer.serialize_entry("ordered", &ordered)?; + + if let Some(bypass_document_validation) = self.bypass_document_validation { + map_serializer + .serialize_entry("bypassDocumentValidation", &bypass_document_validation)?; + } + + if let Some(ref comment) = self.comment { + map_serializer.serialize_entry("comment", comment)?; + } + + if let Some(ref let_vars) = self.let_vars { + map_serializer.serialize_entry("let", let_vars)?; + } + + let errors_only = self.verbose_results.map(|b| !b).unwrap_or(true); + map_serializer.serialize_entry("errorsOnly", &errors_only)?; + + map_serializer.end() + } +} + +#[must_use] +pub struct BulkWrite<'a> { + client: Client, + models: Vec, + options: Option, + session: Option<&'a mut ClientSession>, +} + +impl<'a> BulkWrite<'a> { + fn new(client: Client, models: Vec) -> Self { + Self { + client, + models, + options: None, + session: None, + } + } + + fn is_ordered(&self) -> bool { + self.options + .as_ref() + .and_then(|options| options.ordered) + .unwrap_or(true) + } +} + +impl<'a> BulkWrite<'a> { + option_setters!(options: BulkWriteOptions; + ordered: bool, + + bypass_document_validation: bool, + + comment: Bson, + + let_vars: Document, + + verbose_results: bool, + ); + + pub fn session(mut self, session: &'a mut ClientSession) -> BulkWrite<'a> { + self.session = Some(session); + self + } +} + +action_impl! 
{ + impl Action<'a> for BulkWrite<'a> { + type Future = BulkWriteFuture; + + async fn execute(mut self) -> Result { + let mut total_attempted = 0; + let mut execution_status = ExecutionStatus::None; + + while total_attempted < self.models.len() + && execution_status.should_continue(self.is_ordered()) + { + let mut operation = BulkWriteOperation::new( + self.client.clone(), + &self.models[total_attempted..], + total_attempted, + self.options.as_ref(), + ) + .await; + let result = self + .client + .execute_operation::( + &mut operation, + self.session.as_deref_mut(), + ) + .await; + total_attempted += operation.n_attempted; + + match result { + Ok(result) => { + execution_status = execution_status.with_success(result); + } + Err(error) => { + execution_status = execution_status.with_failure(error); + } + } + } + + match execution_status { + ExecutionStatus::Success(bulk_write_result) => Ok(bulk_write_result), + ExecutionStatus::Error(error) => Err(error), + ExecutionStatus::None => Err(ErrorKind::InvalidArgument { + message: "bulk_write must be provided at least one write operation".into(), + } + .into()), + } + } + } +} + +/// Represents the execution status of a bulk write. The status starts at `None`, indicating that no +/// writes have been attempted yet, and transitions to either `Success` or `Error` as batches are +/// executed. The contents of `Error` can be inspected to determine whether a bulk write can +/// continue with further batches or should be terminated. +enum ExecutionStatus { + Success(BulkWriteResult), + Error(Error), + None, +} + +impl ExecutionStatus { + fn with_success(mut self, result: BulkWriteResult) -> Self { + match self { + // Merge two successful sets of results together. + Self::Success(ref mut current_result) => { + current_result.merge(result); + self + } + // Merge the results of the new batch into the existing bulk write error. + Self::Error(ref mut current_error) => { + let bulk_write_error = Self::get_current_bulk_write_error(current_error); + bulk_write_error.merge_partial_results(result); + self + } + Self::None => Self::Success(result), + } + } + + fn with_failure(self, mut error: Error) -> Self { + match self { + // If the new error is a BulkWriteError, merge the successful results into the error's + // partial result. Otherwise, create a new BulkWriteError with the existing results and + // set its source as the error that just occurred. + Self::Success(current_result) => match *error.kind { + ErrorKind::ClientBulkWrite(ref mut bulk_write_error) => { + bulk_write_error.merge_partial_results(current_result); + Self::Error(error) + } + _ => { + let bulk_write_error: Error = ErrorKind::ClientBulkWrite(BulkWriteError { + write_errors: HashMap::new(), + write_concern_errors: Vec::new(), + partial_result: Some(current_result), + }) + .into(); + Self::Error(bulk_write_error.with_source(error)) + } + }, + // If the new error is a BulkWriteError, merge its contents with the existing error. + // Otherwise, set the new error as the existing error's source. + Self::Error(mut current_error) => match *error.kind { + ErrorKind::ClientBulkWrite(bulk_write_error) => { + let current_bulk_write_error = + Self::get_current_bulk_write_error(&mut current_error); + current_bulk_write_error.merge(bulk_write_error); + Self::Error(current_error) + } + _ => Self::Error(current_error.with_source(error)), + }, + Self::None => Self::Error(error), + } + } + + /// Gets a BulkWriteError from a given Error. 
This method should only be called when adding a + /// new result or error to the existing state, as it requires that the given Error's kind is + /// ClientBulkWrite. + fn get_current_bulk_write_error(error: &mut Error) -> &mut BulkWriteError { + match *error.kind { + ErrorKind::ClientBulkWrite(ref mut bulk_write_error) => bulk_write_error, + _ => unreachable!(), + } + } + + /// Whether further bulk write batches should be executed based on the current status of + /// execution. + fn should_continue(&self, ordered: bool) -> bool { + match self { + Self::Error(ref error) => { + match *error.kind { + ErrorKind::ClientBulkWrite(ref bulk_write_error) => { + // A top-level error is always fatal. If an individual operation fails + // during an ordered bulk write, no more batches should be executed. + error.source.is_some() + || (ordered && !bulk_write_error.write_errors.is_empty()) + } + // A top-level error is always fatal. + _ => false, + } + } + _ => true, + } + } +} diff --git a/src/action/bulk_write/error.rs b/src/action/bulk_write/error.rs new file mode 100644 index 000000000..dd6d27d6b --- /dev/null +++ b/src/action/bulk_write/error.rs @@ -0,0 +1,31 @@ +use std::collections::HashMap; + +use crate::error::{WriteConcernError, WriteError}; + +use super::results::BulkWriteResult; + +#[derive(Clone, Debug, Default)] +#[non_exhaustive] +pub struct BulkWriteError { + pub write_concern_errors: Vec, + pub write_errors: HashMap, + pub partial_result: Option, +} + +impl BulkWriteError { + pub(crate) fn merge(&mut self, other: BulkWriteError) { + self.write_concern_errors.extend(other.write_concern_errors); + self.write_errors.extend(other.write_errors); + if let Some(other_partial_result) = other.partial_result { + self.merge_partial_results(other_partial_result); + } + } + + pub(crate) fn merge_partial_results(&mut self, other_partial_result: BulkWriteResult) { + if let Some(ref mut partial_result) = self.partial_result { + partial_result.merge(other_partial_result); + } else { + self.partial_result = Some(other_partial_result); + } + } +} diff --git a/src/action/bulk_write/results.rs b/src/action/bulk_write/results.rs new file mode 100644 index 000000000..82896863f --- /dev/null +++ b/src/action/bulk_write/results.rs @@ -0,0 +1,52 @@ +#![allow(missing_docs)] + +use std::{collections::HashMap, fmt::Debug}; + +use serde::Serialize; + +use crate::{ + results::{DeleteResult, InsertOneResult, UpdateResult}, + serde_util::serialize_indexed_map, +}; + +#[derive(Clone, Debug, Default, Serialize)] +#[serde(rename_all = "camelCase")] +#[non_exhaustive] +pub struct BulkWriteResult { + pub inserted_count: i64, + pub upserted_count: i64, + pub matched_count: i64, + pub modified_count: i64, + pub deleted_count: i64, + #[serde(serialize_with = "serialize_indexed_map")] + pub insert_results: HashMap, + #[serde(serialize_with = "serialize_indexed_map")] + pub update_results: HashMap, + #[serde(serialize_with = "serialize_indexed_map")] + pub delete_results: HashMap, +} + +impl BulkWriteResult { + pub(crate) fn add_insert_result(&mut self, index: usize, insert_result: InsertOneResult) { + self.insert_results.insert(index, insert_result); + } + + pub(crate) fn add_update_result(&mut self, index: usize, update_result: UpdateResult) { + self.update_results.insert(index, update_result); + } + + pub(crate) fn add_delete_result(&mut self, index: usize, delete_result: DeleteResult) { + self.delete_results.insert(index, delete_result); + } + + pub(crate) fn merge(&mut self, other: Self) { + self.inserted_count += 
other.inserted_count; + self.upserted_count += other.upserted_count; + self.matched_count += other.matched_count; + self.modified_count += other.modified_count; + self.deleted_count += other.deleted_count; + self.insert_results.extend(other.insert_results); + self.update_results.extend(other.update_results); + self.delete_results.extend(other.delete_results); + } +} diff --git a/src/client/bulk_write/models.rs b/src/action/bulk_write/write_models.rs similarity index 56% rename from src/client/bulk_write/models.rs rename to src/action/bulk_write/write_models.rs index 9a00cf76d..ee60add9d 100644 --- a/src/client/bulk_write/models.rs +++ b/src/action/bulk_write/write_models.rs @@ -1,10 +1,9 @@ -use std::collections::HashMap; - use serde::Serialize; use serde_with::skip_serializing_none; use crate::{ - bson::{oid::ObjectId, Array, Bson, Document, RawDocumentBuf}, + bson::{rawdoc, Array, Bson, Document, RawDocumentBuf}, + bson_util::get_or_prepend_id_field, error::Result, options::UpdateModifications, Namespace, @@ -12,7 +11,7 @@ use crate::{ #[skip_serializing_none] #[derive(Clone, Debug, Serialize)] -#[serde(untagged, rename_all = "camelCase")] +#[serde(untagged)] #[non_exhaustive] pub enum WriteModel { #[non_exhaustive] @@ -22,6 +21,7 @@ pub enum WriteModel { document: Document, }, #[non_exhaustive] + #[serde(rename_all = "camelCase")] UpdateOne { #[serde(skip)] namespace: Namespace, @@ -34,6 +34,7 @@ pub enum WriteModel { upsert: Option, }, #[non_exhaustive] + #[serde(rename_all = "camelCase")] UpdateMany { #[serde(skip)] namespace: Namespace, @@ -46,6 +47,7 @@ pub enum WriteModel { upsert: Option, }, #[non_exhaustive] + #[serde(rename_all = "camelCase")] ReplaceOne { #[serde(skip)] namespace: Namespace, @@ -75,6 +77,12 @@ pub enum WriteModel { }, } +pub(crate) enum OperationType { + Insert, + Update, + Delete, +} + impl WriteModel { pub(crate) fn namespace(&self) -> &Namespace { match self { @@ -87,45 +95,55 @@ impl WriteModel { } } - pub(crate) fn operation_name(&self) -> &'static str { + pub(crate) fn operation_type(&self) -> OperationType { match self { - Self::DeleteOne { .. } | Self::DeleteMany { .. } => "delete", - Self::InsertOne { .. } => "insert", - Self::ReplaceOne { .. } | Self::UpdateOne { .. } | Self::UpdateMany { .. } => "update", + Self::InsertOne { .. } => OperationType::Insert, + Self::UpdateOne { .. } | Self::UpdateMany { .. } | Self::ReplaceOne { .. } => { + OperationType::Update + } + Self::DeleteOne { .. } | Self::DeleteMany { .. } => OperationType::Delete, } } - pub(crate) fn to_raw_doc(&self) -> Result { - let mut doc = bson::to_raw_document_buf(&self)?; + /// Whether this operation should apply to all documents that match the filter. Returns None if + /// the operation does not use a filter. + pub(crate) fn multi(&self) -> Option { match self { + Self::UpdateMany { .. } | Self::DeleteMany { .. } => Some(true), Self::UpdateOne { .. } | Self::ReplaceOne { .. } | Self::DeleteOne { .. } => { - doc.append("multi", false); - } - Self::UpdateMany { .. } | Self::DeleteMany { .. } => { - doc.append("multi", true); + Some(false) } - _ => {} + Self::InsertOne { .. } => None, } - Ok(doc) } -} -pub(crate) fn add_ids_to_insert_one_models( - models: &mut [WriteModel], -) -> Result> { - let mut ids = HashMap::new(); - for (i, model) in models.iter_mut().enumerate() { - if let WriteModel::InsertOne { document, .. 
} = model { - let id = match document.get("_id") { - Some(id) => id.clone(), - None => { - let id = ObjectId::new(); - document.insert("_id", id); - Bson::ObjectId(id) - } - }; - ids.insert(i, id); + pub(crate) fn operation_name(&self) -> &'static str { + match self.operation_type() { + OperationType::Insert => "insert", + OperationType::Update => "update", + OperationType::Delete => "delete", } } - Ok(ids) + + // Returns the operation-specific fields that should be included in this model's entry in the + // ops array. Also returns an inserted ID if this is an insert operation. + pub(crate) fn get_ops_document_contents(&self) -> Result<(RawDocumentBuf, Option)> { + let (mut model_document, inserted_id) = match self { + Self::InsertOne { document, .. } => { + let mut insert_document = RawDocumentBuf::from_document(document)?; + let inserted_id = get_or_prepend_id_field(&mut insert_document)?; + (rawdoc! { "document": insert_document }, Some(inserted_id)) + } + _ => { + let model_document = bson::to_raw_document_buf(&self)?; + (model_document, None) + } + }; + + if let Some(multi) = self.multi() { + model_document.append("multi", multi); + } + + Ok((model_document, inserted_id)) + } } diff --git a/src/bson_util.rs b/src/bson_util.rs index d5f4b5047..7de867c3a 100644 --- a/src/bson_util.rs +++ b/src/bson_util.rs @@ -4,7 +4,16 @@ use std::{ }; use crate::{ - bson::{Bson, Document, RawArrayBuf, RawBson, RawBsonRef, RawDocumentBuf}, + bson::{ + oid::ObjectId, + rawdoc, + Bson, + Document, + RawArrayBuf, + RawBson, + RawBsonRef, + RawDocumentBuf, + }, error::{ErrorKind, Result}, runtime::SyncLittleEndianRead, }; @@ -54,16 +63,6 @@ pub(crate) fn to_raw_bson_array(docs: &[Document]) -> Result { Ok(RawBson::Array(array)) } -#[cfg(test)] -pub(crate) fn sort_document(document: &mut Document) { - let temp = std::mem::take(document); - - let mut elements: Vec<_> = temp.into_iter().collect(); - elements.sort_by(|e1, e2| e1.0.cmp(&e2.0)); - - document.extend(elements); -} - pub(crate) fn first_key(document: &Document) -> Option<&str> { document.keys().next().map(String::as_str) } @@ -89,18 +88,26 @@ pub(crate) fn update_document_check(update: &Document) -> Result<()> { } /// The size in bytes of the provided document's entry in a BSON array at the given index. -pub(crate) fn array_entry_size_bytes(index: usize, doc_len: usize) -> u64 { +pub(crate) fn array_entry_size_bytes(index: usize, doc_len: usize) -> usize { // * type (1 byte) // * number of decimal digits in key // * null terminator for the key (1 byte) // * size of value - 1 + num_decimal_digits(index) + 1 + doc_len as u64 + 1 + num_decimal_digits(index) + 1 + doc_len +} + +pub(crate) fn vec_to_raw_array_buf(docs: Vec) -> RawArrayBuf { + let mut array = RawArrayBuf::new(); + for doc in docs { + array.push(doc); + } + array } /// The number of digits in `n` in base 10. /// Useful for calculating the size of an array entry in BSON. -fn num_decimal_digits(mut n: usize) -> u64 { +fn num_decimal_digits(mut n: usize) -> usize { let mut digits = 0; loop { @@ -136,6 +143,30 @@ pub(crate) fn extend_raw_document_buf( Ok(()) } +/// Returns the _id field of this document, prepending the field to the document if one is not +/// already present. +pub(crate) fn get_or_prepend_id_field(doc: &mut RawDocumentBuf) -> Result { + match doc.get("_id")? { + Some(id) => Ok(id.try_into()?), + None => { + let id = ObjectId::new(); + let mut new_bytes = rawdoc! 
{ "_id": id }.into_bytes(); + + // Remove the trailing null byte (which will be replaced by the null byte in the given + // document) and append the document's elements + new_bytes.pop(); + new_bytes.extend(&doc.as_bytes()[4..]); + + let new_length = (new_bytes.len() as i32).to_le_bytes(); + new_bytes[0..4].copy_from_slice(&new_length); + + *doc = RawDocumentBuf::from_bytes(new_bytes)?; + + Ok(id.into()) + } + } +} + #[cfg(test)] mod test { use crate::bson_util::num_decimal_digits; diff --git a/src/client.rs b/src/client.rs index 4ab439eb5..f92a5abb6 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1,6 +1,5 @@ pub mod action; pub mod auth; -pub(crate) mod bulk_write; #[cfg(feature = "in-use-encryption-unstable")] pub(crate) mod csfle; mod executor; @@ -208,6 +207,26 @@ impl Client { )) } + /// Whether commands sent via this client should be auto-encrypted. + pub(crate) async fn should_auto_encrypt(&self) -> bool { + #[cfg(feature = "in-use-encryption-unstable")] + { + let csfle = self.inner.csfle.read().await; + match *csfle { + Some(ref csfle) => csfle + .opts() + .bypass_auto_encryption + .map(|b| !b) + .unwrap_or(true), + None => false, + } + } + #[cfg(not(feature = "in-use-encryption-unstable"))] + { + false + } + } + #[cfg(all(test, feature = "in-use-encryption-unstable"))] pub(crate) async fn mongocryptd_spawned(&self) -> bool { self.inner diff --git a/src/client/bulk_write.rs b/src/client/bulk_write.rs deleted file mode 100644 index 64eab4782..000000000 --- a/src/client/bulk_write.rs +++ /dev/null @@ -1,57 +0,0 @@ -#![allow(missing_docs, unused_variables, dead_code)] - -pub(crate) mod actions; -pub(crate) mod models; -pub(crate) mod results; - -use serde::{Deserialize, Serialize, Serializer}; -use serde_with::skip_serializing_none; - -use crate::{ - bson::{Bson, Document}, - Client, -}; - -use actions::SummaryBulkWriteAction; -use models::WriteModel; - -impl Client { - pub fn bulk_write( - &self, - models: impl IntoIterator, - ) -> SummaryBulkWriteAction { - SummaryBulkWriteAction::new(self.clone(), models.into_iter().collect()) - } -} - -#[skip_serializing_none] -#[derive(Clone, Debug, Default, Deserialize, Serialize)] -#[serde(rename_all = "camelCase")] -#[non_exhaustive] -pub(crate) struct BulkWriteOptions { - #[serde(default = "some_true")] - pub ordered: Option, - pub bypass_document_validation: Option, - pub comment: Option, - #[serde(rename = "let")] - pub let_vars: Option, - #[serialize_always] - #[serde( - alias = "verboseResults", - rename = "errorsOnly", - serialize_with = "serialize_opposite_bool" - )] - pub verbose_results: Option, -} - -fn some_true() -> Option { - Some(true) -} - -fn serialize_opposite_bool( - val: &Option, - serializer: S, -) -> std::result::Result { - let val = !val.unwrap_or(false); - serializer.serialize_bool(val) -} diff --git a/src/client/bulk_write/actions.rs b/src/client/bulk_write/actions.rs deleted file mode 100644 index a4ba3448a..000000000 --- a/src/client/bulk_write/actions.rs +++ /dev/null @@ -1,153 +0,0 @@ -use std::future::IntoFuture; - -use futures_core::future::BoxFuture; -use futures_util::FutureExt; - -use crate::{ - bson::{Bson, Document}, - error::{ErrorKind, Result}, - operation::BulkWrite, - Client, -}; - -use super::{ - models::add_ids_to_insert_one_models, - results::{SummaryBulkWriteResult, VerboseBulkWriteResult}, - BulkWriteOptions, - WriteModel, -}; - -pub struct VerboseBulkWriteAction { - client: Client, - models: Vec, - options: BulkWriteOptions, -} - -impl VerboseBulkWriteAction { - pub fn ordered(mut self, ordered: 
bool) -> Self { - self.options.ordered = Some(ordered); - self - } - - pub fn bypass_document_validation(mut self, bypass_document_validation: bool) -> Self { - self.options.bypass_document_validation = Some(bypass_document_validation); - self - } - - pub fn comment(mut self, comment: impl Into) -> Self { - self.options.comment = Some(comment.into()); - self - } - - pub fn let_vars(mut self, let_vars: Document) -> Self { - self.options.let_vars = Some(let_vars); - self - } -} - -impl IntoFuture for VerboseBulkWriteAction { - type Output = Result; - type IntoFuture = BoxFuture<'static, Self::Output>; - - fn into_future(mut self) -> Self::IntoFuture { - async move { - let inserted_ids = add_ids_to_insert_one_models(&mut self.models)?; - - let bulk_write = BulkWrite { - models: &self.models, - options: self.options, - client: self.client.clone(), - }; - let (mut cursor, summary_info) = - self.client.execute_operation(bulk_write, None).await?; - - let mut results = VerboseBulkWriteResult::new(summary_info, inserted_ids); - while cursor.advance().await? { - let response = cursor.deserialize_current()?; - let model = - self.models - .get(response.index) - .ok_or_else(|| ErrorKind::InvalidResponse { - message: format!( - "unknown index returned from bulkWrite: {}", - response.index - ), - })?; - - match model { - WriteModel::InsertOne { .. } => { - debug_assert!(!response.is_update_result()); - } - WriteModel::UpdateOne { .. } - | WriteModel::UpdateMany { .. } - | WriteModel::ReplaceOne { .. } => { - results.add_update_result(response)?; - } - WriteModel::DeleteOne { .. } | WriteModel::DeleteMany { .. } => { - debug_assert!(!response.is_update_result()); - results.add_delete_result(response); - } - } - } - - Ok(results) - } - .boxed() - } -} - -pub struct SummaryBulkWriteAction { - inner: VerboseBulkWriteAction, -} - -impl SummaryBulkWriteAction { - pub(crate) fn new(client: Client, models: Vec) -> Self { - Self { - inner: VerboseBulkWriteAction { - client, - models, - options: Default::default(), - }, - } - } - - pub fn ordered(self, ordered: bool) -> Self { - Self { - inner: self.inner.ordered(ordered), - } - } - - pub fn bypass_document_validation(self, bypass_document_validation: bool) -> Self { - Self { - inner: self - .inner - .bypass_document_validation(bypass_document_validation), - } - } - - pub fn comment(self, comment: impl Into) -> Self { - Self { - inner: self.inner.comment(comment), - } - } - - pub fn let_vars(self, let_vars: Document) -> Self { - Self { - inner: self.inner.let_vars(let_vars), - } - } - - pub fn verbose_results(mut self) -> VerboseBulkWriteAction { - self.inner.options.verbose_results = Some(true); - self.inner - } -} - -impl IntoFuture for SummaryBulkWriteAction { - type Output = Result; - type IntoFuture = BoxFuture<'static, Self::Output>; - - fn into_future(self) -> Self::IntoFuture { - async move { self.inner.await.map(Into::into) }.boxed() - } -} diff --git a/src/client/bulk_write/results.rs b/src/client/bulk_write/results.rs deleted file mode 100644 index 029b6acd5..000000000 --- a/src/client/bulk_write/results.rs +++ /dev/null @@ -1,107 +0,0 @@ -#![allow(missing_docs)] - -use std::collections::HashMap; - -use serde::Serialize; - -use crate::{ - bson::Bson, - error::{Error, ErrorKind, Result}, - operation::{BulkWriteOperationResponse, BulkWriteSummaryInfo}, - results::{DeleteResult, InsertOneResult, UpdateResult}, - serde_util::serialize_indexed_map, -}; - -#[derive(Clone, Debug, Default, Serialize)] -#[serde(rename_all = "camelCase")] -#[non_exhaustive] -pub 
struct VerboseBulkWriteResult { - pub inserted_count: i64, - pub upserted_count: i64, - pub matched_count: i64, - pub modified_count: i64, - pub deleted_count: i64, - #[serde(serialize_with = "serialize_indexed_map")] - pub insert_results: HashMap, - #[serde(serialize_with = "serialize_indexed_map")] - pub update_results: HashMap, - #[serde(serialize_with = "serialize_indexed_map")] - pub delete_results: HashMap, -} - -impl VerboseBulkWriteResult { - pub(crate) fn new( - summary_info: BulkWriteSummaryInfo, - inserted_ids: HashMap, - ) -> Self { - Self { - inserted_count: summary_info.n_inserted, - upserted_count: summary_info.n_upserted, - matched_count: summary_info.n_matched, - modified_count: summary_info.n_modified, - deleted_count: summary_info.n_deleted, - insert_results: inserted_ids - .into_iter() - .map(|(index, id)| (index, InsertOneResult { inserted_id: id })) - .collect(), - update_results: HashMap::new(), - delete_results: HashMap::new(), - } - } - - pub(crate) fn add_update_result(&mut self, response: BulkWriteOperationResponse) -> Result<()> { - self.update_results - .insert(response.index, response.try_into()?); - Ok(()) - } - - pub(crate) fn add_delete_result(&mut self, response: BulkWriteOperationResponse) { - let delete_result = DeleteResult { - deleted_count: response.n, - }; - self.delete_results.insert(response.index, delete_result); - } -} - -impl TryFrom for UpdateResult { - type Error = Error; - - fn try_from(response: BulkWriteOperationResponse) -> Result { - let modified_count = response - .n_modified - .ok_or_else(|| ErrorKind::InvalidResponse { - message: "missing nModified field in update operation response".into(), - })?; - Ok(Self { - matched_count: response.n, - modified_count, - upserted_id: response.upserted.map(|upserted| upserted.id), - }) - } -} - -#[derive(Clone, Debug, Serialize)] -#[serde(rename_all = "camelCase")] -#[non_exhaustive] -pub struct SummaryBulkWriteResult { - pub inserted_count: i64, - pub upserted_count: i64, - pub matched_count: i64, - pub modified_count: i64, - pub deleted_count: i64, - #[serde(serialize_with = "serialize_indexed_map")] - pub insert_results: HashMap, -} - -impl From for SummaryBulkWriteResult { - fn from(verbose_result: VerboseBulkWriteResult) -> Self { - Self { - inserted_count: verbose_result.inserted_count, - upserted_count: verbose_result.upserted_count, - matched_count: verbose_result.matched_count, - modified_count: verbose_result.modified_count, - deleted_count: verbose_result.deleted_count, - insert_results: verbose_result.insert_results, - } - } -} diff --git a/src/client/executor.rs b/src/client/executor.rs index 0a3b0c249..693c4546b 100644 --- a/src/client/executor.rs +++ b/src/client/executor.rs @@ -7,6 +7,7 @@ use once_cell::sync::Lazy; use serde::de::DeserializeOwned; use std::{ + borrow::BorrowMut, collections::HashSet, sync::{atomic::Ordering, Arc}, time::Instant, @@ -53,6 +54,7 @@ use crate::{ CommandErrorBody, CommitTransaction, Operation, + OperationResponse, Retryability, }, options::{ChangeStreamOptions, SelectionCriteria}, @@ -90,17 +92,17 @@ impl Client { /// sessions and an explicit session is not provided. 
pub(crate) async fn execute_operation( &self, - op: T, + mut op: impl BorrowMut, session: impl Into>, ) -> Result { - self.execute_operation_with_details(op, session) + self.execute_operation_with_details(op.borrow_mut(), session) .await .map(|details| details.output) } async fn execute_operation_with_details( &self, - op: T, + op: &mut T, session: impl Into>, ) -> Result> { if self.inner.shutdown.executed.load(Ordering::SeqCst) { @@ -144,12 +146,17 @@ impl Client { /// Execute the given operation, returning the cursor created by the operation. /// /// Server selection be will performed using the criteria specified on the operation, if any. - pub(crate) async fn execute_cursor_operation(&self, op: Op) -> Result> + pub(crate) async fn execute_cursor_operation( + &self, + mut op: impl BorrowMut, + ) -> Result> where Op: Operation, { Box::pin(async { - let mut details = self.execute_operation_with_details(op, None).await?; + let mut details = self + .execute_operation_with_details(op.borrow_mut(), None) + .await?; let pinned = self.pin_connection_for_cursor(&details.output, &mut details.connection)?; Ok(Cursor::new( @@ -164,14 +171,14 @@ impl Client { pub(crate) async fn execute_session_cursor_operation( &self, - op: Op, + mut op: impl BorrowMut, session: &mut ClientSession, ) -> Result> where Op: Operation, { let mut details = self - .execute_operation_with_details(op, &mut *session) + .execute_operation_with_details(op.borrow_mut(), &mut *session) .await?; let pinned = @@ -229,10 +236,10 @@ impl Client { let mut implicit_session = resume_data .as_mut() .and_then(|rd| rd.implicit_session.take()); - let op = ChangeStreamAggregate::new(&args, resume_data)?; + let mut op = ChangeStreamAggregate::new(&args, resume_data)?; let mut details = self - .execute_operation_with_details(op, implicit_session.as_mut()) + .execute_operation_with_details(&mut op, implicit_session.as_mut()) .await?; if let Some(session) = implicit_session { details.implicit_session = Some(session); @@ -264,10 +271,10 @@ impl Client { target, options, }; - let op = ChangeStreamAggregate::new(&args, resume_data)?; + let mut op = ChangeStreamAggregate::new(&args, resume_data)?; let mut details = self - .execute_operation_with_details(op, &mut *session) + .execute_operation_with_details(&mut op, &mut *session) .await?; let (cursor_spec, cs_data) = details.output; let pinned = @@ -283,7 +290,7 @@ impl Client { /// session. Retries the operation upon failure if retryability is supported. 
async fn execute_operation_with_retry( &self, - mut op: T, + op: &mut T, mut session: Option<&mut ClientSession>, ) -> Result> { // If the current transaction has been committed/aborted and it is not being @@ -330,7 +337,7 @@ impl Client { }; let server_addr = server.address.clone(); - let mut conn = match get_connection(&session, &op, &server.pool).await { + let mut conn = match get_connection(&session, op, &server.pool).await { Ok(c) => c, Err(mut err) => { retry.first_error()?; @@ -340,7 +347,7 @@ impl Client { err.add_label(RETRYABLE_WRITE_ERROR); } - let op_retry = match self.get_op_retryability(&op, &session) { + let op_retry = match self.get_op_retryability(op, &session) { Retryability::Read => err.is_read_retryable(), Retryability::Write => err.is_write_retryable(), _ => false, @@ -371,7 +378,7 @@ impl Client { session = implicit_session.as_mut(); } - let retryability = self.get_retryability(&conn, &op, &session)?; + let retryability = self.get_retryability(&conn, op, &session)?; if retryability == Retryability::None { retry.first_error()?; } @@ -383,7 +390,7 @@ impl Client { let details = match self .execute_operation_on_connection( - &mut op, + op, &mut conn, &mut session, txn_number, @@ -757,7 +764,15 @@ impl Client { } }; - match op.handle_response(response, connection.stream_description()?) { + let response_result = match op.handle_response( + response, + connection.stream_description()?, + session.as_deref_mut(), + ) { + OperationResponse::Sync(result) => result, + OperationResponse::Async(future) => future.await, + }; + match response_result { Ok(response) => Ok(response), Err(mut err) => { err.add_labels_and_update_pin( diff --git a/src/cmap/conn/command.rs b/src/cmap/conn/command.rs index 933125156..05059731f 100644 --- a/src/cmap/conn/command.rs +++ b/src/cmap/conn/command.rs @@ -199,18 +199,6 @@ impl RawCommandResponse { }) } - /// Initialize a response from a document. - #[cfg(test)] - pub(crate) fn with_document(doc: Document) -> Result { - Self::with_document_and_address( - ServerAddress::Tcp { - host: "localhost".to_string(), - port: None, - }, - doc, - ) - } - pub(crate) fn new(source: ServerAddress, message: Message) -> Self { Self::new_raw(source, message.document_payload) } diff --git a/src/cmap/conn/stream_description.rs b/src/cmap/conn/stream_description.rs index b24164e26..405806ab9 100644 --- a/src/cmap/conn/stream_description.rs +++ b/src/cmap/conn/stream_description.rs @@ -78,29 +78,4 @@ impl StreamDescription { && self.logical_session_timeout.is_some() && self.max_wire_version.map_or(false, |version| version >= 6) } - - /// Gets a description of a stream for a 4.2 connection. - #[cfg(test)] - pub(crate) fn new_testing() -> Self { - Self::with_wire_version(8) - } - - /// Gets a description of a stream for a connection to a server with the provided - /// maxWireVersion. 
- #[cfg(test)] - pub(crate) fn with_wire_version(max_wire_version: i32) -> Self { - Self { - server_address: Default::default(), - initial_server_type: Default::default(), - max_wire_version: Some(max_wire_version), - min_wire_version: Some(max_wire_version), - sasl_supported_mechs: Default::default(), - logical_session_timeout: Some(Duration::from_secs(30 * 60)), - max_bson_object_size: 16 * 1024 * 1024, - max_write_batch_size: 100_000, - hello_ok: false, - max_message_size_bytes: 48_000_000, - service_id: None, - } - } } diff --git a/src/coll.rs b/src/coll.rs index 8e2fad232..2b3175f9f 100644 --- a/src/coll.rs +++ b/src/coll.rs @@ -484,10 +484,6 @@ where } let ordered = options.as_ref().and_then(|o| o.ordered).unwrap_or(true); - #[cfg(feature = "in-use-encryption-unstable")] - let encrypted = self.client().auto_encryption_opts().await.is_some(); - #[cfg(not(feature = "in-use-encryption-unstable"))] - let encrypted = false; let mut cumulative_failure: Option = None; let mut error_labels: HashSet = Default::default(); @@ -501,7 +497,7 @@ where self.namespace(), docs, options.clone(), - encrypted, + self.client().should_auto_encrypt().await, self.inner.human_readable_serialization, ); @@ -626,16 +622,11 @@ where let mut options = options.into(); resolve_write_concern_with_session!(self, options, session.as_ref())?; - #[cfg(feature = "in-use-encryption-unstable")] - let encrypted = self.client().auto_encryption_opts().await.is_some(); - #[cfg(not(feature = "in-use-encryption-unstable"))] - let encrypted = false; - let insert = Insert::new( self.namespace(), vec![doc], options.map(InsertManyOptions::from_insert_one_options), - encrypted, + self.client().should_auto_encrypt().await, self.inner.human_readable_serialization, ); self.client() @@ -758,14 +749,6 @@ impl Namespace { } } - #[cfg(test)] - pub(crate) fn empty() -> Self { - Self { - db: String::new(), - coll: String::new(), - } - } - pub(crate) fn from_str(s: &str) -> Option { let mut parts = s.split('.'); @@ -793,24 +776,16 @@ impl<'de> Deserialize<'de> for Namespace { where D: Deserializer<'de>, { - #[derive(Deserialize)] - struct NamespaceHelper { - db: String, - coll: String, - } #[derive(Deserialize)] #[serde(untagged)] - enum NamespaceOptions { + enum NamespaceHelper { String(String), - Object(NamespaceHelper), + Object { db: String, coll: String }, } - match NamespaceOptions::deserialize(deserializer)? { - NamespaceOptions::String(string) => Self::from_str(&string) + match NamespaceHelper::deserialize(deserializer)? { + NamespaceHelper::String(string) => Self::from_str(&string) .ok_or_else(|| D::Error::custom("Missing one or more fields in namespace")), - NamespaceOptions::Object(object) => Ok(Self { - db: object.db, - coll: object.coll, - }), + NamespaceHelper::Object { db, coll } => Ok(Self { db, coll }), } } } diff --git a/src/cursor/common.rs b/src/cursor/common.rs index 30d05ca0a..4c8bc3b5a 100644 --- a/src/cursor/common.rs +++ b/src/cursor/common.rs @@ -443,21 +443,6 @@ impl CursorSpecification { pub(crate) fn id(&self) -> i64 { self.info.id } - - #[cfg(test)] - pub(crate) fn address(&self) -> &ServerAddress { - &self.info.address - } - - #[cfg(test)] - pub(crate) fn batch_size(&self) -> Option { - self.info.batch_size - } - - #[cfg(test)] - pub(crate) fn max_time(&self) -> Option { - self.info.max_time - } } /// Static information about a cursor. 
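As a usage reference for the pieces this patch adds (Client::bulk_write, the option_setters!-generated builder methods on BulkWrite, and the ErrorKind::ClientBulkWrite variant introduced in src/error.rs below), here is a minimal crate-internal sketch in the spirit of src/test/bulk_write.rs. It is illustrative only: the database name, collection name, and documents are assumptions made up for the example, and the import paths assume the pub(crate) module layout shown above.

// Illustrative sketch only; the namespace and documents below are invented for the example.
use crate::{
    action::bulk_write::write_models::WriteModel,
    bson::doc,
    error::{ErrorKind, Result},
    Client,
    Namespace,
};

async fn bulk_write_example(client: &Client) -> Result<()> {
    let namespace = Namespace {
        db: "db".to_string(),
        coll: "coll".to_string(),
    };
    let models = vec![
        WriteModel::InsertOne {
            namespace: namespace.clone(),
            document: doc! { "x": 1 },
        },
        WriteModel::InsertOne {
            namespace,
            document: doc! { "x": 2 },
        },
    ];

    // verbose_results(true) is serialized as errorsOnly: false, so the server returns
    // per-operation results and the insert/update/delete result maps are populated.
    match client
        .bulk_write(models)
        .ordered(true)
        .verbose_results(true)
        .await
    {
        Ok(result) => {
            println!("inserted {} document(s)", result.inserted_count);
        }
        Err(error) => {
            // Per-operation failures and any partial result surface through
            // ErrorKind::ClientBulkWrite, keyed by the index of the failed write model.
            if let ErrorKind::ClientBulkWrite(ref bulk_write_error) = *error.kind {
                for (index, write_error) in &bulk_write_error.write_errors {
                    eprintln!("operation {} failed: {:?}", index, write_error);
                }
            }
            return Err(error);
        }
    }

    Ok(())
}

The batching loop in src/action/bulk_write.rs drives this end to end: each BulkWriteOperation reports n_attempted for its server round trip, and ExecutionStatus decides whether the remaining models are sent based on whether the failure was a top-level error or an individual write error during an ordered bulk write.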
diff --git a/src/error.rs b/src/error.rs index b1dda0098..b3efa73d2 100644 --- a/src/error.rs +++ b/src/error.rs @@ -16,6 +16,13 @@ use bson::Bson; use serde::{Deserialize, Serialize}; use thiserror::Error; +use crate::{ + action::bulk_write::error::BulkWriteError as ClientBulkWriteError, + bson::Document, + options::ServerAddress, + sdam::{ServerType, TopologyVersion}, +}; + const RECOVERING_CODES: [i32; 5] = [11600, 11602, 13436, 189, 91]; const NOTWRITABLEPRIMARY_CODES: [i32; 3] = [10107, 13435, 10058]; const SHUTTING_DOWN_CODES: [i32; 2] = [11600, 91]; @@ -250,7 +257,13 @@ impl Error { /// Whether this error contains the specified label. pub fn contains_label>(&self, label: T) -> bool { - self.labels().contains(label.as_ref()) + let label = label.as_ref(); + self.labels().contains(label) + || self + .source + .as_ref() + .map(|source| source.contains_label(label)) + .unwrap_or(false) } /// Adds the given label to this error. @@ -300,7 +313,7 @@ impl Error { } /// Gets the code from this error. - #[allow(unused)] + #[cfg(test)] pub(crate) fn code(&self) -> Option { match self.kind.as_ref() { ErrorKind::Command(command_error) => Some(command_error.code), @@ -450,6 +463,10 @@ impl Error { /// sensitive commands. Currently, the only field besides those that we expose is the /// error message. pub(crate) fn redact(&mut self) { + if let Some(source) = self.source.as_deref_mut() { + source.redact(); + } + // This is intentionally written without a catch-all branch so that if new error // kinds are added we remember to reason about whether they need to be redacted. match *self.kind { @@ -463,6 +480,14 @@ impl Error { wce.redact(); } } + ErrorKind::ClientBulkWrite(ref mut client_bulk_write_error) => { + for write_concern_error in client_bulk_write_error.write_concern_errors.iter_mut() { + write_concern_error.redact(); + } + for (_, write_error) in client_bulk_write_error.write_errors.iter_mut() { + write_error.redact(); + } + } ErrorKind::Command(ref mut command_error) => { command_error.redact(); } @@ -574,6 +599,9 @@ pub enum ErrorKind { #[error("An error occurred when trying to execute a write operation: {0:?}")] BulkWrite(BulkWriteFailure), + #[error("An error occurred when executing Client::bulk_write: {0:?}")] + ClientBulkWrite(ClientBulkWriteError), + /// The server returned an error to an attempted operation. #[error("Command failed: {0}")] // note that if this Display impl changes, COMMAND_ERROR_REGEX in the unified runner matching @@ -876,6 +904,7 @@ impl WriteFailure { } } + #[cfg(test)] pub(crate) fn code(&self) -> i32 { match self { Self::WriteConcernError(e) => e.code, diff --git a/src/operation.rs b/src/operation.rs index 5007786cb..486a5dd47 100644 --- a/src/operation.rs +++ b/src/operation.rs @@ -1,6 +1,6 @@ mod abort_transaction; pub(crate) mod aggregate; -mod bulk_write; +pub(crate) mod bulk_write; mod commit_transaction; pub(crate) mod count; pub(crate) mod count_documents; @@ -24,9 +24,6 @@ pub(crate) mod run_cursor_command; mod search_index; mod update; -#[cfg(test)] -mod test; - use std::{collections::VecDeque, fmt::Debug, ops::Deref}; use bson::{RawBsonRef, RawDocument, RawDocumentBuf, Timestamp}; @@ -49,6 +46,8 @@ use crate::{ }, options::WriteConcern, selection_criteria::SelectionCriteria, + BoxFuture, + ClientSession, Namespace, }; @@ -73,9 +72,33 @@ const SERVER_4_2_0_WIRE_VERSION: i32 = 8; const SERVER_4_4_0_WIRE_VERSION: i32 = 9; // The maximum number of bytes that may be included in a write payload when auto-encryption is // enabled. 
-const MAX_ENCRYPTED_WRITE_SIZE: u64 = 2_097_152; +const MAX_ENCRYPTED_WRITE_SIZE: usize = 2_097_152; // The amount of overhead bytes to account for when building a document sequence. -const COMMAND_OVERHEAD_SIZE: u64 = 16_000; +const COMMAND_OVERHEAD_SIZE: usize = 16_000; + +pub(crate) enum OperationResponse<'a, O> { + Sync(Result), + Async(BoxFuture<'a, Result>), +} + +impl<'a, O> OperationResponse<'a, O> { + /// Returns the sync result contained within this `OperationResponse`. Use responsibly, when it + /// is known that the response is not async. + fn get_sync_result(self) -> Result { + match self { + Self::Sync(result) => result, + Self::Async(_) => unreachable!(), + } + } +} + +macro_rules! handle_response_sync { + ($result:block) => { + let result = || $result; + OperationResponse::Sync(result()) + }; +} +use handle_response_sync; /// A trait modeling the behavior of a server side operation. /// @@ -100,11 +123,12 @@ pub(crate) trait Operation { fn extract_at_cluster_time(&self, _response: &RawDocument) -> Result>; /// Interprets the server response to the command. - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - description: &StreamDescription, - ) -> Result; + description: &'a StreamDescription, + session: Option<&'a mut ClientSession>, + ) -> OperationResponse<'a, Self::O>; /// Interpret an error encountered while sending the built command to the server, potentially /// recovering. @@ -413,11 +437,12 @@ pub(crate) trait OperationWithDefaults { } /// Interprets the server response to the command. - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - description: &StreamDescription, - ) -> Result; + description: &'a StreamDescription, + session: Option<&'a mut ClientSession>, + ) -> OperationResponse<'a, Self::O>; /// Interpret an error encountered while sending the built command to the server, potentially /// recovering. @@ -479,12 +504,13 @@ impl Operation for T { fn extract_at_cluster_time(&self, response: &RawDocument) -> Result> { self.extract_at_cluster_time(response) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - description: &StreamDescription, - ) -> Result { - self.handle_response(response, description) + description: &'a StreamDescription, + session: Option<&'a mut ClientSession>, + ) -> OperationResponse<'a, Self::O> { + self.handle_response(response, description, session) } fn handle_error(&self, error: Error) -> Result { self.handle_error(error) diff --git a/src/operation/abort_transaction.rs b/src/operation/abort_transaction.rs index 7a08861dd..90de0dc50 100644 --- a/src/operation/abort_transaction.rs +++ b/src/operation/abort_transaction.rs @@ -8,9 +8,10 @@ use crate::{ operation::Retryability, options::WriteConcern, selection_criteria::SelectionCriteria, + ClientSession, }; -use super::{OperationWithDefaults, WriteConcernOnlyBody}; +use super::{handle_response_sync, OperationResponse, OperationWithDefaults, WriteConcernOnlyBody}; pub(crate) struct AbortTransaction { write_concern: Option, @@ -53,9 +54,12 @@ impl OperationWithDefaults for AbortTransaction { &self, response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - let response: WriteConcernOnlyBody = response.body()?; - response.validate() + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! 
{{ + let response: WriteConcernOnlyBody = response.body()?; + response.validate() + }} } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/aggregate.rs b/src/operation/aggregate.rs index 3234f2e1e..c4ef14506 100644 --- a/src/operation/aggregate.rs +++ b/src/operation/aggregate.rs @@ -1,8 +1,5 @@ pub(crate) mod change_stream; -#[cfg(test)] -mod test; - use crate::{ bson::{doc, Bson, Document}, bson_util, @@ -11,11 +8,14 @@ use crate::{ error::Result, operation::{append_options, remove_empty_write_concern, Retryability}, options::{AggregateOptions, SelectionCriteria, WriteConcern}, + ClientSession, Namespace, }; use super::{ + handle_response_sync, CursorBody, + OperationResponse, OperationWithDefaults, WriteConcernOnlyBody, SERVER_4_2_0_WIRE_VERSION, @@ -30,11 +30,6 @@ pub(crate) struct Aggregate { } impl Aggregate { - #[cfg(test)] - fn empty() -> Self { - Self::new(Namespace::empty(), Vec::new(), None) - } - pub(crate) fn new( target: impl Into, pipeline: impl IntoIterator, @@ -91,28 +86,31 @@ impl OperationWithDefaults for Aggregate { &self, response: RawCommandResponse, description: &StreamDescription, - ) -> Result { - let cursor_response: CursorBody = response.body()?; - - if self.is_out_or_merge() { - let wc_error_info = response.body::()?; - wc_error_info.validate()?; - }; - - // The comment should only be propagated to getMore calls on 4.4+. - let comment = if description.max_wire_version.unwrap_or(0) < SERVER_4_4_0_WIRE_VERSION { - None - } else { - self.options.as_ref().and_then(|opts| opts.comment.clone()) - }; - - Ok(CursorSpecification::new( - cursor_response.cursor, - description.server_address.clone(), - self.options.as_ref().and_then(|opts| opts.batch_size), - self.options.as_ref().and_then(|opts| opts.max_await_time), - comment, - )) + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ + let cursor_response: CursorBody = response.body()?; + + if self.is_out_or_merge() { + let wc_error_info = response.body::()?; + wc_error_info.validate()?; + }; + + // The comment should only be propagated to getMore calls on 4.4+. + let comment = if description.max_wire_version.unwrap_or(0) < SERVER_4_4_0_WIRE_VERSION { + None + } else { + self.options.as_ref().and_then(|opts| opts.comment.clone()) + }; + + Ok(CursorSpecification::new( + cursor_response.cursor, + description.server_address.clone(), + self.options.as_ref().and_then(|opts| opts.batch_size), + self.options.as_ref().and_then(|opts| opts.max_await_time), + comment, + )) + }} } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/aggregate/change_stream.rs b/src/operation/aggregate/change_stream.rs index 50a108cc3..19ab5c0b3 100644 --- a/src/operation/aggregate/change_stream.rs +++ b/src/operation/aggregate/change_stream.rs @@ -6,9 +6,10 @@ use crate::{ error::Result, operation::{append_options, OperationWithDefaults, Retryability}, options::{ChangeStreamOptions, SelectionCriteria, WriteConcern}, + ClientSession, }; -use super::Aggregate; +use super::{handle_response_sync, Aggregate, OperationResponse}; pub(crate) struct ChangeStreamAggregate { inner: Aggregate, @@ -86,31 +87,37 @@ impl OperationWithDefaults for ChangeStreamAggregate { &self, response: RawCommandResponse, description: &StreamDescription, - ) -> Result { - let op_time = response - .raw_body() - .get("operationTime")? 
- .and_then(bson::RawBsonRef::as_timestamp); - let spec = self.inner.handle_response(response, description)?; - - let mut data = ChangeStreamData { - resume_token: ResumeToken::initial(self.args.options.as_ref(), &spec), - ..ChangeStreamData::default() - }; - let has_no_time = |o: &ChangeStreamOptions| { - o.start_at_operation_time.is_none() - && o.resume_after.is_none() - && o.start_after.is_none() - }; - if self.args.options.as_ref().map_or(true, has_no_time) - && description.max_wire_version.map_or(false, |v| v >= 7) - && spec.initial_buffer.is_empty() - && spec.post_batch_resume_token.is_none() - { - data.initial_operation_time = op_time; - } + session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ + let op_time = response + .raw_body() + .get("operationTime")? + .and_then(bson::RawBsonRef::as_timestamp); + let spec = self + .inner + .handle_response(response, description, session) + .get_sync_result()?; + + let mut data = ChangeStreamData { + resume_token: ResumeToken::initial(self.args.options.as_ref(), &spec), + ..ChangeStreamData::default() + }; + let has_no_time = |o: &ChangeStreamOptions| { + o.start_at_operation_time.is_none() + && o.resume_after.is_none() + && o.start_after.is_none() + }; + if self.args.options.as_ref().map_or(true, has_no_time) + && description.max_wire_version.map_or(false, |v| v >= 7) + && spec.initial_buffer.is_empty() + && spec.post_batch_resume_token.is_none() + { + data.initial_operation_time = op_time; + } - Ok((spec, data)) + Ok((spec, data)) + }} } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index 6acc144ab..59b028358 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -1,152 +1,359 @@ #![allow(unused_variables, dead_code)] +mod server_responses; + use std::collections::HashMap; -use serde::Deserialize; +use futures_core::TryStream; +use futures_util::{FutureExt, TryStreamExt}; use crate::{ - bson::{rawdoc, Bson, RawArrayBuf, RawDocumentBuf}, - bson_util, - client::bulk_write::{models::WriteModel, BulkWriteOptions}, + action::bulk_write::{ + error::BulkWriteError, + results::BulkWriteResult, + write_models::{OperationType, WriteModel}, + BulkWriteOptions, + }, + bson::{rawdoc, Bson, RawDocumentBuf}, + bson_util::{self, array_entry_size_bytes, extend_raw_document_buf, vec_to_raw_array_buf}, cmap::{Command, RawCommandResponse, StreamDescription}, cursor::CursorSpecification, - error::{Error, Result}, + error::{Error, ErrorKind, Result}, operation::OperationWithDefaults, + results::{DeleteResult, InsertOneResult, UpdateResult}, Client, + ClientSession, Cursor, Namespace, + SessionCursor, +}; + +use super::{ + OperationResponse, + Retryability, + WriteResponseBody, + COMMAND_OVERHEAD_SIZE, + MAX_ENCRYPTED_WRITE_SIZE, }; -use super::{CursorInfo, WriteResponseBody}; +use server_responses::*; pub(crate) struct BulkWrite<'a> { - pub(crate) models: &'a [WriteModel], - pub(crate) options: BulkWriteOptions, - pub(crate) client: Client, + client: Client, + models: &'a [WriteModel], + offset: usize, + options: Option<&'a BulkWriteOptions>, + encrypted: bool, + /// The _ids of the inserted documents. This value is populated in `build`. + inserted_ids: HashMap, + /// The number of writes that were sent to the server. This value is populated in `build`. 
+ pub(crate) n_attempted: usize, } + +impl<'a> BulkWrite<'a> { + pub(crate) async fn new( + client: Client, + models: &'a [WriteModel], + offset: usize, + options: Option<&'a BulkWriteOptions>, + ) -> Self { + let encrypted = client.should_auto_encrypt().await; + Self { + client, + models, + offset, + options, + encrypted, + n_attempted: 0, + inserted_ids: HashMap::new(), + } + } + + async fn iterate_results_cursor( + &self, + mut stream: impl TryStream + Unpin, + error: &mut BulkWriteError, + ) -> Result<()> { + let result = &mut error.partial_result; + + while let Some(response) = stream.try_next().await? { + match response.result { + SingleOperationResult::Success { + n, + n_modified, + upserted, + } => { + let result_index = response.index + self.offset; + let model = self.get_model(response.index)?; + match model.operation_type() { + OperationType::Insert => { + let inserted_id = self.get_inserted_id(result_index)?; + let insert_result = InsertOneResult { inserted_id }; + result + .get_or_insert_with(Default::default) + .add_insert_result(result_index, insert_result); + } + OperationType::Update => { + let modified_count = + n_modified.ok_or_else(|| ErrorKind::InvalidResponse { + message: "nModified value not returned for update bulkWrite \ + operation" + .into(), + })?; + let update_result = UpdateResult { + matched_count: n, + modified_count, + upserted_id: upserted.map(|upserted| upserted.id), + }; + result + .get_or_insert_with(Default::default) + .add_update_result(result_index, update_result); + } + OperationType::Delete => { + let delete_result = DeleteResult { deleted_count: n }; + result + .get_or_insert_with(Default::default) + .add_delete_result(result_index, delete_result); + } + } + } + SingleOperationResult::Error(write_error) => { + error.write_errors.insert(response.index, write_error); + } + } + } + + Ok(()) + } + + fn get_model(&self, index: usize) -> Result<&WriteModel> { + self.models.get(index).ok_or_else(|| { + ErrorKind::InvalidResponse { + message: format!("invalid operation index returned from bulkWrite: {}", index), + } + .into() + }) + } + + fn get_inserted_id(&self, index: usize) -> Result { + match self.inserted_ids.get(&index) { + Some(inserted_id) => Ok(inserted_id.clone()), + None => Err(ErrorKind::InvalidResponse { + message: format!("invalid index returned for insert operation: {}", index), + } + .into()), + } + } +} + /// A helper struct for tracking namespace information. struct NamespaceInfo<'a> { - namespaces: RawArrayBuf, - // Cache the namespaces and their indexes to avoid traversing the namespaces array each time a - // namespace is looked up or added. + namespaces: Vec, + /// Cache the namespaces and their indexes to avoid traversing the namespaces array each time a + /// namespace is looked up or added. cache: HashMap<&'a Namespace, usize>, } impl<'a> NamespaceInfo<'a> { fn new() -> Self { Self { - namespaces: RawArrayBuf::new(), + namespaces: Vec::new(), cache: HashMap::new(), } } /// Gets the index for the given namespace in the nsInfo list, adding it to the list if it is /// not already present. - fn get_index(&mut self, namespace: &'a Namespace) -> usize { + fn get_index(&mut self, namespace: &'a Namespace) -> (usize, usize) { match self.cache.get(namespace) { - Some(index) => *index, + Some(index) => (*index, 0), None => { - self.namespaces - .push(rawdoc! { "ns": namespace.to_string() }); + let namespace_doc = rawdoc! 
{ "ns": namespace.to_string() }; + let length_added = namespace_doc.as_bytes().len(); + self.namespaces.push(namespace_doc); let next_index = self.cache.len(); self.cache.insert(namespace, next_index); - next_index + (next_index, length_added) } } } } impl<'a> OperationWithDefaults for BulkWrite<'a> { - type O = (Cursor, BulkWriteSummaryInfo); + type O = BulkWriteResult; type Command = RawDocumentBuf; const NAME: &'static str = "bulkWrite"; fn build(&mut self, description: &StreamDescription) -> Result> { + let max_operations = description.max_write_batch_size; + let max_doc_size = description.max_bson_object_size as usize; + let max_message_size = description.max_message_size_bytes as usize - COMMAND_OVERHEAD_SIZE; + let mut namespace_info = NamespaceInfo::new(); - let mut ops = RawArrayBuf::new(); - for model in self.models { - let namespace_index = namespace_info.get_index(model.namespace()); + let mut ops = Vec::new(); + let mut size = 0; + for (i, model) in self.models.iter().take(max_operations as usize).enumerate() { + let (namespace_index, namespace_size) = namespace_info.get_index(model.namespace()); - let mut model_doc = rawdoc! { model.operation_name(): namespace_index as i32 }; - let model_fields = model.to_raw_doc()?; - bson_util::extend_raw_document_buf(&mut model_doc, model_fields)?; + let mut operation = rawdoc! { model.operation_name(): namespace_index as i32 }; + let (model_doc, inserted_id) = model.get_ops_document_contents()?; + extend_raw_document_buf(&mut operation, model_doc)?; - ops.push(model_doc); + let operation_size = operation.as_bytes().len(); + if operation_size > max_doc_size { + return Err(ErrorKind::InvalidArgument { + message: format!( + "bulk write operations must be within {} bytes, but document provided is \ + {} bytes", + max_doc_size, operation_size + ), + } + .into()); + } + + if let Some(inserted_id) = inserted_id { + self.inserted_ids.insert(i, inserted_id); + } + + let mut split = false; + if self.encrypted && i != 0 { + let model_entry_size = array_entry_size_bytes(i, operation_size); + let namespace_entry_size = if namespace_size > 0 { + array_entry_size_bytes(namespace_index, namespace_size) + } else { + 0 + }; + if size + model_entry_size + namespace_entry_size > MAX_ENCRYPTED_WRITE_SIZE { + split = true; + } + } else if size + namespace_size + operation_size > max_message_size { + split = true; + } + + if split { + // Remove the namespace doc from the list if one was added for this operation + if namespace_size > 0 { + let last_index = namespace_info.namespaces.len() - 1; + namespace_info.namespaces.remove(last_index); + } + break; + } else { + size += namespace_size + operation_size; + ops.push(operation); + } } - let mut command = rawdoc! { - Self::NAME: 1, - "ops": ops, - "nsInfo": namespace_info.namespaces, - }; + let mut body = rawdoc! 
{ Self::NAME: 1 }; + let options = match self.options { + Some(options) => bson::to_raw_document_buf(options), + None => bson::to_raw_document_buf(&BulkWriteOptions::default()), + }?; + bson_util::extend_raw_document_buf(&mut body, options)?; - let options = bson::to_raw_document_buf(&self.options)?; - bson_util::extend_raw_document_buf(&mut command, options)?; + self.n_attempted = ops.len(); - Ok(Command::new(Self::NAME, "admin", command)) + if self.encrypted { + body.append("nsInfo", vec_to_raw_array_buf(namespace_info.namespaces)); + body.append("ops", vec_to_raw_array_buf(ops)); + Ok(Command::new(Self::NAME, "admin", body)) + } else { + let mut command = Command::new(Self::NAME, "admin", body); + command.add_document_sequence("nsInfo", namespace_info.namespaces); + command.add_document_sequence("ops", ops); + Ok(command) + } } - fn handle_response( - &self, + fn handle_response<'b>( + &'b self, response: RawCommandResponse, - description: &StreamDescription, - ) -> Result { - let response: WriteResponseBody = response.body()?; - - let specification = CursorSpecification::new( - response.body.cursor, - description.server_address.clone(), - None, - None, - None, - ); - let cursor = Cursor::new(self.client.clone(), specification, None, None); - - Ok((cursor, response.body.summary)) - } + description: &'b StreamDescription, + session: Option<&'b mut ClientSession>, + ) -> OperationResponse<'b, Self::O> { + OperationResponse::Async( + async move { + let response: WriteResponseBody = response.body()?; - fn handle_error(&self, error: Error) -> Result { - Err(error) - } -} + let mut bulk_write_error = BulkWriteError::default(); -#[derive(Deserialize)] -struct BulkWriteResponse { - cursor: CursorInfo, - #[serde(flatten)] - summary: BulkWriteSummaryInfo, -} + // A partial result with summary info should only be created if one or more + // operations were successful. 
+ if response.summary.n_errors < self.n_attempted as i64 { + bulk_write_error + .partial_result + .get_or_insert_with(Default::default) + .populate_summary_info(&response.summary); + } -#[derive(Deserialize)] -#[serde(rename_all = "camelCase")] -pub(crate) struct BulkWriteSummaryInfo { - pub(crate) n_inserted: i64, - pub(crate) n_matched: i64, - pub(crate) n_modified: i64, - pub(crate) n_upserted: i64, - pub(crate) n_deleted: i64, -} + if let Some(write_concern_error) = response.write_concern_error { + bulk_write_error + .write_concern_errors + .push(write_concern_error); + } -#[derive(Deserialize)] -#[serde(rename_all = "camelCase")] -pub(crate) struct BulkWriteOperationResponse { - #[serde(rename = "idx")] - pub(crate) index: usize, - pub(crate) n: u64, - pub(crate) n_modified: Option, - pub(crate) upserted: Option, -} + let specification = CursorSpecification::new( + response.body.cursor, + description.server_address.clone(), + None, + None, + self.options.and_then(|options| options.comment.clone()), + ); + let iteration_result = match session { + Some(session) => { + let mut session_cursor = + SessionCursor::new(self.client.clone(), specification, None); + self.iterate_results_cursor( + session_cursor.stream(session), + &mut bulk_write_error, + ) + .await + } + None => { + let cursor = Cursor::new(self.client.clone(), specification, None, None); + self.iterate_results_cursor(cursor, &mut bulk_write_error) + .await + } + }; -#[derive(Deserialize)] -pub(crate) struct UpsertedId { - #[serde(rename = "_id")] - pub(crate) id: Bson, -} + match iteration_result { + Ok(()) => { + if bulk_write_error.write_errors.is_empty() + && bulk_write_error.write_concern_errors.is_empty() + { + Ok(bulk_write_error.partial_result.unwrap_or_default()) + } else { + let error = Error::new( + ErrorKind::ClientBulkWrite(bulk_write_error), + response.labels, + ); + Err(error) + } + } + Err(error) => { + let error = Error::new( + ErrorKind::ClientBulkWrite(bulk_write_error), + response.labels, + ) + .with_source(error); + Err(error) + } + } + } + .boxed(), + ) + } + + fn handle_error(&self, error: Error) -> Result { + Err(error) + } -impl BulkWriteOperationResponse { - pub(crate) fn is_update_result(&self) -> bool { - self.n_modified.is_some() || self.upserted.is_some() + fn retryability(&self) -> Retryability { + if self.models.iter().any(|model| model.multi() == Some(true)) { + Retryability::None + } else { + Retryability::Write + } } } diff --git a/src/operation/commit_transaction.rs b/src/operation/commit_transaction.rs index 3c3d455d6..f035d673d 100644 --- a/src/operation/commit_transaction.rs +++ b/src/operation/commit_transaction.rs @@ -7,9 +7,10 @@ use crate::{ error::Result, operation::{append_options, remove_empty_write_concern, OperationWithDefaults, Retryability}, options::{Acknowledgment, TransactionOptions, WriteConcern}, + ClientSession, }; -use super::WriteConcernOnlyBody; +use super::{handle_response_sync, OperationResponse, WriteConcernOnlyBody}; pub(crate) struct CommitTransaction { options: Option, @@ -46,9 +47,12 @@ impl OperationWithDefaults for CommitTransaction { &self, response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - let response: WriteConcernOnlyBody = response.body()?; - response.validate() + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! 
{{ + let response: WriteConcernOnlyBody = response.body()?; + response.validate() + }} } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/count.rs b/src/operation/count.rs index 7091b1991..76a3fe3e7 100644 --- a/src/operation/count.rs +++ b/src/operation/count.rs @@ -8,8 +8,11 @@ use crate::{ error::{Error, Result}, operation::{append_options, OperationWithDefaults, Retryability}, selection_criteria::SelectionCriteria, + ClientSession, }; +use super::{handle_response_sync, OperationResponse}; + pub(crate) struct Count { ns: Namespace, options: Option, @@ -46,9 +49,12 @@ impl OperationWithDefaults for Count { &self, response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - let response_body: ResponseBody = response.body()?; - Ok(response_body.n) + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ + let response_body: ResponseBody = response.body()?; + Ok(response_body.n) + }} } fn handle_error(&self, error: Error) -> Result { diff --git a/src/operation/count_documents.rs b/src/operation/count_documents.rs index db7160394..515e8e3a8 100644 --- a/src/operation/count_documents.rs +++ b/src/operation/count_documents.rs @@ -2,16 +2,24 @@ use std::convert::TryInto; use serde::Deserialize; -use super::{OperationWithDefaults, Retryability, SingleCursorResult}; use crate::{ + bson::{doc, Document, RawDocument}, cmap::{Command, RawCommandResponse, StreamDescription}, error::{Error, ErrorKind, Result}, operation::aggregate::Aggregate, options::{AggregateOptions, CountOptions}, selection_criteria::SelectionCriteria, + ClientSession, Namespace, }; -use bson::{doc, Document, RawDocument}; + +use super::{ + handle_response_sync, + OperationResponse, + OperationWithDefaults, + Retryability, + SingleCursorResult, +}; pub(crate) struct CountDocuments { aggregate: Aggregate, @@ -91,9 +99,12 @@ impl OperationWithDefaults for CountDocuments { &self, response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - let response: SingleCursorResult = response.body()?; - Ok(response.0.map(|r| r.n).unwrap_or(0)) + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ + let response: SingleCursorResult = response.body()?; + Ok(response.0.map(|r| r.n).unwrap_or(0)) + }} } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/create.rs b/src/operation/create.rs index 533d96776..54f9f411c 100644 --- a/src/operation/create.rs +++ b/src/operation/create.rs @@ -11,9 +11,12 @@ use crate::{ WriteConcernOnlyBody, }, options::{CreateCollectionOptions, WriteConcern}, + ClientSession, Namespace, }; +use super::{handle_response_sync, OperationResponse}; + #[derive(Debug)] pub(crate) struct Create { ns: Namespace, @@ -51,9 +54,12 @@ impl OperationWithDefaults for Create { &self, response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - let response: WriteConcernOnlyBody = response.body()?; - response.validate() + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! 
{{ + let response: WriteConcernOnlyBody = response.body()?; + response.validate() + }} } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/create_indexes.rs b/src/operation/create_indexes.rs index 247e6cb40..317ddde0c 100644 --- a/src/operation/create_indexes.rs +++ b/src/operation/create_indexes.rs @@ -1,6 +1,3 @@ -#[cfg(test)] -mod test; - use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -9,10 +6,11 @@ use crate::{ operation::{append_options, remove_empty_write_concern, OperationWithDefaults}, options::{CreateIndexOptions, WriteConcern}, results::CreateIndexesResult, + ClientSession, Namespace, }; -use super::WriteConcernOnlyBody; +use super::{handle_response_sync, OperationResponse, WriteConcernOnlyBody}; #[derive(Debug)] pub(crate) struct CreateIndexes { @@ -33,18 +31,6 @@ impl CreateIndexes { options, } } - - #[cfg(test)] - pub(crate) fn with_indexes(indexes: Vec) -> Self { - Self { - ns: Namespace { - db: String::new(), - coll: String::new(), - }, - indexes, - options: None, - } - } } impl OperationWithDefaults for CreateIndexes { @@ -89,11 +75,14 @@ impl OperationWithDefaults for CreateIndexes { &self, response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - let response: WriteConcernOnlyBody = response.body()?; - response.validate()?; - let index_names = self.indexes.iter().filter_map(|i| i.get_name()).collect(); - Ok(CreateIndexesResult { index_names }) + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ + let response: WriteConcernOnlyBody = response.body()?; + response.validate()?; + let index_names = self.indexes.iter().filter_map(|i| i.get_name()).collect(); + Ok(CreateIndexesResult { index_names }) + }} } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/create_indexes/test.rs b/src/operation/create_indexes/test.rs deleted file mode 100644 index 59d1ab6d2..000000000 --- a/src/operation/create_indexes/test.rs +++ /dev/null @@ -1,85 +0,0 @@ -use std::time::Duration; - -use crate::{ - bson::doc, - cmap::StreamDescription, - coll::{ - options::{CommitQuorum, CreateIndexOptions}, - Namespace, - }, - concern::WriteConcern, - index::{options::IndexOptions, IndexModel}, - operation::{test::handle_response_test, CreateIndexes, Operation}, - results::CreateIndexesResult, -}; - -#[test] -fn build() { - let ns = Namespace { - db: "test_db".to_string(), - coll: "test_coll".to_string(), - }; - - let index_options = IndexOptions::builder() - .name(Some("foo".to_string())) - .build(); - let index_model = IndexModel::builder() - .keys(doc! { "x": 1 }) - .options(Some(index_options)) - .build(); - let create_options = CreateIndexOptions::builder() - .commit_quorum(Some(CommitQuorum::Majority)) - .max_time(Some(Duration::from_millis(42))) - .write_concern(Some(WriteConcern::builder().journal(Some(true)).build())) - .build(); - let mut create_indexes = CreateIndexes::new(ns, vec![index_model], Some(create_options)); - - let cmd = create_indexes - .build(&StreamDescription::with_wire_version(10)) - .expect("CreateIndexes command failed to build when it should have succeeded."); - - assert_eq!( - cmd.body, - doc! { - "createIndexes": "test_coll", - "indexes": [{ - "key": { "x": 1 }, - "name": "foo" - }], - "commitQuorum": "majority", - "maxTimeMS": 42, - "writeConcern": { "j": true }, - } - ) -} - -#[test] -fn handle_success() { - let a = IndexModel::builder() - .keys(doc! 
{ "a": 1 }) - .options(Some( - IndexOptions::builder().name(Some("a".to_string())).build(), - )) - .build(); - let b = IndexModel::builder() - .keys(doc! { "b": 1 }) - .options(Some( - IndexOptions::builder().name(Some("b".to_string())).build(), - )) - .build(); - let op = CreateIndexes::with_indexes(vec![a, b]); - - let response = doc! { - "ok": 1, - "createdCollectionAutomatically": false, - "numIndexesBefore": 1, - "numIndexesAfter": 3, - "commitQuorum": "votingMembers", - }; - - let expected_values = CreateIndexesResult { - index_names: vec!["a".to_string(), "b".to_string()], - }; - let actual_values = handle_response_test(&op, response).unwrap(); - assert_eq!(actual_values, expected_values); -} diff --git a/src/operation/delete.rs b/src/operation/delete.rs index 7458011d4..23fa2aaa5 100644 --- a/src/operation/delete.rs +++ b/src/operation/delete.rs @@ -1,6 +1,3 @@ -#[cfg(test)] -mod test; - use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -16,8 +13,11 @@ use crate::{ }, options::{DeleteOptions, Hint, WriteConcern}, results::DeleteResult, + ClientSession, }; +use super::{handle_response_sync, OperationResponse}; + #[derive(Debug)] pub(crate) struct Delete { ns: Namespace, @@ -29,19 +29,6 @@ pub(crate) struct Delete { } impl Delete { - #[cfg(test)] - fn empty() -> Self { - Self::new( - Namespace { - db: String::new(), - coll: String::new(), - }, - Document::new(), - None, - None, - ) - } - pub(crate) fn new( ns: Namespace, filter: Document, @@ -99,13 +86,16 @@ impl OperationWithDefaults for Delete { &self, response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - let response: WriteResponseBody = response.body()?; - response.validate().map_err(convert_bulk_errors)?; - - Ok(DeleteResult { - deleted_count: response.n, - }) + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ + let response: WriteResponseBody = response.body()?; + response.validate().map_err(convert_bulk_errors)?; + + Ok(DeleteResult { + deleted_count: response.n, + }) + }} } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/delete/test.rs b/src/operation/delete/test.rs deleted file mode 100644 index 50f9427cf..000000000 --- a/src/operation/delete/test.rs +++ /dev/null @@ -1,196 +0,0 @@ -use pretty_assertions::assert_eq; - -use crate::{ - bson::doc, - bson_util, - cmap::StreamDescription, - concern::{Acknowledgment, WriteConcern}, - error::{ErrorKind, WriteConcernError, WriteError, WriteFailure}, - operation::{test::handle_response_test, Delete, Operation}, - options::DeleteOptions, - Namespace, -}; - -#[test] -fn build_many() { - let ns = Namespace { - db: "test_db".to_string(), - coll: "test_coll".to_string(), - }; - let filter = doc! { "x": { "$gt": 1 } }; - - let wc = WriteConcern { - w: Some(Acknowledgment::Majority), - ..Default::default() - }; - let options = DeleteOptions::builder().write_concern(wc).build(); - - let mut op = Delete::new(ns, filter.clone(), None, Some(options)); - - let description = StreamDescription::new_testing(); - let mut cmd = op.build(&description).unwrap(); - - assert_eq!(cmd.name.as_str(), "delete"); - assert_eq!(cmd.target_db.as_str(), "test_db"); - - let mut expected_body = doc! 
{ - "delete": "test_coll", - "deletes": [ - { - "q": filter, - "limit": 0, - } - ], - "writeConcern": { - "w": "majority" - }, - "ordered": true, - }; - - bson_util::sort_document(&mut cmd.body); - bson_util::sort_document(&mut expected_body); - - assert_eq!(cmd.body, expected_body); -} - -#[test] -fn build_one() { - let ns = Namespace { - db: "test_db".to_string(), - coll: "test_coll".to_string(), - }; - let filter = doc! { "x": { "$gt": 1 } }; - - let wc = WriteConcern { - w: Some(Acknowledgment::Majority), - ..Default::default() - }; - let options = DeleteOptions::builder().write_concern(wc).build(); - - let mut op = Delete::new(ns, filter.clone(), Some(1), Some(options)); - - let description = StreamDescription::new_testing(); - let mut cmd = op.build(&description).unwrap(); - - assert_eq!(cmd.name.as_str(), "delete"); - assert_eq!(cmd.target_db.as_str(), "test_db"); - - let mut expected_body = doc! { - "delete": "test_coll", - "deletes": [ - { - "q": filter, - "limit": 1, - } - ], - "writeConcern": { - "w": "majority" - }, - "ordered": true, - }; - - bson_util::sort_document(&mut cmd.body); - bson_util::sort_document(&mut expected_body); - - assert_eq!(cmd.body, expected_body); -} - -#[test] -fn handle_success() { - let op = Delete::empty(); - - let delete_result = handle_response_test( - &op, - doc! { - "ok": 1.0, - "n": 3 - }, - ) - .expect("should succeed"); - assert_eq!(delete_result.deleted_count, 3); -} - -#[test] -fn handle_invalid_response() { - let op = Delete::empty(); - handle_response_test( - &op, - doc! { - "ok": 1.0, - "asffasdf": 123123 - }, - ) - .expect_err("should fail"); -} - -#[test] -fn handle_write_failure() { - let op = Delete::empty(); - - let write_error_response = doc! { - "ok": 1.0, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 1234, - "errmsg": "my error string" - } - ] - }; - let write_error = handle_response_test(&op, write_error_response).unwrap_err(); - match *write_error.kind { - ErrorKind::Write(WriteFailure::WriteError(ref error)) => { - let expected_err = WriteError { - code: 1234, - code_name: None, - message: "my error string".to_string(), - details: None, - }; - assert_eq!(error, &expected_err); - } - ref e => panic!("expected write error, got {:?}", e), - }; -} - -#[test] -fn handle_write_concern_failure() { - let op = Delete::empty(); - - let wc_error_response = doc! { - "ok": 1.0, - "n": 0, - "writeConcernError": { - "code": 456, - "codeName": "wcError", - "errmsg": "some message", - "errInfo": { - "writeConcern": { - "w": 2, - "wtimeout": 0, - "provenance": "clientSupplied" - } - } - } - }; - - let wc_error = handle_response_test(&op, wc_error_response) - .expect_err("should fail with write concern error"); - match *wc_error.kind { - ErrorKind::Write(WriteFailure::WriteConcernError(ref wc_error)) => { - let expected_wc_err = WriteConcernError { - code: 456, - code_name: "wcError".to_string(), - message: "some message".to_string(), - details: Some(doc! 
{ "writeConcern": { - "w": 2, - "wtimeout": 0, - "provenance": "clientSupplied" - } }), - labels: vec![], - }; - assert_eq!(wc_error, &expected_wc_err); - } - ref e => panic!("expected write concern error, got {:?}", e), - } -} diff --git a/src/operation/distinct.rs b/src/operation/distinct.rs index 37bb37bae..ea48b7313 100644 --- a/src/operation/distinct.rs +++ b/src/operation/distinct.rs @@ -8,8 +8,11 @@ use crate::{ error::Result, operation::{append_options, OperationWithDefaults, Retryability}, selection_criteria::SelectionCriteria, + ClientSession, }; +use super::{handle_response_sync, OperationResponse}; + pub(crate) struct Distinct { ns: Namespace, field_name: String, @@ -72,9 +75,12 @@ impl OperationWithDefaults for Distinct { &self, response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - let response: Response = response.body()?; - Ok(response.values) + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ + let response: Response = response.body()?; + Ok(response.values) + }} } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/drop_collection.rs b/src/operation/drop_collection.rs index abd85f593..686c28565 100644 --- a/src/operation/drop_collection.rs +++ b/src/operation/drop_collection.rs @@ -11,9 +11,12 @@ use crate::{ WriteConcernOnlyBody, }, options::{DropCollectionOptions, WriteConcern}, + ClientSession, Namespace, }; +use super::{handle_response_sync, OperationResponse}; + #[derive(Debug)] pub(crate) struct DropCollection { ns: Namespace, @@ -51,9 +54,12 @@ impl OperationWithDefaults for DropCollection { &self, response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - let response: WriteConcernOnlyBody = response.body()?; - response.validate() + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ + let response: WriteConcernOnlyBody = response.body()?; + response.validate() + }} } fn handle_error(&self, error: Error) -> Result { diff --git a/src/operation/drop_database.rs b/src/operation/drop_database.rs index cb1712e8c..3525345e8 100644 --- a/src/operation/drop_database.rs +++ b/src/operation/drop_database.rs @@ -12,8 +12,11 @@ use crate::{ WriteConcernOnlyBody, }, options::WriteConcern, + ClientSession, }; +use super::{handle_response_sync, OperationResponse}; + #[derive(Debug)] pub(crate) struct DropDatabase { target_db: String, @@ -51,9 +54,12 @@ impl OperationWithDefaults for DropDatabase { &self, response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - let response: WriteConcernOnlyBody = response.body()?; - response.validate() + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! 
{{ + let response: WriteConcernOnlyBody = response.body()?; + response.validate() + }} } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/drop_indexes.rs b/src/operation/drop_indexes.rs index dd7c9fbd5..8441f62ed 100644 --- a/src/operation/drop_indexes.rs +++ b/src/operation/drop_indexes.rs @@ -1,15 +1,15 @@ -#[cfg(test)] -mod test; - use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, error::Result, operation::{append_options, remove_empty_write_concern, OperationWithDefaults}, options::{DropIndexOptions, WriteConcern}, + ClientSession, Namespace, }; +use super::{handle_response_sync, OperationResponse}; + pub(crate) struct DropIndexes { ns: Namespace, name: String, @@ -20,18 +20,6 @@ impl DropIndexes { pub(crate) fn new(ns: Namespace, name: String, options: Option) -> Self { Self { ns, name, options } } - - #[cfg(test)] - pub(crate) fn empty() -> Self { - Self { - ns: Namespace { - db: String::new(), - coll: String::new(), - }, - name: String::new(), - options: None, - } - } } impl OperationWithDefaults for DropIndexes { @@ -59,8 +47,9 @@ impl OperationWithDefaults for DropIndexes { &self, _response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - Ok(()) + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ Ok(()) }} } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/drop_indexes/test.rs b/src/operation/drop_indexes/test.rs deleted file mode 100644 index 3a6b1fd13..000000000 --- a/src/operation/drop_indexes/test.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::time::Duration; - -use crate::{ - bson::doc, - cmap::StreamDescription, - coll::{options::DropIndexOptions, Namespace}, - concern::WriteConcern, - operation::{test::handle_response_test, DropIndexes, Operation}, -}; - -#[test] -fn build() { - let ns = Namespace { - db: "test_db".to_string(), - coll: "test_coll".to_string(), - }; - - let options = DropIndexOptions::builder() - .max_time(Some(Duration::from_secs(1))) - .write_concern(Some(WriteConcern::builder().journal(Some(true)).build())) - .build(); - - let mut drop_index = DropIndexes::new(ns, "foo".to_string(), Some(options)); - let cmd = drop_index - .build(&StreamDescription::new_testing()) - .expect("DropIndex command failed to build when it should have succeeded."); - assert_eq!( - cmd.body, - doc! { - "dropIndexes": "test_coll", - "index": "foo", - "maxTimeMS": 1000, - "writeConcern": { "j": true }, - } - ) -} - -#[test] -fn handle_success() { - let op = DropIndexes::empty(); - let response = doc! 
{ "ok": 1 }; - handle_response_test(&op, response).unwrap(); -} diff --git a/src/operation/find.rs b/src/operation/find.rs index 947fc23cc..647362f7b 100644 --- a/src/operation/find.rs +++ b/src/operation/find.rs @@ -1,6 +1,3 @@ -#[cfg(test)] -mod test; - use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -14,9 +11,12 @@ use crate::{ SERVER_4_4_0_WIRE_VERSION, }, options::{CursorType, FindOptions, SelectionCriteria}, + ClientSession, Namespace, }; +use super::{handle_response_sync, OperationResponse}; + #[derive(Debug)] pub(crate) struct Find { ns: Namespace, @@ -25,18 +25,6 @@ pub(crate) struct Find { } impl Find { - #[cfg(test)] - fn empty() -> Self { - Self::new( - Namespace { - db: String::new(), - coll: String::new(), - }, - None, - None, - ) - } - pub(crate) fn new( ns: Namespace, filter: Option, @@ -122,25 +110,28 @@ impl OperationWithDefaults for Find { &self, response: RawCommandResponse, description: &StreamDescription, - ) -> Result { - let response: CursorBody = response.body()?; - - // The comment should only be propagated to getMore calls on 4.4+. - let comment = if description.max_wire_version.unwrap_or(0) < SERVER_4_4_0_WIRE_VERSION { - None - } else { - self.options - .as_ref() - .and_then(|opts| opts.comment_bson.clone()) - }; + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ + let response: CursorBody = response.body()?; + + // The comment should only be propagated to getMore calls on 4.4+. + let comment = if description.max_wire_version.unwrap_or(0) < SERVER_4_4_0_WIRE_VERSION { + None + } else { + self.options + .as_ref() + .and_then(|opts| opts.comment_bson.clone()) + }; - Ok(CursorSpecification::new( - response.cursor, - description.server_address.clone(), - self.options.as_ref().and_then(|opts| opts.batch_size), - self.options.as_ref().and_then(|opts| opts.max_await_time), - comment, - )) + Ok(CursorSpecification::new( + response.cursor, + description.server_address.clone(), + self.options.as_ref().and_then(|opts| opts.batch_size), + self.options.as_ref().and_then(|opts| opts.max_await_time), + comment, + )) + }} } fn supports_read_concern(&self, _description: &StreamDescription) -> bool { diff --git a/src/operation/find/test.rs b/src/operation/find/test.rs deleted file mode 100644 index 57bb6dd28..000000000 --- a/src/operation/find/test.rs +++ /dev/null @@ -1,80 +0,0 @@ -use std::time::Duration; - -use crate::{ - bson::doc, - operation::{ - test::{self, handle_response_test}, - Find, - }, - options::{CursorType, FindOptions}, - Namespace, -}; - -#[test] -fn op_selection_criteria() { - test::op_selection_criteria(|selection_criteria| { - let options = FindOptions { - selection_criteria, - ..Default::default() - }; - Find::new(Namespace::empty(), None, Some(options)) - }); -} - -fn verify_max_await_time(max_await_time: Option, cursor_type: Option) { - let ns = Namespace::empty(); - let find = Find::new( - ns, - None, - Some(FindOptions { - cursor_type, - max_await_time, - ..Default::default() - }), - ); - - let spec = handle_response_test( - &find, - doc! 
{ - "cursor": { - "id": 123, - "ns": "a.b", - "firstBatch": [], - }, - "ok": 1 - }, - ) - .unwrap(); - assert_eq!(spec.max_time(), max_await_time); -} - -#[test] -fn handle_max_await_time() { - verify_max_await_time(None, None); - verify_max_await_time(Some(Duration::from_millis(5)), None); - verify_max_await_time( - Some(Duration::from_millis(5)), - Some(CursorType::NonTailable), - ); - verify_max_await_time(Some(Duration::from_millis(5)), Some(CursorType::Tailable)); - verify_max_await_time( - Some(Duration::from_millis(5)), - Some(CursorType::TailableAwait), - ); -} - -#[test] -fn handle_invalid_response() { - let find = Find::empty(); - - let garbled = doc! { "asdfasf": "ASdfasdf" }; - handle_response_test(&find, garbled).unwrap_err(); - - let missing_cursor_field = doc! { - "cursor": { - "ns": "test.test", - "firstBatch": [], - } - }; - handle_response_test(&find, missing_cursor_field).unwrap_err(); -} diff --git a/src/operation/find_and_modify.rs b/src/operation/find_and_modify.rs index 04e6a538d..924d67909 100644 --- a/src/operation/find_and_modify.rs +++ b/src/operation/find_and_modify.rs @@ -28,8 +28,11 @@ use crate::{ Retryability, }, options::WriteConcern, + ClientSession, }; +use super::{handle_response_sync, OperationResponse}; + pub(crate) struct FindAndModify<'a, R, T: DeserializeOwned> { ns: Namespace, query: Document, @@ -142,25 +145,28 @@ impl<'a, R: Serialize, T: DeserializeOwned> OperationWithDefaults for FindAndMod &self, response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - #[derive(Debug, Deserialize)] - pub(crate) struct Response { - value: RawBson, - } - let response: Response = response.body()?; - - match response.value { - RawBson::Document(doc) => Ok(Some(from_slice(doc.as_bytes())?)), - RawBson::Null => Ok(None), - other => Err(ErrorKind::InvalidResponse { - message: format!( - "expected document for value field of findAndModify response, but instead got \ - {:?}", - other - ), + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! 
{{ + #[derive(Debug, Deserialize)] + pub(crate) struct Response { + value: RawBson, } - .into()), - } + let response: Response = response.body()?; + + match response.value { + RawBson::Document(doc) => Ok(Some(from_slice(doc.as_bytes())?)), + RawBson::Null => Ok(None), + other => Err(ErrorKind::InvalidResponse { + message: format!( + "expected document for value field of findAndModify response, but instead \ + got {:?}", + other + ), + } + .into()), + } + }} } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/get_more.rs b/src/operation/get_more.rs index 2fb4f77a8..f1dc91a06 100644 --- a/src/operation/get_more.rs +++ b/src/operation/get_more.rs @@ -1,6 +1,3 @@ -#[cfg(test)] -mod test; - use std::{collections::VecDeque, time::Duration}; use bson::{Document, RawDocumentBuf}; @@ -15,9 +12,12 @@ use crate::{ operation::OperationWithDefaults, options::SelectionCriteria, results::GetMoreResult, + ClientSession, Namespace, }; +use super::{handle_response_sync, OperationResponse}; + #[derive(Debug)] pub(crate) struct GetMore<'conn> { ns: Namespace, @@ -88,16 +88,21 @@ impl<'conn> OperationWithDefaults for GetMore<'conn> { &self, response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - let response: GetMoreResponseBody = response.body()?; - - Ok(GetMoreResult { - batch: response.cursor.next_batch, - exhausted: response.cursor.id == 0, - post_batch_resume_token: ResumeToken::from_raw(response.cursor.post_batch_resume_token), - id: response.cursor.id, - ns: Namespace::from_str(response.cursor.ns.as_str()).unwrap(), - }) + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ + let response: GetMoreResponseBody = response.body()?; + + Ok(GetMoreResult { + batch: response.cursor.next_batch, + exhausted: response.cursor.id == 0, + post_batch_resume_token: ResumeToken::from_raw( + response.cursor.post_batch_resume_token, + ), + id: response.cursor.id, + ns: Namespace::from_str(response.cursor.ns.as_str()).unwrap(), + }) + }} } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/get_more/test.rs b/src/operation/get_more/test.rs deleted file mode 100644 index d93b2a746..000000000 --- a/src/operation/get_more/test.rs +++ /dev/null @@ -1,47 +0,0 @@ -use crate::{ - cursor::CursorInformation, - operation::{GetMore, Operation}, - options::ServerAddress, - sdam::{ServerDescription, ServerInfo, ServerType}, - Namespace, -}; - -#[test] -fn op_selection_criteria() { - let address = ServerAddress::Tcp { - host: "myhost.com".to_string(), - port: Some(1234), - }; - - let info = CursorInformation { - ns: Namespace::empty(), - address: address.clone(), - id: 123, - batch_size: None, - max_time: None, - comment: None, - }; - let get_more = GetMore::new(info, None); - let server_description = ServerDescription { - address, - server_type: ServerType::Unknown, - reply: Ok(None), - last_update_time: None, - average_round_trip_time: None, - }; - let server_info = ServerInfo::new_borrowed(&server_description); - - let predicate = get_more - .selection_criteria() - .expect("should not be none") - .as_predicate() - .expect("should be predicate"); - assert!(predicate(&server_info)); - - let server_description = ServerDescription { - address: ServerAddress::default(), - ..server_description - }; - let server_info = ServerInfo::new_borrowed(&server_description); - assert!(!predicate(&server_info)); -} diff --git a/src/operation/insert.rs b/src/operation/insert.rs index 083fb7a87..82f6ec4f9 
100644 --- a/src/operation/insert.rs +++ b/src/operation/insert.rs @@ -1,24 +1,32 @@ -#[cfg(test)] -mod test; +use std::collections::HashMap; -use std::{collections::HashMap, convert::TryInto}; - -use bson::{oid::ObjectId, Bson, RawArrayBuf, RawDocumentBuf}; +use bson::{Bson, RawDocumentBuf}; use serde::Serialize; use crate::{ bson::rawdoc, - bson_util, + bson_util::{ + array_entry_size_bytes, + extend_raw_document_buf, + get_or_prepend_id_field, + vec_to_raw_array_buf, + }, cmap::{Command, RawCommandResponse, StreamDescription}, error::{BulkWriteFailure, Error, ErrorKind, Result}, operation::{OperationWithDefaults, Retryability, WriteResponseBody}, options::{InsertManyOptions, WriteConcern}, results::InsertManyResult, serde_util, + ClientSession, Namespace, }; -use super::{COMMAND_OVERHEAD_SIZE, MAX_ENCRYPTED_WRITE_SIZE}; +use super::{ + handle_response_sync, + OperationResponse, + COMMAND_OVERHEAD_SIZE, + MAX_ENCRYPTED_WRITE_SIZE, +}; #[derive(Debug)] pub(crate) struct Insert<'a, T> { @@ -64,9 +72,9 @@ impl<'a, T: Serialize> OperationWithDefaults for Insert<'a, T> { let mut docs = Vec::new(); let mut size = 0; - let max_doc_size = description.max_bson_object_size as u64; + let max_doc_size = description.max_bson_object_size as usize; let max_doc_sequence_size = - description.max_message_size_bytes as u64 - COMMAND_OVERHEAD_SIZE; + description.max_message_size_bytes as usize - COMMAND_OVERHEAD_SIZE; for (i, d) in self .documents @@ -76,31 +84,9 @@ impl<'a, T: Serialize> OperationWithDefaults for Insert<'a, T> { { let mut doc = serde_util::to_raw_document_buf_with_options(d, self.human_readable_serialization)?; - let id = match doc.get("_id")? { - Some(b) => b.try_into()?, - None => { - let mut new_doc = RawDocumentBuf::new(); - let oid = ObjectId::new(); - new_doc.append("_id", oid); - - let mut new_bytes = new_doc.into_bytes(); - new_bytes.pop(); // remove trailing null byte - - let mut bytes = doc.into_bytes(); - let oid_slice = &new_bytes[4..]; - // insert oid at beginning of document - bytes.splice(4..4, oid_slice.iter().cloned()); - - // overwrite old length - let new_length = (bytes.len() as i32).to_le_bytes(); - bytes[0..4].copy_from_slice(&new_length); - doc = RawDocumentBuf::from_bytes(bytes)?; - - Bson::ObjectId(oid) - } - }; + let id = get_or_prepend_id_field(&mut doc)?; - let doc_size = doc.as_bytes().len() as u64; + let doc_size = doc.as_bytes().len(); if doc_size > max_doc_size { return Err(ErrorKind::InvalidArgument { message: format!( @@ -116,7 +102,7 @@ impl<'a, T: Serialize> OperationWithDefaults for Insert<'a, T> { // automatic encryption. I.e. if a single document has size larger than 2MiB (but less // than `maxBsonObjectSize`) proceed with automatic encryption. 
if self.encrypted && i != 0 { - let doc_entry_size = bson_util::array_entry_size_bytes(i, doc.as_bytes().len()); + let doc_entry_size = array_entry_size_bytes(i, doc.as_bytes().len()); if size + doc_entry_size >= MAX_ENCRYPTED_WRITE_SIZE { break; } @@ -134,15 +120,11 @@ impl<'a, T: Serialize> OperationWithDefaults for Insert<'a, T> { }; let options_doc = bson::to_raw_document_buf(&self.options)?; - bson_util::extend_raw_document_buf(&mut body, options_doc)?; + extend_raw_document_buf(&mut body, options_doc)?; if self.encrypted { // Auto-encryption does not support document sequences - let mut raw_array = RawArrayBuf::new(); - for doc in docs { - raw_array.push(doc); - } - body.append("documents", raw_array); + body.append("documents", vec_to_raw_array_buf(docs)); Ok(Command::new(Self::NAME, &self.ns.db, body)) } else { let mut command = Command::new(Self::NAME, &self.ns.db, body); @@ -155,46 +137,49 @@ impl<'a, T: Serialize> OperationWithDefaults for Insert<'a, T> { &self, raw_response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - let response: WriteResponseBody = raw_response.body_utf8_lossy()?; - - let mut map = HashMap::new(); - if self.options.ordered == Some(true) { - // in ordered inserts, only the first n were attempted. - for (i, id) in self - .inserted_ids - .iter() - .enumerate() - .take(response.n as usize) - { - map.insert(i, id.clone()); - } - } else { - // for unordered, add all the attempted ids and then remove the ones that have - // associated write errors. - for (i, id) in self.inserted_ids.iter().enumerate() { - map.insert(i, id.clone()); - } + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ + let response: WriteResponseBody = raw_response.body_utf8_lossy()?; + + let mut map = HashMap::new(); + if self.options.ordered == Some(true) { + // in ordered inserts, only the first n were attempted. + for (i, id) in self + .inserted_ids + .iter() + .enumerate() + .take(response.n as usize) + { + map.insert(i, id.clone()); + } + } else { + // for unordered, add all the attempted ids and then remove the ones that have + // associated write errors. 
+ for (i, id) in self.inserted_ids.iter().enumerate() { + map.insert(i, id.clone()); + } - if let Some(write_errors) = response.write_errors.as_ref() { - for err in write_errors { - map.remove(&err.index); + if let Some(write_errors) = response.write_errors.as_ref() { + for err in write_errors { + map.remove(&err.index); + } } } - } - if response.write_errors.is_some() || response.write_concern_error.is_some() { - return Err(Error::new( - ErrorKind::BulkWrite(BulkWriteFailure { - write_errors: response.write_errors, - write_concern_error: response.write_concern_error, - inserted_ids: map, - }), - response.labels, - )); - } + if response.write_errors.is_some() || response.write_concern_error.is_some() { + return Err(Error::new( + ErrorKind::BulkWrite(BulkWriteFailure { + write_errors: response.write_errors, + write_concern_error: response.write_concern_error, + inserted_ids: map, + }), + response.labels, + )); + } - Ok(InsertManyResult { inserted_ids: map }) + Ok(InsertManyResult { inserted_ids: map }) + }} } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/list_collections.rs b/src/operation/list_collections.rs index e8cfa163d..5b82894b3 100644 --- a/src/operation/list_collections.rs +++ b/src/operation/list_collections.rs @@ -5,8 +5,11 @@ use crate::{ error::Result, operation::{append_options, CursorBody, OperationWithDefaults, Retryability}, options::{ListCollectionsOptions, ReadPreference, SelectionCriteria}, + ClientSession, }; +use super::{handle_response_sync, OperationResponse}; + #[derive(Debug)] pub(crate) struct ListCollections { db: String, @@ -56,15 +59,18 @@ impl OperationWithDefaults for ListCollections { &self, raw_response: RawCommandResponse, description: &StreamDescription, - ) -> Result { - let response: CursorBody = raw_response.body()?; - Ok(CursorSpecification::new( - response.cursor, - description.server_address.clone(), - self.options.as_ref().and_then(|opts| opts.batch_size), - None, - None, - )) + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ + let response: CursorBody = raw_response.body()?; + Ok(CursorSpecification::new( + response.cursor, + description.server_address.clone(), + self.options.as_ref().and_then(|opts| opts.batch_size), + None, + None, + )) + }} } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/list_databases.rs b/src/operation/list_databases.rs index 1a82a7cf8..663d34e62 100644 --- a/src/operation/list_databases.rs +++ b/src/operation/list_databases.rs @@ -8,8 +8,11 @@ use crate::{ error::Result, operation::{append_options, OperationWithDefaults, Retryability}, selection_criteria::{ReadPreference, SelectionCriteria}, + ClientSession, }; +use super::{handle_response_sync, OperationResponse}; + #[derive(Debug)] pub(crate) struct ListDatabases { name_only: bool, @@ -47,9 +50,12 @@ impl OperationWithDefaults for ListDatabases { &self, raw_response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - let response: Response = raw_response.body()?; - Ok(response.databases) + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! 
{{ + let response: Response = raw_response.body()?; + Ok(response.databases) + }} } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/list_indexes.rs b/src/operation/list_indexes.rs index b8986816a..c5543e493 100644 --- a/src/operation/list_indexes.rs +++ b/src/operation/list_indexes.rs @@ -6,13 +6,11 @@ use crate::{ operation::{append_options, OperationWithDefaults}, options::ListIndexesOptions, selection_criteria::{ReadPreference, SelectionCriteria}, + ClientSession, Namespace, }; -use super::{CursorBody, Retryability}; - -#[cfg(test)] -mod test; +use super::{handle_response_sync, CursorBody, OperationResponse, Retryability}; pub(crate) struct ListIndexes { ns: Namespace, @@ -23,17 +21,6 @@ impl ListIndexes { pub(crate) fn new(ns: Namespace, options: Option) -> Self { ListIndexes { ns, options } } - - #[cfg(test)] - pub(crate) fn empty() -> Self { - Self { - ns: Namespace { - db: String::new(), - coll: String::new(), - }, - options: None, - } - } } impl OperationWithDefaults for ListIndexes { @@ -62,15 +49,18 @@ impl OperationWithDefaults for ListIndexes { &self, raw_response: RawCommandResponse, description: &StreamDescription, - ) -> Result { - let response: CursorBody = raw_response.body()?; - Ok(CursorSpecification::new( - response.cursor, - description.server_address.clone(), - self.options.as_ref().and_then(|o| o.batch_size), - self.options.as_ref().and_then(|o| o.max_time), - None, - )) + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ + let response: CursorBody = raw_response.body()?; + Ok(CursorSpecification::new( + response.cursor, + description.server_address.clone(), + self.options.as_ref().and_then(|o| o.batch_size), + self.options.as_ref().and_then(|o| o.max_time), + None, + )) + }} } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/list_indexes/test.rs b/src/operation/list_indexes/test.rs deleted file mode 100644 index 317327313..000000000 --- a/src/operation/list_indexes/test.rs +++ /dev/null @@ -1,90 +0,0 @@ -use std::time::Duration; - -use crate::{ - bson::doc, - client::options::ServerAddress, - cmap::StreamDescription, - operation::{test::handle_response_test, ListIndexes, Operation}, - options::{IndexOptions, IndexVersion, ListIndexesOptions, TextIndexVersion}, - IndexModel, - Namespace, -}; - -#[test] -fn build() { - let ns = Namespace { - db: "test_db".to_string(), - coll: "test_coll".to_string(), - }; - - let list_options = ListIndexesOptions::builder() - .max_time(Some(Duration::from_millis(42))) - .batch_size(Some(4)) - .build(); - let mut list_indexes = ListIndexes::new(ns, Some(list_options)); - - let cmd = list_indexes - .build(&StreamDescription::new_testing()) - .expect("ListIndexes command failed to build when it should have succeeded."); - - assert_eq!( - cmd.body, - doc! { - "listIndexes": "test_coll", - "maxTimeMS": 42, - "cursor": doc! { - "batchSize": 4, - }, - } - ); -} - -#[test] -fn handle_success() { - let op = ListIndexes::empty(); - - let first_batch = vec![ - IndexModel::builder() - .keys(doc! {"x": 1}) - .options(Some( - IndexOptions::builder() - .version(Some(IndexVersion::V1)) - .name(Some("foo".to_string())) - .sparse(Some(false)) - .build(), - )) - .build(), - IndexModel::builder() - .keys(doc! 
{"y": 1, "z": -1}) - .options(Some( - IndexOptions::builder() - .version(Some(IndexVersion::V1)) - .name(Some("x_1_z_-1".to_string())) - .text_index_version(Some(TextIndexVersion::V3)) - .default_language(Some("spanish".to_string())) - .build(), - )) - .build(), - ]; - - let response = doc! { - "cursor": { - "id": 123, - "ns": "test_db.test_coll", - "firstBatch": bson::to_bson(&first_batch).unwrap(), - }, - "ok": 1, - }; - - let cursor_spec = handle_response_test(&op, response).unwrap(); - - assert_eq!(cursor_spec.id(), 123); - assert_eq!(cursor_spec.address(), &ServerAddress::default()); - assert_eq!(cursor_spec.batch_size(), None); - assert_eq!(cursor_spec.max_time(), None); - - assert_eq!( - bson::to_bson(&cursor_spec.initial_buffer).unwrap(), - bson::to_bson(&first_batch).unwrap(), - ); -} diff --git a/src/operation/raw_output.rs b/src/operation/raw_output.rs index 254ea316b..0ae35c1d1 100644 --- a/src/operation/raw_output.rs +++ b/src/operation/raw_output.rs @@ -1,9 +1,10 @@ use crate::{ cmap::{Command, RawCommandResponse, StreamDescription}, error::Result, + ClientSession, }; -use super::Operation; +use super::{handle_response_sync, Operation, OperationResponse}; /// Forwards all implementation to the wrapped `Operation`, but returns the response unparsed and /// unvalidated as a `RawCommandResponse`. @@ -30,8 +31,9 @@ impl Operation for RawOutput { &self, response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - Ok(response) + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ Ok(response) }} } fn handle_error(&self, error: crate::error::Error) -> Result { diff --git a/src/operation/run_command.rs b/src/operation/run_command.rs index 427f00214..e08cdcc16 100644 --- a/src/operation/run_command.rs +++ b/src/operation/run_command.rs @@ -1,6 +1,3 @@ -#[cfg(test)] -mod test; - use std::convert::TryInto; use bson::{RawBsonRef, RawDocumentBuf}; @@ -12,8 +9,11 @@ use crate::{ cmap::{conn::PinnedConnectionHandle, Command, RawCommandResponse, StreamDescription}, error::{ErrorKind, Result}, selection_criteria::SelectionCriteria, + ClientSession, }; +use super::{handle_response_sync, OperationResponse}; + #[derive(Debug, Clone)] pub(crate) struct RunCommand<'conn> { db: String, @@ -98,8 +98,9 @@ impl<'conn> OperationWithDefaults for RunCommand<'conn> { &self, response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - Ok(response.into_raw_document_buf().try_into()?) + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ Ok(response.into_raw_document_buf().try_into()?) }} } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/run_command/test.rs b/src/operation/run_command/test.rs deleted file mode 100644 index 467ccf5e1..000000000 --- a/src/operation/run_command/test.rs +++ /dev/null @@ -1,24 +0,0 @@ -use bson::Timestamp; - -use super::RunCommand; -use crate::{bson::doc, operation::test::handle_response_test}; - -#[test] -fn handle_success() { - let op = RunCommand::new("foo".into(), doc! { "hello": 1 }, None, None).unwrap(); - - let doc = doc! 
{ - "ok": 1, - "some": "field", - "other": true, - "$clusterTime": { - "clusterTime": Timestamp { - time: 123, - increment: 345, - }, - "signature": {} - } - }; - let result_doc = handle_response_test(&op, doc.clone()).unwrap(); - assert_eq!(result_doc, doc); -} diff --git a/src/operation/run_cursor_command.rs b/src/operation/run_cursor_command.rs index 756b8c471..e5d6093ab 100644 --- a/src/operation/run_cursor_command.rs +++ b/src/operation/run_cursor_command.rs @@ -8,8 +8,11 @@ use crate::{ operation::{run_command::RunCommand, CursorBody, Operation}, options::RunCursorCommandOptions, selection_criteria::SelectionCriteria, + ClientSession, }; +use super::{handle_response_sync, OperationResponse}; + #[derive(Debug, Clone)] pub(crate) struct RunCursorCommand<'conn> { run_command: RunCommand<'conn>, @@ -89,20 +92,23 @@ impl<'conn> Operation for RunCursorCommand<'conn> { &self, response: RawCommandResponse, description: &StreamDescription, - ) -> Result { - let cursor_response: CursorBody = response.body()?; - - let comment = match &self.options { - Some(options) => options.comment.clone(), - None => None, - }; - - Ok(CursorSpecification::new( - cursor_response.cursor, - description.server_address.clone(), - self.options.as_ref().and_then(|opts| opts.batch_size), - self.options.as_ref().and_then(|opts| opts.max_time), - comment, - )) + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ + let cursor_response: CursorBody = response.body()?; + + let comment = match &self.options { + Some(options) => options.comment.clone(), + None => None, + }; + + Ok(CursorSpecification::new( + cursor_response.cursor, + description.server_address.clone(), + self.options.as_ref().and_then(|opts| opts.batch_size), + self.options.as_ref().and_then(|opts| opts.max_time), + comment, + )) + }} } } diff --git a/src/operation/search_index.rs b/src/operation/search_index.rs index 986d1395b..439c46e55 100644 --- a/src/operation/search_index.rs +++ b/src/operation/search_index.rs @@ -1,9 +1,15 @@ use bson::{doc, Document}; use serde::Deserialize; -use crate::{cmap::Command, error::Result, Namespace, SearchIndexModel}; +use crate::{ + cmap::{Command, RawCommandResponse, StreamDescription}, + error::Result, + ClientSession, + Namespace, + SearchIndexModel, +}; -use super::OperationWithDefaults; +use super::{handle_response_sync, OperationResponse, OperationWithDefaults}; #[derive(Debug)] pub(crate) struct CreateSearchIndexes { @@ -35,28 +41,31 @@ impl OperationWithDefaults for CreateSearchIndexes { fn handle_response( &self, - response: crate::cmap::RawCommandResponse, - _description: &crate::cmap::StreamDescription, - ) -> Result { - #[derive(Debug, Deserialize)] - #[serde(rename_all = "camelCase")] - struct Response { - indexes_created: Vec, - } - - #[derive(Debug, Deserialize)] - struct CreatedIndex { - #[allow(unused)] - id: String, - name: String, - } - - let response: Response = response.body()?; - Ok(response - .indexes_created - .into_iter() - .map(|ci| ci.name) - .collect()) + response: RawCommandResponse, + _description: &StreamDescription, + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! 
{{ + #[derive(Debug, Deserialize)] + #[serde(rename_all = "camelCase")] + struct Response { + indexes_created: Vec, + } + + #[derive(Debug, Deserialize)] + struct CreatedIndex { + #[allow(unused)] + id: String, + name: String, + } + + let response: Response = response.body()?; + Ok(response + .indexes_created + .into_iter() + .map(|ci| ci.name) + .collect()) + }} } fn supports_sessions(&self) -> bool { @@ -107,10 +116,11 @@ impl OperationWithDefaults for UpdateSearchIndex { fn handle_response( &self, - _response: crate::cmap::RawCommandResponse, - _description: &crate::cmap::StreamDescription, - ) -> crate::error::Result { - Ok(()) + _response: RawCommandResponse, + _description: &StreamDescription, + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ Ok(()) }} } fn supports_sessions(&self) -> bool { @@ -155,10 +165,11 @@ impl OperationWithDefaults for DropSearchIndex { fn handle_response( &self, - _response: crate::cmap::RawCommandResponse, - _description: &crate::cmap::StreamDescription, - ) -> Result { - Ok(()) + _response: RawCommandResponse, + _description: &StreamDescription, + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! {{ Ok(()) }} } fn handle_error(&self, error: crate::error::Error) -> Result { diff --git a/src/operation/test.rs b/src/operation/test.rs deleted file mode 100644 index 3dacc95b9..000000000 --- a/src/operation/test.rs +++ /dev/null @@ -1,149 +0,0 @@ -use bson::{doc, Document, Timestamp}; -use serde::Deserialize; - -use crate::{ - client::ClusterTime, - cmap::{RawCommandResponse, StreamDescription}, - error::{Result, TRANSIENT_TRANSACTION_ERROR}, - operation::{CommandErrorBody, CommandResponse, Operation}, - options::{ReadPreference, SelectionCriteria}, -}; - -pub(crate) fn handle_response_test(op: &T, response_doc: Document) -> Result { - let raw = RawCommandResponse::with_document(response_doc).unwrap(); - op.handle_response(raw, &StreamDescription::new_testing()) -} - -pub(crate) fn op_selection_criteria(constructor: F) -where - T: Operation, - F: Fn(Option) -> T, -{ - let op = constructor(None); - assert_eq!(op.selection_criteria(), None); - - let read_pref: SelectionCriteria = ReadPreference::Secondary { - options: Default::default(), - } - .into(); - - let op = constructor(Some(read_pref.clone())); - assert_eq!(op.selection_criteria(), Some(&read_pref)); -} - -#[test] -fn response_success() { - let cluster_timestamp = Timestamp { - time: 123, - increment: 345, - }; - let doc = doc! { - "ok": 1, - "some": "field", - "other": true, - "$clusterTime": { - "clusterTime": cluster_timestamp, - "signature": {} - } - }; - let raw = RawCommandResponse::with_document(doc.clone()).unwrap(); - let response: CommandResponse = raw.body().unwrap(); - - assert!(response.is_success()); - assert_eq!( - response.cluster_time(), - Some(&ClusterTime { - cluster_time: cluster_timestamp, - signature: doc! {}, - }) - ); - assert_eq!(response.body, doc! { "some": "field", "other": true }); - - #[derive(Deserialize, Debug, PartialEq)] - struct Body { - some: String, - #[serde(rename = "other")] - o: bool, - #[serde(default)] - default: Option, - } - - let raw = RawCommandResponse::with_document(doc).unwrap(); - let response: CommandResponse = raw.body().unwrap(); - - assert!(response.is_success()); - assert_eq!( - response.cluster_time(), - Some(&ClusterTime { - cluster_time: cluster_timestamp, - signature: doc! 
{}, - }) - ); - assert_eq!( - response.body, - Body { - some: "field".to_string(), - o: true, - default: None, - } - ); -} - -#[test] -fn response_failure() { - let cluster_timestamp = Timestamp { - time: 123, - increment: 345, - }; - let doc = doc! { - "ok": 0, - "code": 123, - "codeName": "name", - "errmsg": "some message", - "errorLabels": [TRANSIENT_TRANSACTION_ERROR], - "$clusterTime": { - "clusterTime": cluster_timestamp, - "signature": {} - } - }; - let raw = RawCommandResponse::with_document(doc.clone()).unwrap(); - let response: CommandResponse = raw.body().unwrap(); - - assert!(!response.is_success()); - assert_eq!( - response.cluster_time(), - Some(&ClusterTime { - cluster_time: cluster_timestamp, - signature: doc! {}, - }) - ); - assert_eq!( - response.body, - doc! { - "code": 123, - "codeName": "name", - "errmsg": "some message", - "errorLabels": [TRANSIENT_TRANSACTION_ERROR], - } - ); - - let raw = RawCommandResponse::with_document(doc).unwrap(); - let response: CommandResponse = raw.body().unwrap(); - - assert!(!response.is_success()); - assert_eq!( - response.cluster_time(), - Some(&ClusterTime { - cluster_time: cluster_timestamp, - signature: doc! {}, - }) - ); - let command_error = response.body; - assert_eq!(command_error.command_error.code, 123); - assert_eq!(command_error.command_error.code_name, "name"); - assert_eq!(command_error.command_error.message, "some message"); - assert_eq!( - command_error.error_labels, - Some(vec![TRANSIENT_TRANSACTION_ERROR.to_string()]) - ); -} diff --git a/src/operation/update.rs b/src/operation/update.rs index 8750ef774..e870e9e5f 100644 --- a/src/operation/update.rs +++ b/src/operation/update.rs @@ -1,6 +1,3 @@ -#[cfg(test)] -mod test; - use serde::{Deserialize, Serialize}; use crate::{ @@ -12,9 +9,12 @@ use crate::{ options::{UpdateModifications, UpdateOptions, WriteConcern}, results::UpdateResult, serde_util::to_raw_document_buf_with_options, + ClientSession, Namespace, }; +use super::{handle_response_sync, OperationResponse}; + #[derive(Clone, Debug)] pub(crate) enum UpdateOrReplace<'a, T = ()> { UpdateModifications(UpdateModifications), @@ -63,18 +63,6 @@ pub(crate) struct Update<'a, T = ()> { } impl Update<'_> { - #[cfg(test)] - fn empty() -> Self { - Self::with_update( - Namespace::new("db", "coll"), - doc! {}, - UpdateModifications::Document(doc! {}), - false, - None, - false, - ) - } - pub(crate) fn with_update( ns: Namespace, filter: Document, @@ -186,29 +174,32 @@ impl<'a, T: Serialize> OperationWithDefaults for Update<'a, T> { &self, raw_response: RawCommandResponse, _description: &StreamDescription, - ) -> Result { - let response: WriteResponseBody = raw_response.body_utf8_lossy()?; - response.validate().map_err(convert_bulk_errors)?; - - let modified_count = response.n_modified; - let upserted_id = response - .upserted - .as_ref() - .and_then(|v| v.first()) - .and_then(|doc| doc.get("_id")) - .cloned(); - - let matched_count = if upserted_id.is_some() { - 0 - } else { - response.body.n - }; - - Ok(UpdateResult { - matched_count, - modified_count, - upserted_id, - }) + _session: Option<&mut ClientSession>, + ) -> OperationResponse<'static, Self::O> { + handle_response_sync! 
{{ + let response: WriteResponseBody = raw_response.body_utf8_lossy()?; + response.validate().map_err(convert_bulk_errors)?; + + let modified_count = response.n_modified; + let upserted_id = response + .upserted + .as_ref() + .and_then(|v| v.first()) + .and_then(|doc| doc.get("_id")) + .cloned(); + + let matched_count = if upserted_id.is_some() { + 0 + } else { + response.body.n + }; + + Ok(UpdateResult { + matched_count, + modified_count, + upserted_id, + }) + }} } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/update/test.rs b/src/operation/update/test.rs deleted file mode 100644 index a4e4c4cad..000000000 --- a/src/operation/update/test.rs +++ /dev/null @@ -1,116 +0,0 @@ -use pretty_assertions::assert_eq; - -use crate::{ - bson::{doc, Bson}, - error::{ErrorKind, WriteConcernError, WriteError, WriteFailure}, - operation::{test::handle_response_test, Update}, -}; - -#[test] -fn handle_success() { - let op = Update::empty(); - - let ok_response = doc! { - "ok": 1.0, - "n": 3, - "nModified": 1, - "upserted": [ - { "index": 0, "_id": 1 } - ] - }; - - let update_result = handle_response_test(&op, ok_response).unwrap(); - assert_eq!(update_result.matched_count, 0); - assert_eq!(update_result.modified_count, 1); - assert_eq!(update_result.upserted_id, Some(Bson::Int32(1))); -} - -#[test] -fn handle_success_no_upsert() { - let op = Update::empty(); - - let ok_response = doc! { - "ok": 1.0, - "n": 5, - "nModified": 2 - }; - - let update_result = handle_response_test(&op, ok_response).unwrap(); - assert_eq!(update_result.matched_count, 5); - assert_eq!(update_result.modified_count, 2); - assert_eq!(update_result.upserted_id, None); -} - -#[test] -fn handle_write_failure() { - let op = Update::empty(); - - let write_error_response = doc! { - "ok": 1.0, - "n": 12, - "nModified": 0, - "writeErrors": [ - { - "index": 0, - "code": 1234, - "errmsg": "my error string" - } - ] - }; - - let write_error = handle_response_test(&op, write_error_response).unwrap_err(); - match *write_error.kind { - ErrorKind::Write(WriteFailure::WriteError(ref error)) => { - let expected_err = WriteError { - code: 1234, - code_name: None, - message: "my error string".to_string(), - details: None, - }; - assert_eq!(error, &expected_err); - } - ref e => panic!("expected write error, got {:?}", e), - }; -} - -#[test] -fn handle_write_concern_failure() { - let op = Update::empty(); - - let wc_error_response = doc! { - "ok": 1.0, - "n": 0, - "nModified": 0, - "writeConcernError": { - "code": 456, - "codeName": "wcError", - "errmsg": "some message", - "errInfo": { - "writeConcern": { - "w": 2, - "wtimeout": 0, - "provenance": "clientSupplied" - } - } - } - }; - - let wc_error = handle_response_test(&op, wc_error_response).unwrap_err(); - match *wc_error.kind { - ErrorKind::Write(WriteFailure::WriteConcernError(ref wc_error)) => { - let expected_wc_err = WriteConcernError { - code: 456, - code_name: "wcError".to_string(), - message: "some message".to_string(), - details: Some(doc! 
{ "writeConcern": { - "w": 2, - "wtimeout": 0, - "provenance": "clientSupplied" - } }), - labels: vec![], - }; - assert_eq!(wc_error, &expected_wc_err); - } - ref e => panic!("expected write concern error, got {:?}", e), - } -} diff --git a/src/serde_util.rs b/src/serde_util.rs index 78bfa80e5..d13283fbd 100644 --- a/src/serde_util.rs +++ b/src/serde_util.rs @@ -205,3 +205,22 @@ pub(crate) fn serialize_indexed_map( .collect(); string_map.serialize(serializer) } + +#[cfg(test)] +pub(crate) fn deserialize_indexed_map<'de, D, T>( + deserializer: D, +) -> std::result::Result<Option<HashMap<usize, T>>, D::Error> +where + D: Deserializer<'de>, + T: serde::de::DeserializeOwned, +{ + use std::str::FromStr; + + let string_map: HashMap<String, T> = HashMap::deserialize(deserializer)?; + Ok(Some( + string_map + .into_iter() + .map(|(index, t)| (usize::from_str(&index).unwrap(), t)) + .collect(), + )) +} diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index dc37460c7..8354727ab 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -1,6 +1,70 @@ -use crate::test::spec::unified_runner::run_unified_tests; +use std::{sync::Arc, time::Duration}; -#[tokio::test] +use crate::{ + action::bulk_write::write_models::WriteModel, + bson::doc, + test::{spec::unified_runner::run_unified_tests, EventHandler}, + Client, + Namespace, +}; + +#[tokio::test(flavor = "multi_thread")] async fn run_unified() { run_unified_tests(&["crud", "unified", "new-bulk-write"]).await; } + +#[tokio::test] +async fn command_batching() { + let handler = Arc::new(EventHandler::new()); + let client = Client::test_builder() + .event_handler(handler.clone()) + .build() + .await; + let mut subscriber = handler.subscribe(); + + let max_object_size = client.server_info.max_bson_object_size as usize; + let max_message_size = client.server_info.max_message_size_bytes as usize; + + let namespace = Namespace::new("command_batching", "command_batching"); + let large_doc = doc! {"a": "b".repeat(max_object_size / 2)}; + let models = vec![ + WriteModel::InsertOne { + namespace: namespace.clone(), + document: large_doc, + }; + 3 + ]; + client.bulk_write(models).await.unwrap(); + + let (started, _) = subscriber + .wait_for_successful_command_execution(Duration::from_millis(500), "bulkWrite") + .await + .expect("no events observed"); + let ops = started.command.get_array("ops").unwrap(); + assert_eq!(ops.len(), 3); + + let large_doc = doc!
{ "a": "b".repeat(max_object_size - 5000) }; + let num_models = max_message_size / max_object_size + 1; + let models = vec![ + WriteModel::InsertOne { + namespace: namespace.clone(), + document: large_doc + }; + num_models + ]; + client.bulk_write(models).await.unwrap(); + + let (first_started, _) = subscriber + .wait_for_successful_command_execution(Duration::from_millis(500), "bulkWrite") + .await + .expect("no events observed"); + let first_len = first_started.command.get_array("ops").unwrap().len(); + assert!(first_len < num_models); + + let (second_started, _) = subscriber + .wait_for_successful_command_execution(Duration::from_millis(500), "bulkWrite") + .await + .expect("no events observed"); + let second_len = second_started.command.get_array("ops").unwrap().len(); + assert_eq!(first_len + second_len, num_models); +} diff --git a/src/test/coll.rs b/src/test/coll.rs index 9aeb871af..34571a503 100644 --- a/src/test/coll.rs +++ b/src/test/coll.rs @@ -1,11 +1,6 @@ use std::{fmt::Debug, sync::Arc, time::Duration}; -use crate::{ - event::command::CommandEvent, - test::{Event, EventHandler}, - Client, - Namespace, -}; +use crate::{test::EventHandler, Client, Namespace}; use bson::{rawdoc, RawDocumentBuf}; use futures::stream::{StreamExt, TryStreamExt}; use once_cell::sync::Lazy; @@ -1279,19 +1274,11 @@ async fn insert_many_document_sequences() { ]; collection.insert_many(docs, None).await.unwrap(); - let event = subscriber - .filter_map_event(Duration::from_millis(500), |e| match e { - Event::Command(command_event) => match command_event { - CommandEvent::Started(started) if started.command_name.as_str() == "insert" => { - Some(started) - } - _ => None, - }, - _ => None, - }) + let (started, _) = subscriber + .wait_for_successful_command_execution(Duration::from_millis(500), "insert") .await - .expect("did not observe command started event for insert"); - let insert_documents = event.command.get_array("documents").unwrap(); + .expect("did not observe successful command events for insert"); + let insert_documents = started.command.get_array("documents").unwrap(); assert_eq!(insert_documents.len(), 2); // Build up a list of documents that exceeds max_message_size @@ -1307,33 +1294,17 @@ async fn insert_many_document_sequences() { let total_docs = docs.len(); collection.insert_many(docs, None).await.unwrap(); - let first_event = subscriber - .filter_map_event(Duration::from_millis(500), |e| match e { - Event::Command(command_event) => match command_event { - CommandEvent::Started(started) if started.command_name.as_str() == "insert" => { - Some(started) - } - _ => None, - }, - _ => None, - }) + let (first_started, _) = subscriber + .wait_for_successful_command_execution(Duration::from_millis(500), "insert") .await - .expect("did not observe command started event for insert"); - let first_batch_len = first_event.command.get_array("documents").unwrap().len(); + .expect("did not observe successful command events for insert"); + let first_batch_len = first_started.command.get_array("documents").unwrap().len(); assert!(first_batch_len < total_docs); - let second_event = subscriber - .filter_map_event(Duration::from_millis(500), |e| match e { - Event::Command(command_event) => match command_event { - CommandEvent::Started(started) if started.command_name.as_str() == "insert" => { - Some(started) - } - _ => None, - }, - _ => None, - }) + let (second_started, _) = subscriber + .wait_for_successful_command_execution(Duration::from_millis(500), "insert") .await - .expect("did not observe command 
started event for insert"); - let second_batch_len = second_event.command.get_array("documents").unwrap().len(); + .expect("did not observe successful command events for insert"); + let second_batch_len = second_started.command.get_array("documents").unwrap().len(); assert_eq!(first_batch_len + second_batch_len, total_docs); } diff --git a/src/test/spec.rs b/src/test/spec.rs index 06e390115..ef626f500 100644 --- a/src/test/spec.rs +++ b/src/test/spec.rs @@ -85,13 +85,16 @@ pub(crate) fn deserialize_spec_tests( .unwrap_or_else(|e| panic!("Failed to open file at {:?}: {}", &path, e)); // Use BSON as an intermediary to deserialize extended JSON properly. - let test_bson: Bson = serde_json::from_reader(file).unwrap_or_else(|e| { + let deserializer = &mut serde_json::Deserializer::from_reader(file); + let test_bson: Bson = serde_path_to_error::deserialize(deserializer).unwrap_or_else(|e| { panic!( "Failed to deserialize test JSON to BSON in {:?}: {}", &path, e ) }); - let test: T = bson::from_bson(test_bson).unwrap_or_else(|e| { + + let deserializer = bson::Deserializer::new(test_bson); + let test: T = serde_path_to_error::deserialize(deserializer).unwrap_or_else(|e| { panic!( "Failed to deserialize test BSON to {} in {:?}: {}", type_name::(), diff --git a/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-comment.json b/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-comment.json deleted file mode 100644 index 489582310..000000000 --- a/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-comment.json +++ /dev/null @@ -1,97 +0,0 @@ -{ - "description": "client bulkWrite comment", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-tests" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "coll0" - } - } - ], - "initialData": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [] - } - ], - "tests": [ - { - "description": "bulkWrite comment", - "runOnRequirements": [ - { - "minServerVersion": "7.0" - } - ], - "operations": [ - { - "object": "client0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "insertOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "document": { - "_id": "1" - } - } - } - ], - "comment": { - "bulk": "write" - } - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "bulkWrite": 1, - "comment": { - "bulk": "write" - } - } - } - } - ] - } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": "1" - } - ] - } - ] - } - ] -} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-let.json b/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-let.json deleted file mode 100644 index 0a720ae1f..000000000 --- a/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-let.json +++ /dev/null @@ -1,135 +0,0 @@ -{ - "description": "client bulkWrite let variables", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-tests" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "coll0" - } - } - ], - "initialData": 
[ - { - "collectionName": "coll0", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - ], - "tests": [ - { - "description": "bulkWrite let variables", - "runOnRequirements": [ - { - "minServerVersion": "7.0" - } - ], - "operations": [ - { - "object": "client0", - "name": "bulkWrite", - "arguments": { - "let": { - "id1": 1, - "id2": 2 - }, - "requests": [ - { - "updateOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "filter": { - "$expr": { - "$eq": [ - "$_id", - "$$id1" - ] - } - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - { - "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, - "filter": { - "$expr": { - "$eq": [ - "$_id", - "$$id2" - ] - } - } - } - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "bulkWrite": 1, - "let": { - "id1": 1, - "id2": 2 - } - } - } - } - ] - } - ], - "outcome": [ - { - "databaseName": "crud-tests", - "collectionName": "coll0", - "documents": [ - { - "_id": 1, - "x": 12 - } - ] - } - ] - } - ] -} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.json new file mode 100644 index 000000000..77a67625f --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.json @@ -0,0 +1,260 @@ +{ + "description": "client bulkWrite delete options", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "client bulk write delete with collation", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + }, + "collation": { + "locale": "simple" + } + } + }, + { + "deleteMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": { + "$gt": 1 + } + }, + "collation": { + "locale": "simple" + } + } + } + ] + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 3, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ops": [ + { + "delete": 0, + "filter": { + "_id": 1 + }, + "collation": { + "locale": "simple" + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "collation": { + "locale": "simple" + }, + "multi": true + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [] + } + ] + }, + { + "description": 
"client bulk write delete with hint", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "deleteMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 3, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ops": [ + { + "delete": 0, + "filter": { + "_id": 1 + }, + "hint": "_id_", + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_", + "multi": true + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json new file mode 100644 index 000000000..10ddb7a5d --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json @@ -0,0 +1,437 @@ +{ + "description": "client bulkWrite errors", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "retryWrites": false + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 2 + }, + { + "_id": 3, + "x": 3 + } + ] + } + ], + "tests": [ + { + "description": "an individual operation fails during an ordered bulkWrite", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + } + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 2 + }, + { + "_id": 3, + "x": 3 + } + ] + } + ] + }, + { + "description": "an individual operation fails during an unordered bulkWrite", + 
"runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true, + "ordered": false + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 2, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "2": { + "deletedCount": 1 + } + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 2 + } + ] + } + ] + }, + { + "description": "detailed results are omitted from error when verboseResults is false", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ] + }, + { + "description": "a top-level failure occurs during a bulkWrite", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 8 + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "document": { + "x": 1 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "errorCode": 8 + } + } + ] + }, + { + "description": "a write concern error occurs during a bulkWrite", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "document": { + "_id": 10 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": 
{ + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 10 + } + }, + "updateResults": {}, + "deleteResults": {} + }, + "writeConcernErrors": [ + { + "code": 91, + "errmsg": "Replication is being shut down" + } + ] + } + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-mixed_namespaces.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json similarity index 95% rename from src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-mixed_namespaces.json rename to src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json index fa60526ac..f408a8969 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-mixed_namespaces.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json @@ -1,5 +1,5 @@ { - "description": "bulkWrite namespaces", + "description": "client bulkWrite with mixed namespaces", "schemaVersion": "1.0", "createEntities": [ { @@ -61,7 +61,8 @@ "x": 11 }, { - "_id": 2 + "_id": 2, + "x": 22 } ] }, @@ -70,28 +71,30 @@ "collectionName": "coll2", "documents": [ { - "_id": 3 + "_id": 3, + "x": 33 }, { - "_id": 4 + "_id": 4, + "x": 44 } ] } ], "tests": [ { - "description": "bulkWrite mixed namespaces", + "description": "client bulkWrite with mixed namespaces", "runOnRequirements": [ { - "minServerVersion": "7.0" + "minServerVersion": "8.0" } ], "operations": [ { "object": "client0", - "name": "bulkWrite", + "name": "clientBulkWrite", "arguments": { - "requests": [ + "models": [ { "insertOne": { "namespace": { diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json new file mode 100644 index 000000000..80e5fef10 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json @@ -0,0 +1,322 @@ +{ + "description": "client bulkWrite top-level options", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "client bulkWrite comment", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "comment": { + "bulk": "write" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "comment": { + "bulk": "write" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": 
"crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite bypassDocumentValidation", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "bypassDocumentValidation": true, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "bypassDocumentValidation": true + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite let", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id1" + ] + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ], + "let": { + "id1": 1, + "id2": 2 + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 1, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": null + } + }, + "deleteResults": { + "1": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "let": { + "id1": 1, + "id2": 2 + } + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-ordered.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.json similarity index 83% rename from src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-ordered.json rename to src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.json index 48c5a7bd9..546a980b2 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-ordered.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.json @@ -1,5 +1,5 @@ { - "description": "client bulkWrite ordered option", + "description": "client bulkWrite with ordered option", "schemaVersion": "1.0", "createEntities": [ { @@ -34,18 +34,18 @@ ], "tests": [ { - "description": "unordered bulkWrite", + "description": "client bulkWrite with ordered: false", "runOnRequirements": [ { - "minServerVersion": "7.0" + "minServerVersion": "8.0" } ], "operations": [ { "object": "client0", - "name": "bulkWrite", + "name": 
"clientBulkWrite", "arguments": { - "requests": [ + "models": [ { "insertOne": { "namespace": { @@ -53,7 +53,7 @@ "coll": "coll0" }, "document": { - "_id": 4 + "_id": 1 } } } @@ -69,7 +69,7 @@ "deletedCount": 0, "insertResults": { "0": { - "insertedId": 4 + "insertedId": 1 } }, "updateResults": {}, @@ -85,14 +85,6 @@ "commandStartedEvent": { "command": { "bulkWrite": 1, - "ops": [ - { - "insert": 0, - "document": { - "_id": 4 - } - } - ], "ordered": false } } @@ -106,25 +98,25 @@ "databaseName": "crud-tests", "documents": [ { - "_id": 4 + "_id": 1 } ] } ] }, { - "description": "ordered bulkWrite", + "description": "client bulkWrite with ordered: true", "runOnRequirements": [ { - "minServerVersion": "7.0" + "minServerVersion": "8.0" } ], "operations": [ { "object": "client0", - "name": "bulkWrite", + "name": "clientBulkWrite", "arguments": { - "requests": [ + "models": [ { "insertOne": { "namespace": { @@ -164,14 +156,6 @@ "commandStartedEvent": { "command": { "bulkWrite": 1, - "ops": [ - { - "insert": 0, - "document": { - "_id": 4 - } - } - ], "ordered": true } } @@ -192,18 +176,18 @@ ] }, { - "description": "bulkWrite defaults to ordered", + "description": "client bulkWrite defaults to ordered: true", "runOnRequirements": [ { - "minServerVersion": "7.0" + "minServerVersion": "8.0" } ], "operations": [ { "object": "client0", - "name": "bulkWrite", + "name": "clientBulkWrite", "arguments": { - "requests": [ + "models": [ { "insertOne": { "namespace": { diff --git a/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-results.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json similarity index 62% rename from src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-results.json rename to src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json index b20baecf5..489b581d0 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/bulkWrite-results.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json @@ -1,6 +1,6 @@ { "description": "client bulkWrite results", - "schemaVersion": "1.0", + "schemaVersion": "1.18", "createEntities": [ { "client": { @@ -59,18 +59,18 @@ ], "tests": [ { - "description": "verbose bulkWrite returns detailed results", + "description": "client bulkWrite with verboseResults: true returns detailed results", "runOnRequirements": [ { - "minServerVersion": "7.0" + "minServerVersion": "8.0" } ], "operations": [ { "object": "client0", - "name": "bulkWrite", + "name": "clientBulkWrite", "arguments": { - "requests": [ + "models": [ { "insertOne": { "namespace": { @@ -180,7 +180,7 @@ "expectResult": { "insertedCount": 1, "upsertedCount": 1, - "matchedCount": 4, + "matchedCount": 3, "modifiedCount": 3, "deletedCount": 3, "insertResults": { @@ -224,7 +224,92 @@ "commandStartedEvent": { "command": { "bulkWrite": 1, - "errorsOnly": false + "errorsOnly": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + 
"$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] } } } @@ -261,18 +346,18 @@ ] }, { - "description": "summary bulkWrite omits detailed results", + "description": "client bulkWrite with verboseResults: false omits detailed results", "runOnRequirements": [ { - "minServerVersion": "7.0" + "minServerVersion": "8.0" } ], "operations": [ { "object": "client0", - "name": "bulkWrite", + "name": "clientBulkWrite", "arguments": { - "requests": [ + "models": [ { "insertOne": { "namespace": { @@ -382,13 +467,11 @@ "expectResult": { "insertedCount": 1, "upsertedCount": 1, - "matchedCount": 4, + "matchedCount": 3, "modifiedCount": 3, "deletedCount": 3, "insertResults": { - "0": { - "insertedId": 8 - } + "$$unsetOrMatches": {} }, "updateResults": { "$$unsetOrMatches": {} @@ -407,7 +490,92 @@ "commandStartedEvent": { "command": { "bulkWrite": 1, - "errorsOnly": true + "errorsOnly": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] } } } @@ -444,18 +612,18 @@ ] }, { - "description": "bulkWrite defaults to summary results", + "description": "client bulkWrite defaults to verboseResults: false", "runOnRequirements": [ { - "minServerVersion": "7.0" + "minServerVersion": "8.0" } ], "operations": [ { "object": "client0", - "name": "bulkWrite", + "name": "clientBulkWrite", "arguments": { - "requests": [ + "models": [ { "insertOne": { "namespace": { @@ -564,13 +732,11 @@ "expectResult": { "insertedCount": 1, "upsertedCount": 1, - "matchedCount": 4, + "matchedCount": 3, "modifiedCount": 3, "deletedCount": 3, "insertResults": { - "0": { - "insertedId": 8 - } + "$$unsetOrMatches": {} }, "updateResults": { "$$unsetOrMatches": {} @@ -589,7 +755,92 @@ "commandStartedEvent": { "command": { "bulkWrite": 1, - "errorsOnly": true + "errorsOnly": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] } } } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json new 
file mode 100644 index 000000000..5b442f300 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json @@ -0,0 +1,900 @@ +{ + "description": "client bulkWrite update options", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + } + ] + } + ], + "tests": [ + { + "description": "client bulkWrite update with arrayFilters", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array.$[i]": 4 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ] + } + }, + { + "updateMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array.$[i]": 5 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ] + } + } + ] + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array.$[i]": 4 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ], + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array.$[i]": 5 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ], + "multi": true + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 4, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 5, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 5, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with collation", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "collation": { + "locale": "simple" + } + } + }, + { + "updateMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + 
"$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "collation": { + "locale": "simple" + } + } + }, + { + "replaceOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 4 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "collation": { + "locale": "simple" + } + } + } + ] + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "collation": { + "locale": "simple" + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "collation": { + "locale": "simple" + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "collation": { + "locale": "simple" + }, + "multi": false + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with hint", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "hint": "_id_" + } + }, + { + "updateMany": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "hint": "_id_" + } + }, + { + "replaceOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 4 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "hint": "_id_" + } + } + ] + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "hint": "_id_", + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "hint": "_id_", + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + 
"updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "hint": "_id_", + "multi": false + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with upsert", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 5 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "upsert": true + } + }, + { + "replaceOne": { + "namespace": { + "db": "crud-tests", + "coll": "coll0" + }, + "filter": { + "_id": 6 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "upsert": true + } + } + ] + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ops": [ + { + "update": 0, + "filter": { + "_id": 5 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "upsert": true, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 6 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "upsert": true, + "multi": false + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 5, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 6, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json new file mode 100644 index 000000000..8d3324b59 --- /dev/null +++ b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json @@ -0,0 +1,730 @@ +{ + "description": "client bulkWrite retryable writes", + "schemaVersion": "1.18", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": 
"failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": { + "db": "retryable-writes-tests", + "coll": "coll0" + }, + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": { + "db": "retryable-writes-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": { + "db": "retryable-writes-tests", + "coll": "coll0" + }, + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": { + "db": "retryable-writes-tests", + "coll": "coll0" + }, + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 222 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": { + "db": "retryable-writes-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": { + "db": "retryable-writes-tests", + "coll": "coll0" + }, + "filter": { + "_id": 3 + } + } + } + ] + }, + 
"expectError": { + "errorCode": 189, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": { + "db": "retryable-writes-tests", + "coll": "coll0" + }, + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": { + "db": "retryable-writes-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": { + "db": "retryable-writes-tests", + "coll": "coll0" + }, + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": { + "db": "retryable-writes-tests", + "coll": "coll0" + }, + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": { + "db": "retryable-writes-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": { + "db": "retryable-writes-tests", + "coll": "coll0" + }, + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "writeConcernErrors": [ + { + "code": 91, + "errmsg": "Replication is being shut down" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.yml b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.yml new file mode 100644 index 000000000..a882a8f6c --- /dev/null +++ b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.yml @@ -0,0 +1,338 @@ +description: "client bulkWrite retryable writes" +schemaVersion: "1.18" +runOnRequirements: + - minServerVersion: "8.0" + topologies: [ replicaset ] + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name retryable-writes-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +tests: + - description: "client bulkWrite with no multi: true operations succeeds after retryable top-level error" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorCode: 189 # PrimarySteppedDown + errorLabels: [ RetryableWriteError ] + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: + db: *database0Name + coll: *collection0Name + document: { _id: 4, x: 44 } + - updateOne: + namespace: + db: *database0Name + coll: *collection0Name + filter: { _id: 1 } + update: + $inc: { x: 1 } + - replaceOne: + namespace: + db: *database0Name + coll: *collection0Name + filter: { _id: 2 } + replacement: { x: 222 } + - deleteOne: + namespace: + db: *database0Name + coll: *collection0Name + filter: { _id: 3 } + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 1 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { 
_id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: retryable-writes-tests.coll0 + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: retryable-writes-tests.coll0 + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 222 } + - { _id: 4, x: 44 } + - description: "client bulkWrite with multi: true operations fails after retryable top-level error" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorCode: 189 # PrimarySteppedDown + errorLabels: [ RetryableWriteError ] + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateMany: + namespace: + db: *database0Name + coll: *collection0Name + filter: { _id: 1 } + update: + $inc: { x: 1 } + - deleteMany: + namespace: + db: *database0Name + coll: *collection0Name + filter: { _id: 3 } + expectError: + errorCode: 189 + errorLabelsContain: [ RetryableWriteError ] + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: true + - delete: 0 + filter: { _id: 3 } + multi: true + nsInfo: + - ns: retryable-writes-tests.coll0 + - description: "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorLabels: [ RetryableWriteError ] + writeConcernError: + code: 91 + errmsg: "Replication is being shut down" + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: + db: *database0Name + coll: *collection0Name + document: { _id: 4, x: 44 } + - updateOne: + namespace: + db: *database0Name + coll: *collection0Name + filter: { _id: 1 } + update: + $inc: { x: 1 } + - replaceOne: + namespace: + db: *database0Name + coll: *collection0Name + filter: { _id: 2 } + replacement: { x: 222 } + - deleteOne: + namespace: + db: *database0Name + coll: *collection0Name + filter: { _id: 3 } + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 1 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: retryable-writes-tests.coll0 + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - 
update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: retryable-writes-tests.coll0 + - description: "client bulkWrite with multi: true operations fails after retryable writeConcernError" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorLabels: [ RetryableWriteError ] + writeConcernError: + code: 91 + errmsg: "Replication is being shut down" + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateMany: + namespace: + db: *database0Name + coll: *collection0Name + filter: { _id: 1 } + update: + $inc: { x: 1 } + - deleteMany: + namespace: + db: *database0Name + coll: *collection0Name + filter: { _id: 3 } + expectError: + writeConcernErrors: + - code: 91 + errmsg: "Replication is being shut down" + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: true + - delete: 0 + filter: { _id: 3 } + multi: true + nsInfo: + - ns: retryable-writes-tests.coll0 diff --git a/src/test/spec/json/transactions/unified/client-bulkWrite.json b/src/test/spec/json/transactions/unified/client-bulkWrite.json new file mode 100644 index 000000000..74e99f77a --- /dev/null +++ b/src/test/spec/json/transactions/unified/client-bulkWrite.json @@ -0,0 +1,401 @@ +{ + "description": "client bulkWrite transactions", + "schemaVersion": "1.18", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "databaseName": "transaction-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + ], + "tests": [ + { + "description": "transactional client bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": { + "db": "transaction-tests", + "coll": "coll0" + }, + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": { + "db": "transaction-tests", + "coll": "coll0" + }, + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": { + "db": "transaction-tests", + "coll": "coll0" + }, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": { + "db": "transaction-tests", + "coll": "coll0" + }, + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + 
"upsert": true + } + }, + { + "deleteOne": { + "namespace": { + "db": "transaction-tests", + "coll": "coll0" + }, + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": { + "db": "transaction-tests", + "coll": "coll0" + }, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": null + }, + "2": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": null + }, + "3": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 4 + } + }, + "deleteResults": { + "4": { + "deletedCount": 1 + }, + "5": { + "deletedCount": 2 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "transaction-tests.coll0" + } + ], + "errorsOnly": false, + "ordered": true + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/unified_runner/operation.rs b/src/test/spec/unified_runner/operation.rs index 6a92439f7..b808c9803 100644 --- a/src/test/spec/unified_runner/operation.rs +++ b/src/test/spec/unified_runner/operation.rs @@ -1,10 +1,6 @@ mod bulk_write; - #[cfg(feature = "in-use-encryption-unstable")] mod csfle; -#[cfg(feature = "in-use-encryption-unstable")] -use self::csfle::*; - mod search_index; use std::{ @@ -79,7 +75,7 @@ use crate::{ runtime, selection_criteria::ReadPreference, serde_util, - test::{spec::unified_runner::operation::bulk_write::BulkWrite, FailPoint}, + test::FailPoint, Collection, Database, IndexModel, @@ -87,6 +83,11 @@ use crate::{ TopologyType, }; +use bulk_write::*; +#[cfg(feature = 
"in-use-encryption-unstable")] +use csfle::*; +use search_index::*; + pub(crate) trait TestOperation: Debug + Send + Sync { fn execute_test_runner_operation<'a>( &'a self, @@ -155,6 +156,7 @@ macro_rules! with_mut_session { } }; } +use with_mut_session; #[derive(Debug)] pub(crate) struct Operation { @@ -382,22 +384,12 @@ impl<'de> Deserialize<'de> for Operation { #[cfg(feature = "in-use-encryption-unstable")] "removeKeyAltName" => deserialize_op::(definition.arguments), "iterateOnce" => deserialize_op::(definition.arguments), - "createSearchIndex" => { - deserialize_op::(definition.arguments) - } - "createSearchIndexes" => { - deserialize_op::(definition.arguments) - } - "dropSearchIndex" => { - deserialize_op::(definition.arguments) - } - "listSearchIndexes" => { - deserialize_op::(definition.arguments) - } - "updateSearchIndex" => { - deserialize_op::(definition.arguments) - } - "bulkWrite" => deserialize_op::(definition.arguments), + "createSearchIndex" => deserialize_op::(definition.arguments), + "createSearchIndexes" => deserialize_op::(definition.arguments), + "dropSearchIndex" => deserialize_op::(definition.arguments), + "listSearchIndexes" => deserialize_op::(definition.arguments), + "updateSearchIndex" => deserialize_op::(definition.arguments), + "clientBulkWrite" => deserialize_op::(definition.arguments), s => Ok(Box::new(UnimplementedOperation { _name: s.to_string(), }) as Box), diff --git a/src/test/spec/unified_runner/operation/bulk_write.rs b/src/test/spec/unified_runner/operation/bulk_write.rs index 1b81cf0af..091582a06 100644 --- a/src/test/spec/unified_runner/operation/bulk_write.rs +++ b/src/test/spec/unified_runner/operation/bulk_write.rs @@ -3,20 +3,21 @@ use futures_util::FutureExt; use serde::Deserialize; use crate::{ + action::bulk_write::{write_models::WriteModel, BulkWriteOptions}, bson::{Array, Bson, Document}, - client::bulk_write::{models::WriteModel, BulkWriteOptions}, coll::options::UpdateModifications, error::Result, test::spec::unified_runner::{Entity, TestRunner}, Namespace, }; -use super::TestOperation; +use super::{with_mut_session, TestOperation}; #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase", deny_unknown_fields)] pub(super) struct BulkWrite { - requests: Vec, + session: Option, + models: Vec, #[serde(flatten)] options: BulkWriteOptions, } @@ -26,13 +27,14 @@ impl<'de> Deserialize<'de> for WriteModel { where D: serde::Deserializer<'de>, { - #[derive(Deserialize)] + #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] enum WriteModelHelper { InsertOne { namespace: Namespace, document: Document, }, + #[serde(rename_all = "camelCase")] UpdateOne { namespace: Namespace, filter: Document, @@ -42,6 +44,7 @@ impl<'de> Deserialize<'de> for WriteModel { hint: Option, upsert: Option, }, + #[serde(rename_all = "camelCase")] UpdateMany { namespace: Namespace, filter: Document, @@ -51,6 +54,7 @@ impl<'de> Deserialize<'de> for WriteModel { hint: Option, upsert: Option, }, + #[serde(rename_all = "camelCase")] ReplaceOne { namespace: Namespace, filter: Document, @@ -170,26 +174,25 @@ impl TestOperation for BulkWrite { ) -> BoxFuture<'a, Result>> { async move { let client = test_runner.get_client(id).await; - let mut action = client.bulk_write(self.requests.clone()); - if let Some(ordered) = self.options.ordered { - action = action.ordered(ordered); - } - if let Some(bypass_document_validation) = self.options.bypass_document_validation { - action = action.bypass_document_validation(bypass_document_validation); - } - if let Some(ref comment) 
= self.options.comment { - action = action.comment(comment.clone()); - } - if let Some(ref let_vars) = self.options.let_vars { - action = action.let_vars(let_vars.clone()); - } - let bson = if let Some(true) = self.options.verbose_results { - let result = action.verbose_results().await?; - bson::to_bson(&result) - } else { - let result = action.await?; - bson::to_bson(&result) + let result = match self.session { + Some(ref session_id) => { + with_mut_session!(test_runner, session_id, |session| async { + client + .bulk_write(self.models.clone()) + .with_options(self.options.clone()) + .session(session) + .await + }) + .await + } + None => { + client + .bulk_write(self.models.clone()) + .with_options(self.options.clone()) + .await + } }?; + let bson = bson::to_bson(&result)?; Ok(Some(bson.into())) } .boxed() diff --git a/src/test/spec/unified_runner/test_file.rs b/src/test/spec/unified_runner/test_file.rs index 5b5a2e9cb..691bd9b04 100644 --- a/src/test/spec/unified_runner/test_file.rs +++ b/src/test/spec/unified_runner/test_file.rs @@ -1,6 +1,4 @@ -#[cfg(feature = "tracing-unstable")] -use std::collections::HashMap; -use std::{borrow::Cow, fmt::Write, sync::Arc, time::Duration}; +use std::{borrow::Cow, collections::HashMap, fmt::Write, sync::Arc, time::Duration}; use percent_encoding::NON_ALPHANUMERIC; use regex::Regex; @@ -13,10 +11,11 @@ use super::{results_match, ExpectedEvent, ObserveEvent, Operation}; #[cfg(feature = "tracing-unstable")] use crate::trace; use crate::{ + action::bulk_write::error::BulkWriteError, bson::{doc, Bson, Deserializer as BsonDeserializer, Document}, client::options::{ServerApi, ServerApiVersion}, concern::{Acknowledgment, ReadConcernLevel}, - error::Error, + error::{Error, ErrorKind}, gridfs::options::GridFsBucketOptions, options::{ ClientOptions, @@ -475,6 +474,9 @@ pub(crate) struct ExpectError { #[serde(default, deserialize_with = "serde_util::deserialize_nonempty_vec")] pub(crate) error_labels_omit: Option>, pub(crate) expect_result: Option, + #[serde(default, deserialize_with = "serde_util::deserialize_indexed_map")] + pub(crate) write_errors: Option>, + pub(crate) write_concern_errors: Option>, } impl ExpectError { @@ -493,6 +495,7 @@ impl ExpectError { )); } } + if let Some(error_contains) = &self.error_contains { match &error.message() { Some(msg) if msg.contains(error_contains) => (), @@ -504,8 +507,9 @@ impl ExpectError { } } } + if let Some(error_code) = self.error_code { - match &error.sdam_code() { + match &error.code() { Some(code) => { if code != &error_code { return Err(format!( @@ -545,6 +549,7 @@ impl ExpectError { } } } + if let Some(error_labels_contain) = &self.error_labels_contain { for label in error_labels_contain { if !error.contains_label(label) { @@ -555,6 +560,7 @@ impl ExpectError { } } } + if let Some(error_labels_omit) = &self.error_labels_omit { for label in error_labels_omit { if error.contains_label(label) { @@ -565,9 +571,69 @@ impl ExpectError { } } } - if self.expect_result.is_some() { - // TODO RUST-260: match against partial results + + if let Some(ref expected_result) = self.expect_result { + let actual_result = match *error.kind { + ErrorKind::ClientBulkWrite(BulkWriteError { + partial_result: Some(ref partial_result), + .. 
+ }) => Some(bson::to_bson(partial_result).map_err(|e| e.to_string())?), + _ => None, + }; + results_match(actual_result.as_ref(), expected_result, false, None)?; } + + if let Some(ref write_errors) = self.write_errors { + let actual_write_errors = match *error.kind { + ErrorKind::ClientBulkWrite(ref bulk_write_error) => &bulk_write_error.write_errors, + ref other => { + return Err(format!( + "{}: expected bulk write error, got {:?}", + description, other + )) + } + }; + + for (expected_index, expected_error) in write_errors { + let actual_error = actual_write_errors.get(expected_index).ok_or_else(|| { + format!( + "{}: expected error for operation at index {}", + description, expected_index + ) + })?; + let actual_error = bson::to_bson(&actual_error).map_err(|e| e.to_string())?; + results_match(Some(&actual_error), expected_error, true, None)?; + } + } + + if let Some(ref write_concern_errors) = self.write_concern_errors { + let actual_write_concern_errors = match *error.kind { + ErrorKind::ClientBulkWrite(ref bulk_write_error) => { + &bulk_write_error.write_concern_errors + } + ref other => { + return Err(format!( + "{}: expected bulk write error, got {:?}", + description, other + )) + } + }; + + if actual_write_concern_errors.len() != write_concern_errors.len() { + return Err(format!( + "{}: got {} write errors, expected {}", + description, + actual_write_concern_errors.len(), + write_concern_errors.len() + )); + } + + for (actual, expected) in actual_write_concern_errors.iter().zip(write_concern_errors) { + let actual = bson::to_bson(&actual).map_err(|e| e.to_string())?; + results_match(Some(&actual), expected, true, None)?; + } + } + Ok(()) } } diff --git a/src/test/spec/unified_runner/test_runner.rs b/src/test/spec/unified_runner/test_runner.rs index c7a609293..f32a7f86e 100644 --- a/src/test/spec/unified_runner/test_runner.rs +++ b/src/test/spec/unified_runner/test_runner.rs @@ -65,6 +65,7 @@ use crate::test::{ }; const SKIPPED_OPERATIONS: &[&str] = &[ + "bulkWrite", "count", "listCollectionObjects", "listDatabaseObjects", @@ -74,7 +75,7 @@ const SKIPPED_OPERATIONS: &[&str] = &[ ]; static MIN_SPEC_VERSION: Version = Version::new(1, 0, 0); -static MAX_SPEC_VERSION: Version = Version::new(1, 17, 0); +static MAX_SPEC_VERSION: Version = Version::new(1, 18, 0); pub(crate) type EntityMap = HashMap; @@ -365,7 +366,7 @@ impl TestRunner { .await .unwrap(); - assert_eq!(expected_data.documents, actual_data); + assert_eq!(actual_data, expected_data.documents); } } } From 42a2ae4d0b17125b113923c8aa11f602e46fb381 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Mon, 26 Feb 2024 10:16:57 -0700 Subject: [PATCH 06/75] add server responses file --- src/operation/bulk_write/server_responses.rs | 69 ++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 src/operation/bulk_write/server_responses.rs diff --git a/src/operation/bulk_write/server_responses.rs b/src/operation/bulk_write/server_responses.rs new file mode 100644 index 000000000..42a35f72d --- /dev/null +++ b/src/operation/bulk_write/server_responses.rs @@ -0,0 +1,69 @@ +use serde::Deserialize; + +use crate::{ + action::bulk_write::results::BulkWriteResult, + bson::Bson, + error::WriteError, + operation::CursorInfo, +}; + +/// The top-level response to the bulkWrite command. 
+#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub(super) struct Response { + pub(super) cursor: CursorInfo, + #[serde(flatten)] + pub(super) summary: SummaryInfo, +} + +/// The summary information contained within the top-level response to the bulkWrite command. +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +pub(super) struct SummaryInfo { + pub(super) n_errors: i64, + pub(super) n_inserted: i64, + pub(super) n_matched: i64, + pub(super) n_modified: i64, + pub(super) n_upserted: i64, + pub(super) n_deleted: i64, +} + +impl BulkWriteResult { + pub(super) fn populate_summary_info(&mut self, summary_info: &SummaryInfo) { + self.inserted_count += summary_info.n_inserted; + self.upserted_count += summary_info.n_upserted; + self.matched_count += summary_info.n_matched; + self.modified_count += summary_info.n_modified; + self.deleted_count += summary_info.n_deleted; + } +} + +/// The structure of the response for a single operation within the results cursor. +#[derive(Debug, Deserialize)] +pub(super) struct SingleOperationResponse { + #[serde(rename = "idx")] + pub(super) index: usize, + #[serde(flatten)] + pub(super) result: SingleOperationResult, +} + +/// The structure of the non-index fields for a single operation within the results cursor. +#[derive(Debug, Deserialize)] +#[serde(untagged)] +pub(super) enum SingleOperationResult { + // This variant must be listed first for proper deserialization. + Error(WriteError), + #[serde(rename_all = "camelCase")] + Success { + n: u64, + n_modified: Option, + upserted: Option, + }, +} + +/// The structure of the inserted ID for an upserted document. +#[derive(Debug, Deserialize)] +pub(super) struct UpsertedId { + #[serde(rename = "_id")] + pub(super) id: Bson, +} From 6588a5e045b0fcf1a76ceeb966a89c2a8db2aa9a Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Mon, 26 Feb 2024 14:30:42 -0700 Subject: [PATCH 07/75] add batching prose tests --- src/test/bulk_write.rs | 106 +++++++++++++++++++++++++++++------------ 1 file changed, 76 insertions(+), 30 deletions(-) diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 8354727ab..5bfdf1b9d 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -14,7 +14,7 @@ async fn run_unified() { } #[tokio::test] -async fn command_batching() { +async fn max_write_batch_size_batching() { let handler = Arc::new(EventHandler::new()); let client = Client::test_builder() .event_handler(handler.clone()) @@ -22,49 +22,95 @@ async fn command_batching() { .await; let mut subscriber = handler.subscribe(); - let max_object_size = client.server_info.max_bson_object_size as usize; - let max_message_size = client.server_info.max_message_size_bytes as usize; + let max_write_batch_size = client.server_info.max_write_batch_size.unwrap() as usize; - let namespace = Namespace::new("command_batching", "command_batching"); - let large_doc = doc! {"a": "b".repeat(max_object_size / 2)}; - let models = vec![ - WriteModel::InsertOne { - namespace: namespace.clone(), - document: large_doc, - }; - 3 - ]; - client.bulk_write(models).await.unwrap(); + let model = WriteModel::InsertOne { + namespace: Namespace::new("db", "coll"), + document: doc! 
{ "a": "b" }, + }; + let models = vec![model; max_write_batch_size + 1]; + + let result = client.bulk_write(models).await.unwrap(); + assert_eq!(result.inserted_count as usize, max_write_batch_size + 1); + + let (first_started, _) = subscriber + .wait_for_successful_command_execution(Duration::from_millis(500), "bulkWrite") + .await + .expect("no events observed"); + let first_len = first_started.command.get_array("ops").unwrap().len(); + assert_eq!(first_len, max_write_batch_size); + + let (second_started, _) = subscriber + .wait_for_successful_command_execution(Duration::from_millis(500), "bulkWrite") + .await + .expect("no events observed"); + let second_len = second_started.command.get_array("ops").unwrap().len(); + assert_eq!(second_len, 1); +} + +#[tokio::test] +async fn max_bson_object_size_with_document_sequences() { + let handler = Arc::new(EventHandler::new()); + let client = Client::test_builder() + .event_handler(handler.clone()) + .build() + .await; + let mut subscriber = handler.subscribe(); + + let max_bson_object_size = client.server_info.max_bson_object_size as usize; + + let document = doc! { "a": "b".repeat(max_bson_object_size / 2) }; + let model = WriteModel::InsertOne { + namespace: Namespace::new("db", "coll"), + document, + }; + let models = vec![model; 2]; + + let result = client.bulk_write(models).await.unwrap(); + assert_eq!(result.inserted_count as usize, 2); let (started, _) = subscriber .wait_for_successful_command_execution(Duration::from_millis(500), "bulkWrite") .await .expect("no events observed"); - let ops = started.command.get_array("ops").unwrap(); - assert_eq!(ops.len(), 3); - - let large_doc = doc! { "a": "b".repeat(max_object_size - 5000) }; - let num_models = max_message_size / max_object_size + 1; - let models = vec![ - WriteModel::InsertOne { - namespace: namespace.clone(), - document: large_doc - }; - num_models - ]; - client.bulk_write(models).await.unwrap(); + let len = started.command.get_array("ops").unwrap().len(); + assert_eq!(len, 2); +} + +#[tokio::test] +async fn max_message_size_bytes_batching() { + let handler = Arc::new(EventHandler::new()); + let client = Client::test_builder() + .event_handler(handler.clone()) + .build() + .await; + let mut subscriber = handler.subscribe(); + + let max_bson_object_size = client.server_info.max_bson_object_size as usize; + let max_message_size_bytes = client.server_info.max_message_size_bytes as usize; + + let document = doc! 
{ "a": "b".repeat(max_bson_object_size - 500) }; + let model = WriteModel::InsertOne { + namespace: Namespace::new("db", "coll"), + document, + }; + let num_models = max_message_size_bytes / max_bson_object_size + 1; + let models = vec![model; num_models]; + + let result = client.bulk_write(models).await.unwrap(); + assert_eq!(result.inserted_count as usize, num_models); let (first_started, _) = subscriber .wait_for_successful_command_execution(Duration::from_millis(500), "bulkWrite") .await .expect("no events observed"); - let first_len = first_started.command.get_array("ops").unwrap().len(); - assert!(first_len < num_models); + let first_ops_len = first_started.command.get_array("ops").unwrap().len(); let (second_started, _) = subscriber .wait_for_successful_command_execution(Duration::from_millis(500), "bulkWrite") .await .expect("no events observed"); - let second_len = second_started.command.get_array("ops").unwrap().len(); - assert_eq!(first_len + second_len, num_models); + let second_ops_len = second_started.command.get_array("ops").unwrap().len(); + + assert_eq!(first_ops_len + second_ops_len, num_models); } From 3a84805a21e8a2551a14ed5a2e2c4ded852c3b68 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 29 Feb 2024 14:05:53 -0700 Subject: [PATCH 08/75] update tests --- src/action/bulk_write/results.rs | 53 ++++- src/coll.rs | 14 +- src/operation/bulk_write.rs | 19 +- src/serde_util.rs | 16 +- src/test/bulk_write.rs | 19 +- .../client-bulkWrite-delete-options.json | 91 ++++---- .../client-bulkWrite-errors.json | 89 +++----- .../client-bulkWrite-mixed-namespaces.json | 67 +++--- .../client-bulkWrite-options.json | 142 ++++++++---- .../client-bulkWrite-ordered.json | 117 +++++++--- .../client-bulkWrite-results.json | 124 +++------- .../client-bulkWrite-update-options.json | 216 ++++++++++-------- src/test/spec/unified_runner/test_runner.rs | 2 +- 13 files changed, 539 insertions(+), 430 deletions(-) diff --git a/src/action/bulk_write/results.rs b/src/action/bulk_write/results.rs index 82896863f..aaa794261 100644 --- a/src/action/bulk_write/results.rs +++ b/src/action/bulk_write/results.rs @@ -3,13 +3,15 @@ use std::{collections::HashMap, fmt::Debug}; use serde::Serialize; +use serde_with::skip_serializing_none; use crate::{ results::{DeleteResult, InsertOneResult, UpdateResult}, serde_util::serialize_indexed_map, }; -#[derive(Clone, Debug, Default, Serialize)] +#[skip_serializing_none] +#[derive(Clone, Debug, Serialize)] #[serde(rename_all = "camelCase")] #[non_exhaustive] pub struct BulkWriteResult { @@ -19,24 +21,43 @@ pub struct BulkWriteResult { pub modified_count: i64, pub deleted_count: i64, #[serde(serialize_with = "serialize_indexed_map")] - pub insert_results: HashMap, + pub insert_results: Option>, #[serde(serialize_with = "serialize_indexed_map")] - pub update_results: HashMap, + pub update_results: Option>, #[serde(serialize_with = "serialize_indexed_map")] - pub delete_results: HashMap, + pub delete_results: Option>, } impl BulkWriteResult { + pub(crate) fn new(verbose: bool) -> Self { + Self { + inserted_count: 0, + upserted_count: 0, + matched_count: 0, + modified_count: 0, + deleted_count: 0, + insert_results: verbose.then(HashMap::new), + update_results: verbose.then(HashMap::new), + delete_results: verbose.then(HashMap::new), + } + } + pub(crate) fn add_insert_result(&mut self, index: usize, insert_result: InsertOneResult) { - self.insert_results.insert(index, insert_result); + self.insert_results + .get_or_insert_with(Default::default) + .insert(index, 
insert_result); } pub(crate) fn add_update_result(&mut self, index: usize, update_result: UpdateResult) { - self.update_results.insert(index, update_result); + self.update_results + .get_or_insert_with(Default::default) + .insert(index, update_result); } pub(crate) fn add_delete_result(&mut self, index: usize, delete_result: DeleteResult) { - self.delete_results.insert(index, delete_result); + self.delete_results + .get_or_insert_with(Default::default) + .insert(index, delete_result); } pub(crate) fn merge(&mut self, other: Self) { @@ -45,8 +66,20 @@ impl BulkWriteResult { self.matched_count += other.matched_count; self.modified_count += other.modified_count; self.deleted_count += other.deleted_count; - self.insert_results.extend(other.insert_results); - self.update_results.extend(other.update_results); - self.delete_results.extend(other.delete_results); + if let Some(insert_results) = other.insert_results { + self.insert_results + .get_or_insert_with(Default::default) + .extend(insert_results); + } + if let Some(update_results) = other.update_results { + self.update_results + .get_or_insert_with(Default::default) + .extend(update_results); + } + if let Some(delete_results) = other.delete_results { + self.delete_results + .get_or_insert_with(Default::default) + .extend(delete_results); + } } } diff --git a/src/coll.rs b/src/coll.rs index 2b3175f9f..451880133 100644 --- a/src/coll.rs +++ b/src/coll.rs @@ -776,17 +776,9 @@ impl<'de> Deserialize<'de> for Namespace { where D: Deserializer<'de>, { - #[derive(Deserialize)] - #[serde(untagged)] - enum NamespaceHelper { - String(String), - Object { db: String, coll: String }, - } - match NamespaceHelper::deserialize(deserializer)? { - NamespaceHelper::String(string) => Self::from_str(&string) - .ok_or_else(|| D::Error::custom("Missing one or more fields in namespace")), - NamespaceHelper::Object { db, coll } => Ok(Self { db, coll }), - } + let s: String = Deserialize::deserialize(deserializer)?; + Self::from_str(&s) + .ok_or_else(|| D::Error::custom("Missing one or more fields in namespace")) } } diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index 59b028358..6b72bc3dc 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -69,6 +69,13 @@ impl<'a> BulkWrite<'a> { } } + fn is_verbose(&self) -> bool { + self.options + .as_ref() + .and_then(|o| o.verbose_results) + .unwrap_or(false) + } + async fn iterate_results_cursor( &self, mut stream: impl TryStream + Unpin, @@ -90,7 +97,7 @@ impl<'a> BulkWrite<'a> { let inserted_id = self.get_inserted_id(result_index)?; let insert_result = InsertOneResult { inserted_id }; result - .get_or_insert_with(Default::default) + .get_or_insert_with(|| BulkWriteResult::new(self.is_verbose())) .add_insert_result(result_index, insert_result); } OperationType::Update => { @@ -106,13 +113,13 @@ impl<'a> BulkWrite<'a> { upserted_id: upserted.map(|upserted| upserted.id), }; result - .get_or_insert_with(Default::default) + .get_or_insert_with(|| BulkWriteResult::new(self.is_verbose())) .add_update_result(result_index, update_result); } OperationType::Delete => { let delete_result = DeleteResult { deleted_count: n }; result - .get_or_insert_with(Default::default) + .get_or_insert_with(|| BulkWriteResult::new(self.is_verbose())) .add_delete_result(result_index, delete_result); } } @@ -283,7 +290,7 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { if response.summary.n_errors < self.n_attempted as i64 { bulk_write_error .partial_result - .get_or_insert_with(Default::default) + 
.get_or_insert_with(|| BulkWriteResult::new(self.is_verbose())) .populate_summary_info(&response.summary); } @@ -322,7 +329,9 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { if bulk_write_error.write_errors.is_empty() && bulk_write_error.write_concern_errors.is_empty() { - Ok(bulk_write_error.partial_result.unwrap_or_default()) + Ok(bulk_write_error + .partial_result + .unwrap_or_else(|| BulkWriteResult::new(self.is_verbose()))) } else { let error = Error::new( ErrorKind::ClientBulkWrite(bulk_write_error), diff --git a/src/serde_util.rs b/src/serde_util.rs index d13283fbd..fdf69092c 100644 --- a/src/serde_util.rs +++ b/src/serde_util.rs @@ -196,14 +196,18 @@ where } pub(crate) fn serialize_indexed_map( - map: &HashMap, + map: &Option>, serializer: S, ) -> std::result::Result { - let string_map: BTreeMap<_, _> = map - .iter() - .map(|(index, result)| (index.to_string(), result)) - .collect(); - string_map.serialize(serializer) + if let Some(map) = map { + let string_map: BTreeMap<_, _> = map + .iter() + .map(|(index, result)| (index.to_string(), result)) + .collect(); + string_map.serialize(serializer) + } else { + serializer.serialize_none() + } } #[cfg(test)] diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 5bfdf1b9d..b28110377 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -3,7 +3,7 @@ use std::{sync::Arc, time::Duration}; use crate::{ action::bulk_write::write_models::WriteModel, bson::doc, - test::{spec::unified_runner::run_unified_tests, EventHandler}, + test::{log_uncaptured, spec::unified_runner::run_unified_tests, EventHandler}, Client, Namespace, }; @@ -22,6 +22,11 @@ async fn max_write_batch_size_batching() { .await; let mut subscriber = handler.subscribe(); + if client.server_version_lt(8, 0) { + log_uncaptured("skipping max_write_batch_size_batching: bulkWrite requires 8.0+"); + return; + } + let max_write_batch_size = client.server_info.max_write_batch_size.unwrap() as usize; let model = WriteModel::InsertOne { @@ -57,6 +62,13 @@ async fn max_bson_object_size_with_document_sequences() { .await; let mut subscriber = handler.subscribe(); + if client.server_version_lt(8, 0) { + log_uncaptured( + "skipping max_bson_object_size_with_document_sequences: bulkWrite requires 8.0+", + ); + return; + } + let max_bson_object_size = client.server_info.max_bson_object_size as usize; let document = doc! 
{ "a": "b".repeat(max_bson_object_size / 2) }; @@ -86,6 +98,11 @@ async fn max_message_size_bytes_batching() { .await; let mut subscriber = handler.subscribe(); + if client.server_version_lt(8, 0) { + log_uncaptured("skipping max_message_size_bytes_batching: bulkWrite requires 8.0+"); + return; + } + let max_bson_object_size = client.server_info.max_bson_object_size as usize; let max_message_size_bytes = client.server_info.max_message_size_bytes as usize; diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.json index 77a67625f..8ad03a200 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.json @@ -1,6 +1,11 @@ { "description": "client bulkWrite delete options", "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], "createEntities": [ { "client": { @@ -45,14 +50,16 @@ ] } ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "collation": { + "locale": "simple" + }, + "hint": "_id_" + }, "tests": [ { "description": "client bulk write delete with collation", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -61,10 +68,7 @@ "models": [ { "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 1 }, @@ -75,10 +79,7 @@ }, { "deleteMany": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": { "$gt": 1 @@ -89,7 +90,8 @@ } } } - ] + ], + "verboseResults": true }, "expectResult": { "insertedCount": 0, @@ -97,14 +99,15 @@ "matchedCount": 0, "modifiedCount": 0, "deletedCount": 3, - "insertResults": { - "$$unsetOrMatches": {} - }, - "updateResults": { - "$$unsetOrMatches": {} - }, + "insertResults": {}, + "updateResults": {}, "deleteResults": { - "$$unsetOrMatches": {} + "0": { + "deletedCount": 1 + }, + "1": { + "deletedCount": 2 + } } } } @@ -115,8 +118,12 @@ "events": [ { "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", "command": { "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, "ops": [ { "delete": 0, @@ -140,6 +147,11 @@ }, "multi": true } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } ] } } @@ -157,11 +169,6 @@ }, { "description": "client bulk write delete with hint", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -170,10 +177,7 @@ "models": [ { "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 1 }, @@ -182,10 +186,7 @@ }, { "deleteMany": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": { "$gt": 1 @@ -194,7 +195,8 @@ "hint": "_id_" } } - ] + ], + "verboseResults": true }, "expectResult": { "insertedCount": 0, @@ -202,14 +204,15 @@ "matchedCount": 0, "modifiedCount": 0, "deletedCount": 3, - "insertResults": { - "$$unsetOrMatches": {} - }, - "updateResults": { - "$$unsetOrMatches": {} - }, + "insertResults": {}, + "updateResults": {}, "deleteResults": { - "$$unsetOrMatches": {} + "0": { + "deletedCount": 1 + }, + "1": { + "deletedCount": 2 + } } } } @@ -220,8 +223,12 @@ "events": [ { "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": 
"admin", "command": { "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, "ops": [ { "delete": 0, diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json index 10ddb7a5d..9719a9d97 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json @@ -1,6 +1,11 @@ { "description": "client bulkWrite errors", - "schemaVersion": "1.0", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], "createEntities": [ { "client": { @@ -10,7 +15,8 @@ ], "uriOptions": { "retryWrites": false - } + }, + "useMultipleMongoses": false } }, { @@ -35,27 +41,29 @@ "documents": [ { "_id": 1, - "x": 1 + "x": 11 }, { "_id": 2, - "x": 2 + "x": 22 }, { "_id": 3, - "x": 3 + "x": 33 } ] } ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + }, "tests": [ { "description": "an individual operation fails during an ordered bulkWrite", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -64,10 +72,7 @@ "models": [ { "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 1 } @@ -75,10 +80,7 @@ }, { "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "$expr": { "$eq": [ @@ -91,10 +93,7 @@ }, { "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 3 } @@ -133,11 +132,11 @@ "documents": [ { "_id": 2, - "x": 2 + "x": 22 }, { "_id": 3, - "x": 3 + "x": 33 } ] } @@ -158,10 +157,7 @@ "models": [ { "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 1 } @@ -169,10 +165,7 @@ }, { "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "$expr": { "$eq": [ @@ -185,10 +178,7 @@ }, { "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 3 } @@ -231,7 +221,7 @@ "documents": [ { "_id": 2, - "x": 2 + "x": 22 } ] } @@ -252,10 +242,7 @@ "models": [ { "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 1 } @@ -263,10 +250,7 @@ }, { "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "$expr": { "$eq": [ @@ -279,10 +263,7 @@ }, { "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 3 } @@ -346,10 +327,7 @@ "models": [ { "insertOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "document": { "x": 1 } @@ -396,10 +374,7 @@ "models": [ { "insertOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "document": { "_id": 10 } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json index f408a8969..6db78d2ac 100644 --- 
a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json @@ -1,6 +1,11 @@ { "description": "client bulkWrite with mixed namespaces", "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], "createEntities": [ { "client": { @@ -14,7 +19,7 @@ "database": { "id": "database0", "client": "client0", - "databaseName": "bulkWrite-db0" + "databaseName": "db0" } }, { @@ -35,7 +40,7 @@ "database": { "id": "database1", "client": "client0", - "databaseName": "bulkWrite-db1" + "databaseName": "db1" } }, { @@ -48,12 +53,12 @@ ], "initialData": [ { - "databaseName": "bulkWrite-db0", + "databaseName": "db0", "collectionName": "coll0", "documents": [] }, { - "databaseName": "bulkWrite-db0", + "databaseName": "db0", "collectionName": "coll1", "documents": [ { @@ -67,7 +72,7 @@ ] }, { - "databaseName": "bulkWrite-db1", + "databaseName": "db1", "collectionName": "coll2", "documents": [ { @@ -81,14 +86,14 @@ ] } ], + "_yamlAnchors": { + "db0Coll0Namespace": "db0.coll0", + "db0Coll1Namespace": "db0.coll1", + "db1Coll2Namespace": "db1.coll2" + }, "tests": [ { "description": "client bulkWrite with mixed namespaces", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -97,10 +102,7 @@ "models": [ { "insertOne": { - "namespace": { - "db": "bulkWrite-db0", - "coll": "coll0" - }, + "namespace": "db0.coll0", "document": { "_id": 1 } @@ -108,10 +110,7 @@ }, { "insertOne": { - "namespace": { - "db": "bulkWrite-db0", - "coll": "coll0" - }, + "namespace": "db0.coll0", "document": { "_id": 2 } @@ -119,10 +118,7 @@ }, { "updateOne": { - "namespace": { - "db": "bulkWrite-db0", - "coll": "coll1" - }, + "namespace": "db0.coll1", "filter": { "_id": 1 }, @@ -135,10 +131,7 @@ }, { "deleteOne": { - "namespace": { - "db": "bulkWrite-db1", - "coll": "coll2" - }, + "namespace": "db1.coll2", "filter": { "_id": 3 } @@ -146,10 +139,7 @@ }, { "deleteOne": { - "namespace": { - "db": "bulkWrite-db0", - "coll": "coll1" - }, + "namespace": "db0.coll1", "filter": { "_id": 2 } @@ -157,10 +147,7 @@ }, { "replaceOne": { - "namespace": { - "db": "bulkWrite-db1", - "coll": "coll2" - }, + "namespace": "db1.coll2", "filter": { "_id": 4 }, @@ -269,13 +256,13 @@ ], "nsInfo": [ { - "ns": "bulkWrite-db0.coll0" + "ns": "db0.coll0" }, { - "ns": "bulkWrite-db0.coll1" + "ns": "db0.coll1" }, { - "ns": "bulkWrite-db1.coll2" + "ns": "db1.coll2" } ] } @@ -286,7 +273,7 @@ ], "outcome": [ { - "databaseName": "bulkWrite-db0", + "databaseName": "db0", "collectionName": "coll0", "documents": [ { @@ -298,7 +285,7 @@ ] }, { - "databaseName": "bulkWrite-db0", + "databaseName": "db0", "collectionName": "coll1", "documents": [ { @@ -308,7 +295,7 @@ ] }, { - "databaseName": "bulkWrite-db1", + "databaseName": "db1", "collectionName": "coll2", "documents": [ { diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json index 80e5fef10..d92be7b19 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json @@ -1,6 +1,11 @@ { "description": "client bulkWrite top-level options", "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], "createEntities": [ { "client": { @@ -41,14 +46,19 @@ ] } ], + "_yamlAnchors": { + 
"namespace": "crud-tests.coll0", + "comment": { + "bulk": "write" + }, + "let": { + "id1": 1, + "id2": 2 + } + }, "tests": [ { "description": "client bulkWrite comment", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -57,10 +67,7 @@ "models": [ { "insertOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "document": { "_id": 3, "x": 33 @@ -95,11 +102,29 @@ "events": [ { "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", "command": { "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, "comment": { "bulk": "write" - } + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] } } } @@ -129,11 +154,6 @@ }, { "description": "client bulkWrite bypassDocumentValidation", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -142,13 +162,10 @@ "models": [ { "insertOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "document": { - "_id": 4, - "x": 44 + "_id": 3, + "x": 33 } } } @@ -164,7 +181,7 @@ "deletedCount": 0, "insertResults": { "0": { - "insertedId": 4 + "insertedId": 3 } }, "updateResults": {}, @@ -178,9 +195,27 @@ "events": [ { "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", "command": { "bulkWrite": 1, - "bypassDocumentValidation": true + "errorsOnly": false, + "ordered": true, + "bypassDocumentValidation": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] } } } @@ -201,8 +236,8 @@ "x": 22 }, { - "_id": 4, - "x": 44 + "_id": 3, + "x": 33 } ] } @@ -210,11 +245,6 @@ }, { "description": "client bulkWrite let", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -223,10 +253,7 @@ "models": [ { "updateOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "$expr": { "$eq": [ @@ -244,10 +271,7 @@ }, { "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "$expr": { "$eq": [ @@ -293,12 +317,52 @@ "events": [ { "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", "command": { "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, "let": { "id1": 1, "id2": 2 - } + }, + "ops": [ + { + "update": 0, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id1" + ] + } + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] } } } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.json index 546a980b2..5ce983f63 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.json @@ -1,6 +1,11 @@ { "description": "client bulkWrite with ordered option", "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], "createEntities": [ { "client": { @@ -32,14 +37,12 @@ "documents": [] } ], + "_yamlAnchors": { + 
"namespace": "crud-tests.coll0" + }, "tests": [ { "description": "client bulkWrite with ordered: false", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -48,12 +51,10 @@ "models": [ { "insertOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "document": { - "_id": 1 + "_id": 1, + "x": 11 } } } @@ -83,9 +84,26 @@ "events": [ { "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", "command": { "bulkWrite": 1, - "ordered": false + "errorsOnly": false, + "ordered": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] } } } @@ -98,7 +116,8 @@ "databaseName": "crud-tests", "documents": [ { - "_id": 1 + "_id": 1, + "x": 11 } ] } @@ -106,11 +125,6 @@ }, { "description": "client bulkWrite with ordered: true", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -119,12 +133,10 @@ "models": [ { "insertOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "document": { - "_id": 4 + "_id": 1, + "x": 11 } } } @@ -140,7 +152,7 @@ "deletedCount": 0, "insertResults": { "0": { - "insertedId": 4 + "insertedId": 1 } }, "updateResults": {}, @@ -154,9 +166,26 @@ "events": [ { "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", "command": { "bulkWrite": 1, - "ordered": true + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] } } } @@ -169,7 +198,8 @@ "databaseName": "crud-tests", "documents": [ { - "_id": 4 + "_id": 1, + "x": 11 } ] } @@ -177,11 +207,6 @@ }, { "description": "client bulkWrite defaults to ordered: true", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -190,12 +215,10 @@ "models": [ { "insertOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "document": { - "_id": 4 + "_id": 1, + "x": 11 } } } @@ -210,7 +233,7 @@ "deletedCount": 0, "insertResults": { "0": { - "insertedId": 4 + "insertedId": 1 } }, "updateResults": {}, @@ -224,9 +247,26 @@ "events": [ { "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", "command": { "bulkWrite": 1, - "ordered": true + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] } } } @@ -239,7 +279,8 @@ "databaseName": "crud-tests", "documents": [ { - "_id": 4 + "_id": 1, + "x": 11 } ] } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json index 489b581d0..92aa73464 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json @@ -1,6 +1,11 @@ { "description": "client bulkWrite results", - "schemaVersion": "1.18", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], "createEntities": [ { "client": { @@ -57,14 +62,12 @@ ] } ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, "tests": [ { "description": "client bulkWrite with verboseResults: true returns 
detailed results", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -73,10 +76,7 @@ "models": [ { "insertOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "document": { "_id": 8, "x": 88 @@ -85,10 +85,7 @@ }, { "updateOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 1 }, @@ -101,10 +98,7 @@ }, { "updateMany": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "$and": [ { @@ -128,10 +122,7 @@ }, { "replaceOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 4 }, @@ -143,10 +134,7 @@ }, { "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 5 } @@ -154,10 +142,7 @@ }, { "deleteMany": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "$and": [ { @@ -222,9 +207,12 @@ "events": [ { "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", "command": { "bulkWrite": 1, "errorsOnly": false, + "ordered": true, "ops": [ { "insert": 0, @@ -347,11 +335,6 @@ }, { "description": "client bulkWrite with verboseResults: false omits detailed results", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -360,10 +343,7 @@ "models": [ { "insertOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "document": { "_id": 8, "x": 88 @@ -372,10 +352,7 @@ }, { "updateOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 1 }, @@ -388,10 +365,7 @@ }, { "updateMany": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "$and": [ { @@ -415,10 +389,7 @@ }, { "replaceOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 4 }, @@ -430,10 +401,7 @@ }, { "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 5 } @@ -441,10 +409,7 @@ }, { "deleteMany": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "$and": [ { @@ -488,9 +453,12 @@ "events": [ { "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", "command": { "bulkWrite": 1, "errorsOnly": true, + "ordered": true, "ops": [ { "insert": 0, @@ -613,11 +581,6 @@ }, { "description": "client bulkWrite defaults to verboseResults: false", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -626,10 +589,7 @@ "models": [ { "insertOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "document": { "_id": 8, "x": 88 @@ -638,10 +598,7 @@ }, { "updateOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 1 }, @@ -654,10 +611,7 @@ }, { "updateMany": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "$and": [ { @@ -681,10 +635,7 @@ }, { "replaceOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + 
"namespace": "crud-tests.coll0", "filter": { "_id": 4 }, @@ -696,10 +647,7 @@ }, { "deleteOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 5 } @@ -707,10 +655,7 @@ }, { "deleteMany": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "$and": [ { @@ -753,9 +698,12 @@ "events": [ { "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", "command": { "bulkWrite": 1, "errorsOnly": true, + "ordered": true, "ops": [ { "insert": 0, diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json index 5b442f300..d163775bd 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json @@ -1,6 +1,11 @@ { "description": "client bulkWrite update options", "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], "createEntities": [ { "client": { @@ -65,14 +70,16 @@ ] } ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "collation": { + "locale": "simple" + }, + "hint": "_id_" + }, "tests": [ { "description": "client bulkWrite update with arrayFilters", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -81,10 +88,7 @@ "models": [ { "updateOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 1 }, @@ -104,10 +108,7 @@ }, { "updateMany": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "$and": [ { @@ -136,7 +137,8 @@ ] } } - ] + ], + "verboseResults": true }, "expectResult": { "insertedCount": 0, @@ -144,15 +146,20 @@ "matchedCount": 3, "modifiedCount": 3, "deletedCount": 0, - "insertResults": { - "$$unsetOrMatches": {} - }, + "insertResults": {}, "updateResults": { - "$$unsetOrMatches": {} + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": null + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": null + } }, - "deleteResults": { - "$$unsetOrMatches": {} - } + "deleteResults": {} } } ], @@ -162,8 +169,12 @@ "events": [ { "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", "command": { "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, "ops": [ { "update": 0, @@ -214,6 +225,11 @@ ], "multi": true } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } ] } } @@ -264,11 +280,6 @@ }, { "description": "client bulkWrite update with collation", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -277,10 +288,7 @@ "models": [ { "updateOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 1 }, @@ -300,10 +308,7 @@ }, { "updateMany": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "$and": [ { @@ -334,10 +339,7 @@ }, { "replaceOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 4 }, @@ -353,7 +355,8 @@ } } } - ] + ], + "verboseResults": true }, "expectResult": { "insertedCount": 0, @@ -361,15 +364,25 @@ "matchedCount": 4, "modifiedCount": 4, "deletedCount": 0, - 
"insertResults": { - "$$unsetOrMatches": {} - }, + "insertResults": {}, "updateResults": { - "$$unsetOrMatches": {} + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": null + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": null + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": null + } }, - "deleteResults": { - "$$unsetOrMatches": {} - } + "deleteResults": {} } } ], @@ -379,8 +392,12 @@ "events": [ { "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", "command": { "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, "ops": [ { "update": 0, @@ -448,6 +465,11 @@ }, "multi": false } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } ] } } @@ -498,11 +520,6 @@ }, { "description": "client bulkWrite update with hint", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -511,10 +528,7 @@ "models": [ { "updateOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 1 }, @@ -532,10 +546,7 @@ }, { "updateMany": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "$and": [ { @@ -564,10 +575,7 @@ }, { "replaceOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 4 }, @@ -581,7 +589,8 @@ "hint": "_id_" } } - ] + ], + "verboseResults": true }, "expectResult": { "insertedCount": 0, @@ -589,15 +598,25 @@ "matchedCount": 4, "modifiedCount": 4, "deletedCount": 0, - "insertResults": { - "$$unsetOrMatches": {} - }, + "insertResults": {}, "updateResults": { - "$$unsetOrMatches": {} + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": null + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": null + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": null + } }, - "deleteResults": { - "$$unsetOrMatches": {} - } + "deleteResults": {} } } ], @@ -607,8 +626,12 @@ "events": [ { "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", "command": { "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, "ops": [ { "update": 0, @@ -670,6 +693,11 @@ "hint": "_id_", "multi": false } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } ] } } @@ -720,11 +748,6 @@ }, { "description": "client bulkWrite update with upsert", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -733,10 +756,7 @@ "models": [ { "updateOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 5 }, @@ -754,10 +774,7 @@ }, { "replaceOne": { - "namespace": { - "db": "crud-tests", - "coll": "coll0" - }, + "namespace": "crud-tests.coll0", "filter": { "_id": 6 }, @@ -771,7 +788,8 @@ "upsert": true } } - ] + ], + "verboseResults": true }, "expectResult": { "insertedCount": 0, @@ -779,15 +797,20 @@ "matchedCount": 0, "modifiedCount": 0, "deletedCount": 0, - "insertResults": { - "$$unsetOrMatches": {} - }, + "insertResults": {}, "updateResults": { - "$$unsetOrMatches": {} + "0": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 5 + }, + "1": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 6 + } }, - "deleteResults": { - "$$unsetOrMatches": {} - } + "deleteResults": {} } } ], @@ -797,8 +820,12 @@ "events": [ { "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": 
"admin", "command": { "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, "ops": [ { "update": 0, @@ -832,6 +859,11 @@ "upsert": true, "multi": false } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } ] } } diff --git a/src/test/spec/unified_runner/test_runner.rs b/src/test/spec/unified_runner/test_runner.rs index f32a7f86e..891701af7 100644 --- a/src/test/spec/unified_runner/test_runner.rs +++ b/src/test/spec/unified_runner/test_runner.rs @@ -75,7 +75,7 @@ const SKIPPED_OPERATIONS: &[&str] = &[ ]; static MIN_SPEC_VERSION: Version = Version::new(1, 0, 0); -static MAX_SPEC_VERSION: Version = Version::new(1, 18, 0); +static MAX_SPEC_VERSION: Version = Version::new(1, 20, 0); pub(crate) type EntityMap = HashMap; From 3a9d970241a30b6f21caa983294b8ab99c5e19da Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 29 Feb 2024 14:34:35 -0700 Subject: [PATCH 09/75] rebase fixes --- src/action/bulk_write.rs | 2 +- src/error.rs | 8 +------- src/operation.rs | 1 - src/selection_criteria.rs | 8 -------- 4 files changed, 2 insertions(+), 17 deletions(-) diff --git a/src/action/bulk_write.rs b/src/action/bulk_write.rs index b27ee75a9..d158f2073 100644 --- a/src/action/bulk_write.rs +++ b/src/action/bulk_write.rs @@ -118,7 +118,7 @@ impl<'a> BulkWrite<'a> { } action_impl! { - impl Action<'a> for BulkWrite<'a> { + impl<'a> Action for BulkWrite<'a> { type Future = BulkWriteFuture; async fn execute(mut self) -> Result { diff --git a/src/error.rs b/src/error.rs index b3efa73d2..3ef4a19f9 100644 --- a/src/error.rs +++ b/src/error.rs @@ -7,18 +7,12 @@ use std::{ sync::Arc, }; -use crate::{ - bson::Document, - options::ServerAddress, - sdam::{ServerType, TopologyVersion}, -}; -use bson::Bson; use serde::{Deserialize, Serialize}; use thiserror::Error; use crate::{ action::bulk_write::error::BulkWriteError as ClientBulkWriteError, - bson::Document, + bson::{Bson, Document}, options::ServerAddress, sdam::{ServerType, TopologyVersion}, }; diff --git a/src/operation.rs b/src/operation.rs index 486a5dd47..20974c83f 100644 --- a/src/operation.rs +++ b/src/operation.rs @@ -52,7 +52,6 @@ use crate::{ }; pub(crate) use abort_transaction::AbortTransaction; -pub(crate) use bulk_write::{BulkWrite, BulkWriteOperationResponse, BulkWriteSummaryInfo}; pub(crate) use commit_transaction::CommitTransaction; pub(crate) use create_indexes::CreateIndexes; pub(crate) use delete::Delete; diff --git a/src/selection_criteria.rs b/src/selection_criteria.rs index bc4e03718..640475a6d 100644 --- a/src/selection_criteria.rs +++ b/src/selection_criteria.rs @@ -53,14 +53,6 @@ impl SelectionCriteria { } } - #[cfg(test)] - pub(crate) fn as_predicate(&self) -> Option<&Predicate> { - match self { - Self::Predicate(ref p) => Some(p), - _ => None, - } - } - pub(crate) fn from_address(address: ServerAddress) -> Self { SelectionCriteria::Predicate(Arc::new(move |server| server.address() == &address)) } From 1c8fbc2789f30e912c9bb056915064cea77c334e Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 29 Feb 2024 15:06:22 -0700 Subject: [PATCH 10/75] resync retryable writes/transactions --- .../client-bulkWrite-serverErrors.json | 130 ++++++++++-------- .../unified/client-bulkWrite.json | 47 +++---- 2 files changed, 88 insertions(+), 89 deletions(-) diff --git a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json index 8d3324b59..caab0b546 100644 --- 
a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json +++ b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json @@ -1,6 +1,6 @@ { "description": "client bulkWrite retryable writes", - "schemaVersion": "1.18", + "schemaVersion": "1.20", "runOnRequirements": [ { "minServerVersion": "8.0", @@ -15,7 +15,8 @@ "id": "client0", "observeEvents": [ "commandStartedEvent" - ] + ], + "useMultipleMongoses": false } }, { @@ -53,6 +54,9 @@ ] } ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, "tests": [ { "description": "client bulkWrite with no multi: true operations succeeds after retryable top-level error", @@ -86,10 +90,7 @@ "models": [ { "insertOne": { - "namespace": { - "db": "retryable-writes-tests", - "coll": "coll0" - }, + "namespace": "retryable-writes-tests.coll0", "document": { "_id": 4, "x": 44 @@ -98,10 +99,7 @@ }, { "updateOne": { - "namespace": { - "db": "retryable-writes-tests", - "coll": "coll0" - }, + "namespace": "retryable-writes-tests.coll0", "filter": { "_id": 1 }, @@ -114,10 +112,7 @@ }, { "replaceOne": { - "namespace": { - "db": "retryable-writes-tests", - "coll": "coll0" - }, + "namespace": "retryable-writes-tests.coll0", "filter": { "_id": 2 }, @@ -128,16 +123,14 @@ }, { "deleteOne": { - "namespace": { - "db": "retryable-writes-tests", - "coll": "coll0" - }, + "namespace": "retryable-writes-tests.coll0", "filter": { "_id": 3 } } } - ] + ], + "verboseResults": true }, "expectResult": { "insertedCount": 1, @@ -146,13 +139,26 @@ "modifiedCount": 2, "deletedCount": 1, "insertResults": { - "$$unsetOrMatches": {} + "0": { + "insertedId": 4 + } }, "updateResults": { - "$$unsetOrMatches": {} + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": null + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": null + } }, "deleteResults": { - "$$unsetOrMatches": {} + "3": { + "deletedCount": 1 + } } } } @@ -166,6 +172,9 @@ "commandName": "bulkWrite", "databaseName": "admin", "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, "ops": [ { "insert": 0, @@ -217,6 +226,9 @@ "commandName": "bulkWrite", "databaseName": "admin", "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, "ops": [ { "insert": 0, @@ -319,10 +331,7 @@ "models": [ { "updateMany": { - "namespace": { - "db": "retryable-writes-tests", - "coll": "coll0" - }, + "namespace": "retryable-writes-tests.coll0", "filter": { "_id": 1 }, @@ -335,10 +344,7 @@ }, { "deleteMany": { - "namespace": { - "db": "retryable-writes-tests", - "coll": "coll0" - }, + "namespace": "retryable-writes-tests.coll0", "filter": { "_id": 3 } @@ -363,6 +369,9 @@ "commandName": "bulkWrite", "databaseName": "admin", "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, "ops": [ { "update": 0, @@ -431,10 +440,7 @@ "models": [ { "insertOne": { - "namespace": { - "db": "retryable-writes-tests", - "coll": "coll0" - }, + "namespace": "retryable-writes-tests.coll0", "document": { "_id": 4, "x": 44 @@ -443,10 +449,7 @@ }, { "updateOne": { - "namespace": { - "db": "retryable-writes-tests", - "coll": "coll0" - }, + "namespace": "retryable-writes-tests.coll0", "filter": { "_id": 1 }, @@ -459,10 +462,7 @@ }, { "replaceOne": { - "namespace": { - "db": "retryable-writes-tests", - "coll": "coll0" - }, + "namespace": "retryable-writes-tests.coll0", "filter": { "_id": 2 }, @@ -473,16 +473,14 @@ }, { "deleteOne": { - "namespace": { - "db": "retryable-writes-tests", - "coll": "coll0" - }, + "namespace": 
"retryable-writes-tests.coll0", "filter": { "_id": 3 } } } - ] + ], + "verboseResults": true }, "expectResult": { "insertedCount": 1, @@ -491,13 +489,26 @@ "modifiedCount": 2, "deletedCount": 1, "insertResults": { - "$$unsetOrMatches": {} + "0": { + "insertedId": 4 + } }, "updateResults": { - "$$unsetOrMatches": {} + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": null + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": null + } }, "deleteResults": { - "$$unsetOrMatches": {} + "3": { + "deletedCount": 1 + } } } } @@ -511,6 +522,9 @@ "commandName": "bulkWrite", "databaseName": "admin", "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, "ops": [ { "insert": 0, @@ -562,6 +576,9 @@ "commandName": "bulkWrite", "databaseName": "admin", "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, "ops": [ { "insert": 0, @@ -647,10 +664,7 @@ "models": [ { "updateMany": { - "namespace": { - "db": "retryable-writes-tests", - "coll": "coll0" - }, + "namespace": "retryable-writes-tests.coll0", "filter": { "_id": 1 }, @@ -663,10 +677,7 @@ }, { "deleteMany": { - "namespace": { - "db": "retryable-writes-tests", - "coll": "coll0" - }, + "namespace": "retryable-writes-tests.coll0", "filter": { "_id": 3 } @@ -693,6 +704,9 @@ "commandName": "bulkWrite", "databaseName": "admin", "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, "ops": [ { "update": 0, diff --git a/src/test/spec/json/transactions/unified/client-bulkWrite.json b/src/test/spec/json/transactions/unified/client-bulkWrite.json index 74e99f77a..24f69f452 100644 --- a/src/test/spec/json/transactions/unified/client-bulkWrite.json +++ b/src/test/spec/json/transactions/unified/client-bulkWrite.json @@ -1,6 +1,6 @@ { "description": "client bulkWrite transactions", - "schemaVersion": "1.18", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "8.0", @@ -41,6 +41,9 @@ } } ], + "_yamlAnchors": { + "namespace": "transactions-tests.coll0" + }, "initialData": [ { "databaseName": "transaction-tests", @@ -75,7 +78,7 @@ ], "tests": [ { - "description": "transactional client bulkWrite", + "description": "client bulkWrite in a transaction", "operations": [ { "object": "session0", @@ -89,10 +92,7 @@ "models": [ { "insertOne": { - "namespace": { - "db": "transaction-tests", - "coll": "coll0" - }, + "namespace": "transactions-tests.coll0", "document": { "_id": 8, "x": 88 @@ -101,10 +101,7 @@ }, { "updateOne": { - "namespace": { - "db": "transaction-tests", - "coll": "coll0" - }, + "namespace": "transactions-tests.coll0", "filter": { "_id": 1 }, @@ -117,10 +114,7 @@ }, { "updateMany": { - "namespace": { - "db": "transaction-tests", - "coll": "coll0" - }, + "namespace": "transactions-tests.coll0", "filter": { "$and": [ { @@ -144,10 +138,7 @@ }, { "replaceOne": { - "namespace": { - "db": "transaction-tests", - "coll": "coll0" - }, + "namespace": "transactions-tests.coll0", "filter": { "_id": 4 }, @@ -159,10 +150,7 @@ }, { "deleteOne": { - "namespace": { - "db": "transaction-tests", - "coll": "coll0" - }, + "namespace": "transactions-tests.coll0", "filter": { "_id": 5 } @@ -170,10 +158,7 @@ }, { "deleteMany": { - "namespace": { - "db": "transaction-tests", - "coll": "coll0" - }, + "namespace": "transactions-tests.coll0", "filter": { "$and": [ { @@ -245,7 +230,6 @@ "commandName": "bulkWrite", "databaseName": "admin", "command": { - "bulkWrite": 1, "lsid": { "$$sessionLsid": "session0" }, @@ -255,6 +239,9 @@ "writeConcern": { "$$exists": false }, + "bulkWrite": 
1, + "errorsOnly": false, + "ordered": true, "ops": [ { "insert": 0, @@ -337,11 +324,9 @@ ], "nsInfo": [ { - "ns": "transaction-tests.coll0" + "ns": "transactions-tests.coll0" } - ], - "errorsOnly": false, - "ordered": true + ] } } }, From 84203d78dc6b8d23952f0648b3d5a1a2b8fbb909 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 29 Feb 2024 15:52:37 -0700 Subject: [PATCH 11/75] fix transactions test typo --- .../transactions/unified/client-bulkWrite.json | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/test/spec/json/transactions/unified/client-bulkWrite.json b/src/test/spec/json/transactions/unified/client-bulkWrite.json index 24f69f452..4fbb4e1f2 100644 --- a/src/test/spec/json/transactions/unified/client-bulkWrite.json +++ b/src/test/spec/json/transactions/unified/client-bulkWrite.json @@ -42,7 +42,7 @@ } ], "_yamlAnchors": { - "namespace": "transactions-tests.coll0" + "namespace": "transaction-tests.coll0" }, "initialData": [ { @@ -92,7 +92,7 @@ "models": [ { "insertOne": { - "namespace": "transactions-tests.coll0", + "namespace": "transaction-tests.coll0", "document": { "_id": 8, "x": 88 @@ -101,7 +101,7 @@ }, { "updateOne": { - "namespace": "transactions-tests.coll0", + "namespace": "transaction-tests.coll0", "filter": { "_id": 1 }, @@ -114,7 +114,7 @@ }, { "updateMany": { - "namespace": "transactions-tests.coll0", + "namespace": "transaction-tests.coll0", "filter": { "$and": [ { @@ -138,7 +138,7 @@ }, { "replaceOne": { - "namespace": "transactions-tests.coll0", + "namespace": "transaction-tests.coll0", "filter": { "_id": 4 }, @@ -150,7 +150,7 @@ }, { "deleteOne": { - "namespace": "transactions-tests.coll0", + "namespace": "transaction-tests.coll0", "filter": { "_id": 5 } @@ -158,7 +158,7 @@ }, { "deleteMany": { - "namespace": "transactions-tests.coll0", + "namespace": "transaction-tests.coll0", "filter": { "$and": [ { @@ -324,7 +324,7 @@ ], "nsInfo": [ { - "ns": "transactions-tests.coll0" + "ns": "transaction-tests.coll0" } ] } From a491642f37ec6f6bab4035274fae174d68192fb3 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 29 Feb 2024 15:55:25 -0700 Subject: [PATCH 12/75] rustdoc workaround --- src/action/bulk_write.rs | 4 ---- src/options.rs | 1 + 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/src/action/bulk_write.rs b/src/action/bulk_write.rs index d158f2073..68e1f06ea 100644 --- a/src/action/bulk_write.rs +++ b/src/action/bulk_write.rs @@ -101,13 +101,9 @@ impl<'a> BulkWrite<'a> { impl<'a> BulkWrite<'a> { option_setters!(options: BulkWriteOptions; ordered: bool, - bypass_document_validation: bool, - comment: Bson, - let_vars: Document, - verbose_results: bool, ); diff --git a/src/options.rs b/src/options.rs index b5f7ac37a..cf7812c48 100644 --- a/src/options.rs +++ b/src/options.rs @@ -16,6 +16,7 @@ //! 
``` pub use crate::{ + action::bulk_write::BulkWriteOptions, change_stream::options::*, client::{auth::*, options::*}, coll::options::*, From 47a5975ffbdcef1f8401b464741f0c7c70e2e272 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Mon, 4 Mar 2024 13:36:25 -0700 Subject: [PATCH 13/75] resync tests --- Cargo.toml.orig | 175 +++++++++ src/action/bulk_write/write_models.rs | 4 +- src/operation.rs | 4 +- .../client-bulkWrite-delete-options.json | 2 +- .../client-bulkWrite-delete-options.yml | 136 +++++++ .../client-bulkWrite-errors.json | 52 ++- .../client-bulkWrite-errors.yml | 228 ++++++++++++ .../client-bulkWrite-mixed-namespaces.json | 8 +- .../client-bulkWrite-mixed-namespaces.yml | 146 ++++++++ .../client-bulkWrite-options.json | 2 +- .../client-bulkWrite-options.yml | 186 ++++++++++ .../client-bulkWrite-ordered.json | 2 +- .../client-bulkWrite-ordered.yml | 152 ++++++++ .../client-bulkWrite-results.json | 2 +- .../client-bulkWrite-results.yml | 311 ++++++++++++++++ .../client-bulkWrite-update-options.json | 2 +- .../client-bulkWrite-update-options.yml | 337 ++++++++++++++++++ 17 files changed, 1725 insertions(+), 24 deletions(-) create mode 100644 Cargo.toml.orig create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.yml create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.yml create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.yml create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.yml create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.yml diff --git a/Cargo.toml.orig b/Cargo.toml.orig new file mode 100644 index 000000000..9d0ed742b --- /dev/null +++ b/Cargo.toml.orig @@ -0,0 +1,175 @@ +[package] +authors = [ + "Saghm Rossi ", + "Patrick Freed ", + "Isabel Atkinson ", + "Abraham Egnor ", + "Kaitlin Mahar ", +] +description = "The official MongoDB driver for Rust" +edition = "2021" +keywords = ["mongo", "mongodb", "database", "bson", "nosql"] +categories = ["asynchronous", "database", "web-programming"] +repository = "https://github.com/mongodb/mongo-rust-driver" +homepage = "https://www.mongodb.com/docs/drivers/rust/" +license = "Apache-2.0" +readme = "README.md" +name = "mongodb" +version = "2.8.0" + +exclude = [ + "etc/**", + "rustfmt.toml", + ".evergreen/**", + ".gitignore", + "src/test/**", + "tests/**", +] + +[features] +default = [] +sync = [] +openssl-tls = ["openssl", "openssl-probe", "tokio-openssl"] + +# Enable support for v0.4 of the chrono crate in the public API of the BSON library. +bson-chrono-0_4 = ["bson/chrono-0_4"] + +# Enable support for the serde_with crate in the BSON library. +bson-serde_with = ["bson/serde_with"] + +# Enable support for v0.8 of the uuid crate in the public API of the BSON library. +bson-uuid-0_8 = ["bson/uuid-0_8"] + +# Enable support for v1.x of the uuid crate in the public API of the BSON library. +bson-uuid-1 = ["bson/uuid-1"] + +# Enable support for MONGODB-AWS authentication. +# This can only be used with the tokio-runtime feature flag. +aws-auth = ["reqwest"] + +# Enable support for on-demand Azure KMS credentials. +# This can only be used with the tokio-runtime feature flag. 
+azure-kms = ["reqwest"] + +# Enable support for on-demand GCP KMS credentials. +# This can only be used with the tokio-runtime feature flag. +gcp-kms = ["reqwest"] + +zstd-compression = ["zstd"] +zlib-compression = ["flate2"] +snappy-compression = ["snap"] + +# Enables support for client-side field level encryption and queryable encryption. +# The In Use Encryption API is unstable and may have backwards-incompatible changes in minor version updates. +in-use-encryption-unstable = ["mongocrypt", "rayon", "num_cpus"] + +# Enables support for emitting tracing events. +# The tracing API is unstable and may have backwards-incompatible changes in minor version updates. +# TODO: pending https://github.com/tokio-rs/tracing/issues/2036 stop depending directly on log. +tracing-unstable = ["tracing", "log"] + +[dependencies] +async-trait = "0.1.42" +base64 = "0.13.0" +bitflags = "1.1.0" +bson = { git = "https://github.com/mongodb/bson-rust", branch = "main" } +chrono = { version = "0.4.7", default-features = false, features = ["clock", "std"] } +derivative = "2.1.1" +derive_more = "0.99.17" +flate2 = { version = "1.0", optional = true } +futures-io = "0.3.21" +futures-core = "0.3.14" +futures-util = { version = "0.3.14", features = ["io"] } +futures-executor = "0.3.14" +hex = "0.4.0" +hmac = "0.12.1" +once_cell = "1.19.0" +log = { version = "0.4.17", optional = true } +md-5 = "0.10.1" +mongocrypt = { git = "https://github.com/mongodb/libmongocrypt-rust.git", branch = "main", optional = true } +num_cpus = { version = "1.13.1", optional = true } +openssl = { version = "0.10.38", optional = true } +openssl-probe = { version = "0.1.5", optional = true } +percent-encoding = "2.0.0" +rand = { version = "0.8.3", features = ["small_rng"] } +rayon = { version = "1.5.3", optional = true } +rustc_version_runtime = "0.2.1" +rustls-pemfile = "1.0.1" +serde_with = "1.3.1" +sha-1 = "0.10.0" +sha2 = "0.10.2" +snap = { version = "1.0.5", optional = true } +socket2 = "0.5.5" +stringprep = "0.1.2" +strsim = "0.10.0" +take_mut = "0.2.2" +thiserror = "1.0.24" +tokio-openssl = { version = "0.6.3", optional = true } +tracing = { version = "0.1.36", optional = true } +trust-dns-proto = "0.21.2" +trust-dns-resolver = "0.21.2" +typed-builder = "0.10.0" +webpki-roots = "0.25.2" +zstd = { version = "0.11.2", optional = true } + +[dependencies.pbkdf2] +version = "0.11.0" +default-features = false + +[dependencies.reqwest] +version = "0.11.2" +optional = true +default-features = false +features = ["json", "rustls-tls"] + +[dependencies.rustls] +version = "0.21.6" +features = ["dangerous_configuration"] + +[dependencies.serde] +version = "1.0.125" +features = ["derive"] + +[dependencies.serde_bytes] +version = "0.11.5" + +[dependencies.tokio] +version = "1.17.0" +features = ["io-util", "sync", "macros", "net", "process", "rt", "time"] + +[dependencies.tokio-rustls] +version = "0.24.1" +features = ["dangerous_configuration"] + +[dependencies.tokio-util] +version = "0.7.0" +features = ["io", "compat"] + +[dependencies.uuid] +version = "1.1.2" +features = ["v4"] + +[dev-dependencies] +anyhow = { version = "1.0", features = ["backtrace"] } +approx = "0.5.1" +backtrace = { version = "0.3.68" } +ctrlc = "3.2.2" +function_name = "0.2.1" +futures = "0.3" +hex = "0.4" +home = "0.5" +lambda_runtime = "0.6.0" +pretty_assertions = "1.3.0" +serde = { version = ">= 0.0.0", features = ["rc"] } +serde_json = "1.0.64" +semver = "1.0.0" +time = "0.3.9" +tokio = { version = ">= 0.0.0", features = ["fs", "parking_lot"] } +tracing-subscriber = 
"0.3.16" +regex = "1.6.0" +serde-hex = "0.1.0" +serde_path_to_error = "0.1.15" + +[package.metadata.docs.rs] +rustdoc-args = ["--cfg", "docsrs"] +all-features = true diff --git a/src/action/bulk_write/write_models.rs b/src/action/bulk_write/write_models.rs index ee60add9d..437fa32d9 100644 --- a/src/action/bulk_write/write_models.rs +++ b/src/action/bulk_write/write_models.rs @@ -125,8 +125,8 @@ impl WriteModel { } } - // Returns the operation-specific fields that should be included in this model's entry in the - // ops array. Also returns an inserted ID if this is an insert operation. + /// Returns the operation-specific fields that should be included in this model's entry in the + /// ops array. Also returns an inserted ID if this is an insert operation. pub(crate) fn get_ops_document_contents(&self) -> Result<(RawDocumentBuf, Option)> { let (mut model_document, inserted_id) = match self { Self::InsertOne { document, .. } => { diff --git a/src/operation.rs b/src/operation.rs index 20974c83f..e80b0c4b8 100644 --- a/src/operation.rs +++ b/src/operation.rs @@ -86,7 +86,9 @@ impl<'a, O> OperationResponse<'a, O> { fn get_sync_result(self) -> Result { match self { Self::Sync(result) => result, - Self::Async(_) => unreachable!(), + Self::Async(_) => Err(Error::internal( + "get_sync_result was called on an async response", + )), } } } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.json index 8ad03a200..5bdf2b124 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.json @@ -1,6 +1,6 @@ { "description": "client bulkWrite delete options", - "schemaVersion": "1.0", + "schemaVersion": "1.1", "runOnRequirements": [ { "minServerVersion": "8.0" diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.yml new file mode 100644 index 000000000..db8b9f46d --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.yml @@ -0,0 +1,136 @@ +description: "client bulkWrite delete options" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + collation: &collation { "locale": "simple" } + hint: &hint _id_ + +tests: + - description: "client bulk write delete with collation" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + collation: *collation + - deleteMany: + namespace: *namespace + filter: { _id: { $gt: 1 } } + collation: *collation + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 3 + insertResults: {} + updateResults: {} + deleteResults: + 0: + deletedCount: 1 + 1: + deletedCount: 2 + 
expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - delete: 0 + filter: { _id: 1 } + collation: *collation + multi: false + - delete: 0 + filter: { _id: { $gt: 1 } } + collation: *collation + multi: true + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: [] + - description: "client bulk write delete with hint" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + hint: *hint + - deleteMany: + namespace: *namespace + filter: { _id: { $gt: 1 } } + hint: *hint + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 3 + insertResults: {} + updateResults: {} + deleteResults: + 0: + deletedCount: 1 + 1: + deletedCount: 2 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - delete: 0 + filter: { _id: 1 } + hint: *hint + multi: false + - delete: 0 + filter: { _id: { $gt: 1 } } + hint: *hint + multi: true + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: [] diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json index 9719a9d97..1404fad42 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json @@ -59,7 +59,8 @@ "writeConcernError": { "code": 91, "errmsg": "Replication is being shut down" - } + }, + "undefinedVarCode": 17276 }, "tests": [ { @@ -144,11 +145,6 @@ }, { "description": "an individual operation fails during an unordered bulkWrite", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -229,11 +225,6 @@ }, { "description": "detailed results are omitted from error when verboseResults is false", - "runOnRequirements": [ - { - "minServerVersion": "8.0" - } - ], "operations": [ { "object": "client0", @@ -302,8 +293,8 @@ "description": "a top-level failure occurs during a bulkWrite", "operations": [ { - "name": "failPoint", "object": "testRunner", + "name": "failPoint", "arguments": { "client": "client0", "failPoint": { @@ -342,6 +333,43 @@ } ] }, + { + "description": "a bulk write with only errors does not report a partial result", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": {} + }, + "writeErrors": { + "0": { + "code": 17276 + } + } + } + } + ] + }, { "description": "a write concern error occurs during a bulkWrite", "operations": [ diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml new file mode 100644 index 000000000..e05bef220 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml @@ -0,0 +1,228 @@ +description: "client bulkWrite errors" 
+schemaVersion: "1.20" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + uriOptions: + retryWrites: false + useMultipleMongoses: false + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + writeConcernError: &writeConcernError + code: 91 + errmsg: "Replication is being shut down" + undefinedVarCode: &undefinedVarCode 17276 # Use of an undefined variable + +tests: + - description: "an individual operation fails during an ordered bulkWrite" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] # Attempt to access a nonexistent let var + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + expectError: + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 1 + insertResults: {} + updateResults: {} + deleteResults: + 0: + deletedCount: 1 + writeErrors: + 1: + code: *undefinedVarCode + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - description: "an individual operation fails during an unordered bulkWrite" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] # Attempt to access a nonexistent let var + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + ordered: false + expectError: + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 2 + insertResults: {} + updateResults: {} + deleteResults: + 0: + deletedCount: 1 + 2: + deletedCount: 1 + writeErrors: + 1: + code: *undefinedVarCode + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 2, x: 22 } + - description: "detailed results are omitted from error when verboseResults is false" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] # Attempt to access a nonexistent let var + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: false + expectError: + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 1 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + writeErrors: + 1: + code: *undefinedVarCode + - description: "a top-level failure occurs during a bulkWrite" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: + - bulkWrite + errorCode: 8 # UnknownError + - object: *client0 + name: 
clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { x: 1 } + verboseResults: true + expectError: + errorCode: 8 + - description: "a bulk write with only errors does not report a partial result" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] # Attempt to access a nonexistent let var + verboseResults: true + expectError: + expectResult: + $$unsetOrMatches: {} # Empty or nonexistent result when no successful writes occurred + writeErrors: + 0: + code: *undefinedVarCode + - description: "a write concern error occurs during a bulkWrite" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: + - bulkWrite + writeConcernError: *writeConcernError + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 10 } + verboseResults: true + expectError: + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 10 + updateResults: {} + deleteResults: {} + writeConcernErrors: [ *writeConcernError ] diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json index 6db78d2ac..33df3257c 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json @@ -1,6 +1,6 @@ { "description": "client bulkWrite with mixed namespaces", - "schemaVersion": "1.0", + "schemaVersion": "1.1", "runOnRequirements": [ { "minServerVersion": "8.0" @@ -152,7 +152,7 @@ "_id": 4 }, "replacement": { - "x": 44 + "x": 45 } } } @@ -249,7 +249,7 @@ "_id": 4 }, "updateMods": { - "x": 44 + "x": 45 }, "multi": false } @@ -300,7 +300,7 @@ "documents": [ { "_id": 4, - "x": 44 + "x": 45 } ] } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.yml new file mode 100644 index 000000000..a34870e05 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.yml @@ -0,0 +1,146 @@ +description: "client bulkWrite with mixed namespaces" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name db0 + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + - collection: + id: &collection1 collection1 + database: *database0 + collectionName: &collection1Name coll1 + - database: + id: &database1 database1 + client: *client0 + databaseName: &database1Name db1 + - collection: + id: &collection2 collection2 + database: *database1 + collectionName: &collection2Name coll2 + +initialData: + - databaseName: *database0Name + collectionName: *collection0Name + documents: [] + - databaseName: *database0Name + collectionName: *collection1Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - databaseName: *database1Name + collectionName: *collection2Name + documents: 
+ - { _id: 3, x: 33 } + - { _id: 4, x: 44 } + +_yamlAnchors: + db0Coll0Namespace: &db0Coll0Namespace "db0.coll0" + db0Coll1Namespace: &db0Coll1Namespace "db0.coll1" + db1Coll2Namespace: &db1Coll2Namespace "db1.coll2" + +tests: + - description: "client bulkWrite with mixed namespaces" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *db0Coll0Namespace + document: { _id: 1 } + - insertOne: + namespace: *db0Coll0Namespace + document: { _id: 2 } + - updateOne: + namespace: *db0Coll1Namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - deleteOne: + namespace: *db1Coll2Namespace + filter: { _id: 3 } + - deleteOne: + namespace: *db0Coll1Namespace + filter: { _id: 2 } + - replaceOne: + namespace: *db1Coll2Namespace + filter: { _id: 4 } + replacement: { x: 45 } + verboseResults: true + expectResult: + insertedCount: 2 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 2 + insertResults: + 0: + insertedId: 1 + 1: + insertedId: 2 + updateResults: + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: null + 5: + matchedCount: 1 + modifiedCount: 1 + upsertedId: null + deleteResults: + 3: + deletedCount: 1 + 4: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + bulkWrite: 1 + ops: + - insert: 0 + document: { _id: 1 } + - insert: 0 + document: { _id: 2 } + - update: 1 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - delete: 2 + filter: { _id: 3 } + multi: false + - delete: 1 + filter: { _id: 2 } + multi: false + - update: 2 + filter: { _id: 4 } + updateMods: { x: 45 } + multi: false + nsInfo: + - ns: *db0Coll0Namespace + - ns: *db0Coll1Namespace + - ns: *db1Coll2Namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1 } + - { _id: 2 } + - databaseName: *database0Name + collectionName: *collection1Name + documents: + - { _id: 1, x: 12 } + - databaseName: *database1Name + collectionName: *collection2Name + documents: + - { _id: 4, x: 45 } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json index d92be7b19..fd1a39300 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json @@ -1,6 +1,6 @@ { "description": "client bulkWrite top-level options", - "schemaVersion": "1.0", + "schemaVersion": "1.1", "runOnRequirements": [ { "minServerVersion": "8.0" diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml new file mode 100644 index 000000000..1ef6e3192 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml @@ -0,0 +1,186 @@ +description: "client bulkWrite top-level options" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +_yamlAnchors: + namespace: 
&namespace "crud-tests.coll0" + comment: &comment { "bulk": "write" } + let: &let { id1: 1, id2: 2 } + +tests: + - description: "client bulkWrite comment" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + comment: *comment + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + comment: *comment + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - description: "client bulkWrite bypassDocumentValidation" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + bypassDocumentValidation: true + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + bypassDocumentValidation: true + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - description: "client bulkWrite let" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id1" ] + update: + $inc: { x: 1 } + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] + let: *let + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 1 + modifiedCount: 1 + deletedCount: 1 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: null + deleteResults: + 1: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + let: *let + ops: + - update: 0 + filter: + $expr: + $eq: [ "$_id", "$$id1" ] + updateMods: { $inc: { x: 1 } } + multi: false + - delete: 0 + filter: + $expr: + $eq: [ "$_id", "$$id2" ] + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, x: 12 } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.json index 5ce983f63..a55d6619b 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.json @@ -1,6 +1,6 @@ { "description": "client bulkWrite with ordered option", - "schemaVersion": "1.0", + "schemaVersion": "1.1", "runOnRequirements": [ { "minServerVersion": "8.0" 
diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.yml new file mode 100644 index 000000000..dc56dcb86 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.yml @@ -0,0 +1,152 @@ +description: "client bulkWrite with ordered option" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: [] + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite with ordered: false" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 1, x: 11 } + verboseResults: true + ordered: false + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 1 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: false + ops: + - insert: 0 + document: { _id: 1, x: 11 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - description: "client bulkWrite with ordered: true" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 1, x: 11 } + verboseResults: true + ordered: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 1 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 1, x: 11 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - description: "client bulkWrite defaults to ordered: true" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 1, x: 11 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 1 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 1, x: 11 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json index 92aa73464..726d15ffd 100644 --- 
a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json @@ -1,6 +1,6 @@ { "description": "client bulkWrite results", - "schemaVersion": "1.0", + "schemaVersion": "1.1", "runOnRequirements": [ { "minServerVersion": "8.0" diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.yml new file mode 100644 index 000000000..b4731f193 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.yml @@ -0,0 +1,311 @@ +description: "client bulkWrite results" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 5, x: 55 } + - { _id: 6, x: 66 } + - { _id: 7, x: 77 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite with verboseResults: true returns detailed results" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $inc: { x: 2 } } + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { x: 44 } + upsert: true + - deleteOne: + namespace: *namespace + filter: { _id: 5 } + - deleteMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 1 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 3 + insertResults: + 0: + insertedId: 8 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: null + 2: + matchedCount: 2 + modifiedCount: 2 + upsertedId: null + 3: + matchedCount: 1 + modifiedCount: 0 + upsertedId: 4 + deleteResults: + 4: + deletedCount: 1 + 5: + deletedCount: 2 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + - update: 0 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $inc: { x: 2 } } + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { x: 44 } + upsert: true + multi: false + - delete: 0 + filter: { _id: 5 } + multi: false + - delete: 0 + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + multi: true + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 24 } + - { _id: 3, x: 35 } + - { _id: 4, x: 44 } + - { _id: 8, x: 88 } + - description: "client bulkWrite with verboseResults: false omits detailed results" + operations: + - object: *client0 + 
name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $inc: { x: 2 } } + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { x: 44 } + upsert: true + - deleteOne: + namespace: *namespace + filter: { _id: 5 } + - deleteMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + verboseResults: false + expectResult: + insertedCount: 1 + upsertedCount: 1 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 3 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + - update: 0 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $inc: { x: 2 } } + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { x: 44 } + upsert: true + multi: false + - delete: 0 + filter: { _id: 5 } + multi: false + - delete: 0 + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + multi: true + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 24 } + - { _id: 3, x: 35 } + - { _id: 4, x: 44 } + - { _id: 8, x: 88 } + - description: "client bulkWrite defaults to verboseResults: false" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $inc: { x: 2 } } + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { x: 44 } + upsert: true + - deleteOne: + namespace: *namespace + filter: { _id: 5 } + - deleteMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + expectResult: + insertedCount: 1 + upsertedCount: 1 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 3 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + - update: 0 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $inc: { x: 2 } } + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { x: 44 } + upsert: true + multi: false + - delete: 0 + filter: { _id: 5 } + multi: false + - delete: 0 + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + multi: true + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 24 } + - { _id: 3, x: 35 } + - { 
_id: 4, x: 44 } + - { _id: 8, x: 88 } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json index d163775bd..5d5386402 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json @@ -1,6 +1,6 @@ { "description": "client bulkWrite update options", - "schemaVersion": "1.0", + "schemaVersion": "1.1", "runOnRequirements": [ { "minServerVersion": "8.0" diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.yml new file mode 100644 index 000000000..a04cedea6 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.yml @@ -0,0 +1,337 @@ +description: "client bulkWrite update options" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, array: [ 1, 2, 3 ] } + - { _id: 2, array: [ 1, 2, 3 ] } + - { _id: 3, array: [ 1, 2, 3 ] } + - { _id: 4, array: [ 1, 2, 3 ] } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + collation: &collation { "locale": "simple" } + hint: &hint _id_ + +tests: + - description: "client bulkWrite update with arrayFilters" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + $set: + array.$[i]: 4 + arrayFilters: [ i: { $gte: 2 } ] + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: + $set: + array.$[i]: 5 + arrayFilters: [ i: { $gte: 2 } ] + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: null + 1: + matchedCount: 2 + modifiedCount: 2 + upsertedId: null + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + $set: + array.$[i]: 4 + arrayFilters: [ i: { $gte: 2 } ] + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: + $set: + array.$[i]: 5 + arrayFilters: [ i: { $gte: 2 } ] + multi: true + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, array: [ 1, 4, 4 ] } + - { _id: 2, array: [ 1, 5, 5 ] } + - { _id: 3, array: [ 1, 5, 5 ] } + - { _id: 4, array: [ 1, 2, 3 ] } + - description: "client bulkWrite update with collation" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $set: { array: [ 1, 2, 4 ] } } + collation: *collation + - updateMany: + namespace: *namespace 
+ filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $set: { array: [ 1, 2, 5 ] } } + collation: *collation + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { array: [ 1, 2, 6 ] } + collation: *collation + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 4 + modifiedCount: 4 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: null + 1: + matchedCount: 2 + modifiedCount: 2 + upsertedId: null + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: null + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: { $set: { array: [ 1, 2, 4 ] } } + collation: *collation + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $set: { array: [ 1, 2, 5 ] } } + collation: *collation + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { array: [ 1, 2, 6 ] } + collation: *collation + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, array: [ 1, 2, 4 ] } + - { _id: 2, array: [ 1, 2, 5 ] } + - { _id: 3, array: [ 1, 2, 5 ] } + - { _id: 4, array: [ 1, 2, 6 ] } + - description: "client bulkWrite update with hint" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $set: { array: [ 1, 2, 4 ] } } + hint: *hint + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $set: { array: [ 1, 2, 5 ] } } + hint: *hint + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { array: [ 1, 2, 6 ] } + hint: *hint + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 4 + modifiedCount: 4 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: null + 1: + matchedCount: 2 + modifiedCount: 2 + upsertedId: null + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: null + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: { $set: { array: [ 1, 2, 4 ] } } + hint: *hint + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $set: { array: [ 1, 2, 5 ] } } + hint: *hint + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { array: [ 1, 2, 6 ] } + hint: *hint + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, array: [ 1, 2, 4 ] } + - { _id: 2, array: [ 1, 2, 5 ] } + - { _id: 3, array: [ 1, 2, 5 ] } + - { _id: 4, array: [ 1, 2, 6 ] } + - description: "client bulkWrite update with upsert" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 5 } + update: { $set: { array: [ 1, 2, 4 ] } } + upsert: true + - replaceOne: + namespace: *namespace + filter: { _id: 6 } + replacement: { array: [ 1, 2, 6 ] } + upsert: true + 
verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 2 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 0 + upsertedId: 5 + 1: + matchedCount: 1 + modifiedCount: 0 + upsertedId: 6 + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 5 } + updateMods: { $set: { array: [ 1, 2, 4 ] } } + upsert: true + multi: false + - update: 0 + filter: { _id: 6 } + updateMods: { array: [ 1, 2, 6 ] } + upsert: true + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, array: [ 1, 2, 3 ] } + - { _id: 2, array: [ 1, 2, 3 ] } + - { _id: 3, array: [ 1, 2, 3 ] } + - { _id: 4, array: [ 1, 2, 3 ] } + - { _id: 5, array: [ 1, 2, 4 ] } + - { _id: 6, array: [ 1, 2, 6 ] } From 29938faa0caa15d3e878e6df69ff1a29c4c0b9f7 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Mon, 4 Mar 2024 13:36:42 -0700 Subject: [PATCH 14/75] remove extra file --- Cargo.toml.orig | 175 ------------------------------------------------ 1 file changed, 175 deletions(-) delete mode 100644 Cargo.toml.orig diff --git a/Cargo.toml.orig b/Cargo.toml.orig deleted file mode 100644 index 9d0ed742b..000000000 --- a/Cargo.toml.orig +++ /dev/null @@ -1,175 +0,0 @@ -[package] -authors = [ - "Saghm Rossi ", - "Patrick Freed ", - "Isabel Atkinson ", - "Abraham Egnor ", - "Kaitlin Mahar ", -] -description = "The official MongoDB driver for Rust" -edition = "2021" -keywords = ["mongo", "mongodb", "database", "bson", "nosql"] -categories = ["asynchronous", "database", "web-programming"] -repository = "https://github.com/mongodb/mongo-rust-driver" -homepage = "https://www.mongodb.com/docs/drivers/rust/" -license = "Apache-2.0" -readme = "README.md" -name = "mongodb" -version = "2.8.0" - -exclude = [ - "etc/**", - "rustfmt.toml", - ".evergreen/**", - ".gitignore", - "src/test/**", - "tests/**", -] - -[features] -default = [] -sync = [] -openssl-tls = ["openssl", "openssl-probe", "tokio-openssl"] - -# Enable support for v0.4 of the chrono crate in the public API of the BSON library. -bson-chrono-0_4 = ["bson/chrono-0_4"] - -# Enable support for the serde_with crate in the BSON library. -bson-serde_with = ["bson/serde_with"] - -# Enable support for v0.8 of the uuid crate in the public API of the BSON library. -bson-uuid-0_8 = ["bson/uuid-0_8"] - -# Enable support for v1.x of the uuid crate in the public API of the BSON library. -bson-uuid-1 = ["bson/uuid-1"] - -# Enable support for MONGODB-AWS authentication. -# This can only be used with the tokio-runtime feature flag. -aws-auth = ["reqwest"] - -# Enable support for on-demand Azure KMS credentials. -# This can only be used with the tokio-runtime feature flag. -azure-kms = ["reqwest"] - -# Enable support for on-demand GCP KMS credentials. -# This can only be used with the tokio-runtime feature flag. -gcp-kms = ["reqwest"] - -zstd-compression = ["zstd"] -zlib-compression = ["flate2"] -snappy-compression = ["snap"] - -# Enables support for client-side field level encryption and queryable encryption. -# The In Use Encryption API is unstable and may have backwards-incompatible changes in minor version updates. -in-use-encryption-unstable = ["mongocrypt", "rayon", "num_cpus"] - -# Enables support for emitting tracing events. 
-# The tracing API is unstable and may have backwards-incompatible changes in minor version updates. -# TODO: pending https://github.com/tokio-rs/tracing/issues/2036 stop depending directly on log. -tracing-unstable = ["tracing", "log"] - -[dependencies] -async-trait = "0.1.42" -base64 = "0.13.0" -bitflags = "1.1.0" -bson = { git = "https://github.com/mongodb/bson-rust", branch = "main" } -chrono = { version = "0.4.7", default-features = false, features = ["clock", "std"] } -derivative = "2.1.1" -derive_more = "0.99.17" -flate2 = { version = "1.0", optional = true } -futures-io = "0.3.21" -futures-core = "0.3.14" -futures-util = { version = "0.3.14", features = ["io"] } -futures-executor = "0.3.14" -hex = "0.4.0" -hmac = "0.12.1" -once_cell = "1.19.0" -log = { version = "0.4.17", optional = true } -md-5 = "0.10.1" -mongocrypt = { git = "https://github.com/mongodb/libmongocrypt-rust.git", branch = "main", optional = true } -num_cpus = { version = "1.13.1", optional = true } -openssl = { version = "0.10.38", optional = true } -openssl-probe = { version = "0.1.5", optional = true } -percent-encoding = "2.0.0" -rand = { version = "0.8.3", features = ["small_rng"] } -rayon = { version = "1.5.3", optional = true } -rustc_version_runtime = "0.2.1" -rustls-pemfile = "1.0.1" -serde_with = "1.3.1" -sha-1 = "0.10.0" -sha2 = "0.10.2" -snap = { version = "1.0.5", optional = true } -socket2 = "0.5.5" -stringprep = "0.1.2" -strsim = "0.10.0" -take_mut = "0.2.2" -thiserror = "1.0.24" -tokio-openssl = { version = "0.6.3", optional = true } -tracing = { version = "0.1.36", optional = true } -trust-dns-proto = "0.21.2" -trust-dns-resolver = "0.21.2" -typed-builder = "0.10.0" -webpki-roots = "0.25.2" -zstd = { version = "0.11.2", optional = true } - -[dependencies.pbkdf2] -version = "0.11.0" -default-features = false - -[dependencies.reqwest] -version = "0.11.2" -optional = true -default-features = false -features = ["json", "rustls-tls"] - -[dependencies.rustls] -version = "0.21.6" -features = ["dangerous_configuration"] - -[dependencies.serde] -version = "1.0.125" -features = ["derive"] - -[dependencies.serde_bytes] -version = "0.11.5" - -[dependencies.tokio] -version = "1.17.0" -features = ["io-util", "sync", "macros", "net", "process", "rt", "time"] - -[dependencies.tokio-rustls] -version = "0.24.1" -features = ["dangerous_configuration"] - -[dependencies.tokio-util] -version = "0.7.0" -features = ["io", "compat"] - -[dependencies.uuid] -version = "1.1.2" -features = ["v4"] - -[dev-dependencies] -anyhow = { version = "1.0", features = ["backtrace"] } -approx = "0.5.1" -backtrace = { version = "0.3.68" } -ctrlc = "3.2.2" -function_name = "0.2.1" -futures = "0.3" -hex = "0.4" -home = "0.5" -lambda_runtime = "0.6.0" -pretty_assertions = "1.3.0" -serde = { version = ">= 0.0.0", features = ["rc"] } -serde_json = "1.0.64" -semver = "1.0.0" -time = "0.3.9" -tokio = { version = ">= 0.0.0", features = ["fs", "parking_lot"] } -tracing-subscriber = "0.3.16" -regex = "1.6.0" -serde-hex = "0.1.0" -serde_path_to_error = "0.1.15" - -[package.metadata.docs.rs] -rustdoc-args = ["--cfg", "docsrs"] -all-features = true From 040d80723ee78fa74b060298c5ec4167c5d4eb81 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Mon, 4 Mar 2024 13:50:46 -0700 Subject: [PATCH 15/75] fix msrv failure --- src/operation/bulk_write.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index 6b72bc3dc..d03bed519 100644 --- a/src/operation/bulk_write.rs +++ 
b/src/operation/bulk_write.rs @@ -56,7 +56,7 @@ impl<'a> BulkWrite<'a> { models: &'a [WriteModel], offset: usize, options: Option<&'a BulkWriteOptions>, - ) -> Self { + ) -> BulkWrite<'a> { let encrypted = client.should_auto_encrypt().await; Self { client, From d42fe0e2c7f3723f931eab9b34d73aa13b850ae3 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Mon, 4 Mar 2024 14:50:02 -0700 Subject: [PATCH 16/75] reorg --- src/action/bulk_write.rs | 73 +++---------------- src/action/bulk_write/error.rs | 7 +- src/client/options.rs | 2 + .../options/bulk_write.rs} | 47 +++++++++++- src/error.rs | 5 +- src/error/bulk_write.rs | 34 +++++++++ src/operation/bulk_write.rs | 23 ++---- src/operation/bulk_write/server_responses.rs | 7 +- src/options.rs | 1 - src/results.rs | 4 + .../results.rs => results/bulk_write.rs} | 0 src/test/bulk_write.rs | 2 +- .../unified_runner/operation/bulk_write.rs | 2 +- src/test/spec/unified_runner/test_file.rs | 5 +- 14 files changed, 116 insertions(+), 96 deletions(-) rename src/{action/bulk_write/write_models.rs => client/options/bulk_write.rs} (75%) create mode 100644 src/error/bulk_write.rs rename src/{action/bulk_write/results.rs => results/bulk_write.rs} (100%) diff --git a/src/action/bulk_write.rs b/src/action/bulk_write.rs index 68e1f06ea..f0f37b1de 100644 --- a/src/action/bulk_write.rs +++ b/src/action/bulk_write.rs @@ -1,77 +1,25 @@ #![allow(missing_docs)] -pub(crate) mod error; -pub(crate) mod results; -pub(crate) mod write_models; - use std::collections::HashMap; -use serde::{ser::SerializeMap, Deserialize, Serialize}; -use serde_with::skip_serializing_none; - use crate::{ bson::{Bson, Document}, - error::{Error, ErrorKind, Result}, + error::{ClientBulkWriteError, Error, ErrorKind, Result}, operation::bulk_write::BulkWrite as BulkWriteOperation, + options::{BulkWriteOptions, WriteModel}, + results::BulkWriteResult, Client, ClientSession, }; use super::{action_impl, option_setters}; -use error::BulkWriteError; -use results::BulkWriteResult; -use write_models::WriteModel; - impl Client { pub fn bulk_write(&self, models: impl IntoIterator) -> BulkWrite { BulkWrite::new(self.clone(), models.into_iter().collect()) } } -#[skip_serializing_none] -#[derive(Clone, Debug, Default, Deserialize)] -#[serde(rename_all = "camelCase")] -#[non_exhaustive] -pub struct BulkWriteOptions { - pub ordered: Option, - pub bypass_document_validation: Option, - pub comment: Option, - #[serde(rename = "let")] - pub let_vars: Option, - pub verbose_results: Option, -} - -impl Serialize for BulkWriteOptions { - fn serialize(&self, serializer: S) -> std::result::Result - where - S: serde::Serializer, - { - let mut map_serializer = serializer.serialize_map(None)?; - - let ordered = self.ordered.unwrap_or(true); - map_serializer.serialize_entry("ordered", &ordered)?; - - if let Some(bypass_document_validation) = self.bypass_document_validation { - map_serializer - .serialize_entry("bypassDocumentValidation", &bypass_document_validation)?; - } - - if let Some(ref comment) = self.comment { - map_serializer.serialize_entry("comment", comment)?; - } - - if let Some(ref let_vars) = self.let_vars { - map_serializer.serialize_entry("let", let_vars)?; - } - - let errors_only = self.verbose_results.map(|b| !b).unwrap_or(true); - map_serializer.serialize_entry("errorsOnly", &errors_only)?; - - map_serializer.end() - } -} - #[must_use] pub struct BulkWrite<'a> { client: Client, @@ -201,12 +149,13 @@ impl ExecutionStatus { Self::Error(error) } _ => { - let bulk_write_error: Error = 
ErrorKind::ClientBulkWrite(BulkWriteError { - write_errors: HashMap::new(), - write_concern_errors: Vec::new(), - partial_result: Some(current_result), - }) - .into(); + let bulk_write_error: Error = + ErrorKind::ClientBulkWrite(ClientBulkWriteError { + write_errors: HashMap::new(), + write_concern_errors: Vec::new(), + partial_result: Some(current_result), + }) + .into(); Self::Error(bulk_write_error.with_source(error)) } }, @@ -228,7 +177,7 @@ impl ExecutionStatus { /// Gets a BulkWriteError from a given Error. This method should only be called when adding a /// new result or error to the existing state, as it requires that the given Error's kind is /// ClientBulkWrite. - fn get_current_bulk_write_error(error: &mut Error) -> &mut BulkWriteError { + fn get_current_bulk_write_error(error: &mut Error) -> &mut ClientBulkWriteError { match *error.kind { ErrorKind::ClientBulkWrite(ref mut bulk_write_error) => bulk_write_error, _ => unreachable!(), diff --git a/src/action/bulk_write/error.rs b/src/action/bulk_write/error.rs index dd6d27d6b..4241bf3a2 100644 --- a/src/action/bulk_write/error.rs +++ b/src/action/bulk_write/error.rs @@ -1,8 +1,9 @@ use std::collections::HashMap; -use crate::error::{WriteConcernError, WriteError}; - -use super::results::BulkWriteResult; +use crate::{ + error::{WriteConcernError, WriteError}, + results::BulkWriteResult, +}; #[derive(Clone, Debug, Default)] #[non_exhaustive] diff --git a/src/client/options.rs b/src/client/options.rs index 86af2722b..44b75cb02 100644 --- a/src/client/options.rs +++ b/src/client/options.rs @@ -1,6 +1,7 @@ #[cfg(test)] mod test; +mod bulk_write; mod resolver_config; use std::{ @@ -39,6 +40,7 @@ use crate::{ srv::{OriginalSrvInfo, SrvResolver}, }; +pub use bulk_write::*; pub use resolver_config::ResolverConfig; pub(crate) const DEFAULT_PORT: u16 = 27017; diff --git a/src/action/bulk_write/write_models.rs b/src/client/options/bulk_write.rs similarity index 75% rename from src/action/bulk_write/write_models.rs rename to src/client/options/bulk_write.rs index 437fa32d9..ca61bce3b 100644 --- a/src/action/bulk_write/write_models.rs +++ b/src/client/options/bulk_write.rs @@ -1,4 +1,6 @@ -use serde::Serialize; +#![allow(missing_docs)] + +use serde::{ser::SerializeMap, Deserialize, Serialize}; use serde_with::skip_serializing_none; use crate::{ @@ -9,6 +11,49 @@ use crate::{ Namespace, }; +#[skip_serializing_none] +#[derive(Clone, Debug, Default, Deserialize)] +#[serde(rename_all = "camelCase")] +#[non_exhaustive] +pub struct BulkWriteOptions { + pub ordered: Option, + pub bypass_document_validation: Option, + pub comment: Option, + #[serde(rename = "let")] + pub let_vars: Option, + pub verbose_results: Option, +} + +impl Serialize for BulkWriteOptions { + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + let mut map_serializer = serializer.serialize_map(None)?; + + let ordered = self.ordered.unwrap_or(true); + map_serializer.serialize_entry("ordered", &ordered)?; + + if let Some(bypass_document_validation) = self.bypass_document_validation { + map_serializer + .serialize_entry("bypassDocumentValidation", &bypass_document_validation)?; + } + + if let Some(ref comment) = self.comment { + map_serializer.serialize_entry("comment", comment)?; + } + + if let Some(ref let_vars) = self.let_vars { + map_serializer.serialize_entry("let", let_vars)?; + } + + let errors_only = self.verbose_results.map(|b| !b).unwrap_or(true); + map_serializer.serialize_entry("errorsOnly", &errors_only)?; + + 
map_serializer.end() + } +} + #[skip_serializing_none] #[derive(Clone, Debug, Serialize)] #[serde(untagged)] diff --git a/src/error.rs b/src/error.rs index 3ef4a19f9..12fd53333 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,5 +1,7 @@ //! Contains the `Error` and `Result` types that `mongodb` uses. +mod bulk_write; + use std::{ any::Any, collections::{HashMap, HashSet}, @@ -11,12 +13,13 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use crate::{ - action::bulk_write::error::BulkWriteError as ClientBulkWriteError, bson::{Bson, Document}, options::ServerAddress, sdam::{ServerType, TopologyVersion}, }; +pub use bulk_write::BulkWriteError as ClientBulkWriteError; + const RECOVERING_CODES: [i32; 5] = [11600, 11602, 13436, 189, 91]; const NOTWRITABLEPRIMARY_CODES: [i32; 3] = [10107, 13435, 10058]; const SHUTTING_DOWN_CODES: [i32; 2] = [11600, 91]; diff --git a/src/error/bulk_write.rs b/src/error/bulk_write.rs new file mode 100644 index 000000000..71b491ee4 --- /dev/null +++ b/src/error/bulk_write.rs @@ -0,0 +1,34 @@ +#![allow(missing_docs)] + +use std::collections::HashMap; + +use crate::{ + error::{WriteConcernError, WriteError}, + results::BulkWriteResult, +}; + +#[derive(Clone, Debug, Default)] +#[non_exhaustive] +pub struct BulkWriteError { + pub write_concern_errors: Vec, + pub write_errors: HashMap, + pub partial_result: Option, +} + +impl BulkWriteError { + pub(crate) fn merge(&mut self, other: BulkWriteError) { + self.write_concern_errors.extend(other.write_concern_errors); + self.write_errors.extend(other.write_errors); + if let Some(other_partial_result) = other.partial_result { + self.merge_partial_results(other_partial_result); + } + } + + pub(crate) fn merge_partial_results(&mut self, other_partial_result: BulkWriteResult) { + if let Some(ref mut partial_result) = self.partial_result { + partial_result.merge(other_partial_result); + } else { + self.partial_result = Some(other_partial_result); + } + } +} diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index d03bed519..d401c8c8f 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -1,5 +1,3 @@ -#![allow(unused_variables, dead_code)] - mod server_responses; use std::collections::HashMap; @@ -8,19 +6,14 @@ use futures_core::TryStream; use futures_util::{FutureExt, TryStreamExt}; use crate::{ - action::bulk_write::{ - error::BulkWriteError, - results::BulkWriteResult, - write_models::{OperationType, WriteModel}, - BulkWriteOptions, - }, bson::{rawdoc, Bson, RawDocumentBuf}, bson_util::{self, array_entry_size_bytes, extend_raw_document_buf, vec_to_raw_array_buf}, cmap::{Command, RawCommandResponse, StreamDescription}, cursor::CursorSpecification, - error::{Error, ErrorKind, Result}, + error::{ClientBulkWriteError, Error, ErrorKind, Result}, operation::OperationWithDefaults, - results::{DeleteResult, InsertOneResult, UpdateResult}, + options::{BulkWriteOptions, OperationType, WriteModel}, + results::{BulkWriteResult, DeleteResult, InsertOneResult, UpdateResult}, Client, ClientSession, Cursor, @@ -79,7 +72,7 @@ impl<'a> BulkWrite<'a> { async fn iterate_results_cursor( &self, mut stream: impl TryStream + Unpin, - error: &mut BulkWriteError, + error: &mut ClientBulkWriteError, ) -> Result<()> { let result = &mut error.partial_result; @@ -240,7 +233,7 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { } if split { - // Remove the namespace doc from the list if one was added for this operation + // Remove the namespace doc from the list if one was added for this operation. 
if namespace_size > 0 { let last_index = namespace_info.namespaces.len() - 1; namespace_info.namespaces.remove(last_index); @@ -283,7 +276,7 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { async move { let response: WriteResponseBody = response.body()?; - let mut bulk_write_error = BulkWriteError::default(); + let mut bulk_write_error = ClientBulkWriteError::default(); // A partial result with summary info should only be created if one or more // operations were successful. @@ -354,10 +347,6 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { ) } - fn handle_error(&self, error: Error) -> Result { - Err(error) - } - fn retryability(&self) -> Retryability { if self.models.iter().any(|model| model.multi() == Some(true)) { Retryability::None diff --git a/src/operation/bulk_write/server_responses.rs b/src/operation/bulk_write/server_responses.rs index 42a35f72d..a786d8e69 100644 --- a/src/operation/bulk_write/server_responses.rs +++ b/src/operation/bulk_write/server_responses.rs @@ -1,11 +1,6 @@ use serde::Deserialize; -use crate::{ - action::bulk_write::results::BulkWriteResult, - bson::Bson, - error::WriteError, - operation::CursorInfo, -}; +use crate::{bson::Bson, error::WriteError, operation::CursorInfo, results::BulkWriteResult}; /// The top-level response to the bulkWrite command. #[derive(Deserialize)] diff --git a/src/options.rs b/src/options.rs index cf7812c48..b5f7ac37a 100644 --- a/src/options.rs +++ b/src/options.rs @@ -16,7 +16,6 @@ //! ``` pub use crate::{ - action::bulk_write::BulkWriteOptions, change_stream::options::*, client::{auth::*, options::*}, coll::options::*, diff --git a/src/results.rs b/src/results.rs index 865a6b154..8c0a65ccb 100644 --- a/src/results.rs +++ b/src/results.rs @@ -1,5 +1,7 @@ //! Contains the types of results returned by CRUD operations. +mod bulk_write; + use std::collections::{HashMap, VecDeque}; use crate::{ @@ -13,6 +15,8 @@ use crate::{ use bson::{Binary, RawDocumentBuf}; use serde::{Deserialize, Serialize}; +pub use bulk_write::*; + /// The result of a [`Collection::insert_one`](../struct.Collection.html#method.insert_one) /// operation. 
#[derive(Clone, Debug, Serialize)] diff --git a/src/action/bulk_write/results.rs b/src/results/bulk_write.rs similarity index 100% rename from src/action/bulk_write/results.rs rename to src/results/bulk_write.rs diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index b28110377..cb5da622c 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -1,8 +1,8 @@ use std::{sync::Arc, time::Duration}; use crate::{ - action::bulk_write::write_models::WriteModel, bson::doc, + options::WriteModel, test::{log_uncaptured, spec::unified_runner::run_unified_tests, EventHandler}, Client, Namespace, diff --git a/src/test/spec/unified_runner/operation/bulk_write.rs b/src/test/spec/unified_runner/operation/bulk_write.rs index 091582a06..887eb81ef 100644 --- a/src/test/spec/unified_runner/operation/bulk_write.rs +++ b/src/test/spec/unified_runner/operation/bulk_write.rs @@ -3,10 +3,10 @@ use futures_util::FutureExt; use serde::Deserialize; use crate::{ - action::bulk_write::{write_models::WriteModel, BulkWriteOptions}, bson::{Array, Bson, Document}, coll::options::UpdateModifications, error::Result, + options::{BulkWriteOptions, WriteModel}, test::spec::unified_runner::{Entity, TestRunner}, Namespace, }; diff --git a/src/test/spec/unified_runner/test_file.rs b/src/test/spec/unified_runner/test_file.rs index 691bd9b04..ab06af679 100644 --- a/src/test/spec/unified_runner/test_file.rs +++ b/src/test/spec/unified_runner/test_file.rs @@ -11,11 +11,10 @@ use super::{results_match, ExpectedEvent, ObserveEvent, Operation}; #[cfg(feature = "tracing-unstable")] use crate::trace; use crate::{ - action::bulk_write::error::BulkWriteError, bson::{doc, Bson, Deserializer as BsonDeserializer, Document}, client::options::{ServerApi, ServerApiVersion}, concern::{Acknowledgment, ReadConcernLevel}, - error::{Error, ErrorKind}, + error::{ClientBulkWriteError, Error, ErrorKind}, gridfs::options::GridFsBucketOptions, options::{ ClientOptions, @@ -574,7 +573,7 @@ impl ExpectError { if let Some(ref expected_result) = self.expect_result { let actual_result = match *error.kind { - ErrorKind::ClientBulkWrite(BulkWriteError { + ErrorKind::ClientBulkWrite(ClientBulkWriteError { partial_result: Some(ref partial_result), .. }) => Some(bson::to_bson(partial_result).map_err(|e| e.to_string())?), From ad7aa446f1a127505b6f6476293e7f6d6e56a74d Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Mon, 4 Mar 2024 15:13:40 -0700 Subject: [PATCH 17/75] fix files --- src/action.rs | 2 +- src/action/bulk_write/error.rs | 32 -------------------------------- 2 files changed, 1 insertion(+), 33 deletions(-) delete mode 100644 src/action/bulk_write/error.rs diff --git a/src/action.rs b/src/action.rs index c8ff1d327..f09c8a2ec 100644 --- a/src/action.rs +++ b/src/action.rs @@ -1,7 +1,7 @@ //! Action builder types. 
mod aggregate; -pub(crate) mod bulk_write; +mod bulk_write; mod count; mod create_collection; mod create_index; diff --git a/src/action/bulk_write/error.rs b/src/action/bulk_write/error.rs deleted file mode 100644 index 4241bf3a2..000000000 --- a/src/action/bulk_write/error.rs +++ /dev/null @@ -1,32 +0,0 @@ -use std::collections::HashMap; - -use crate::{ - error::{WriteConcernError, WriteError}, - results::BulkWriteResult, -}; - -#[derive(Clone, Debug, Default)] -#[non_exhaustive] -pub struct BulkWriteError { - pub write_concern_errors: Vec, - pub write_errors: HashMap, - pub partial_result: Option, -} - -impl BulkWriteError { - pub(crate) fn merge(&mut self, other: BulkWriteError) { - self.write_concern_errors.extend(other.write_concern_errors); - self.write_errors.extend(other.write_errors); - if let Some(other_partial_result) = other.partial_result { - self.merge_partial_results(other_partial_result); - } - } - - pub(crate) fn merge_partial_results(&mut self, other_partial_result: BulkWriteResult) { - if let Some(ref mut partial_result) = self.partial_result { - partial_result.merge(other_partial_result); - } else { - self.partial_result = Some(other_partial_result); - } - } -} From 321b64b852eb5e73541bf5985f7872a53b73abee Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Wed, 6 Mar 2024 14:51:14 -0700 Subject: [PATCH 18/75] abraham comments --- src/action/bulk_write.rs | 6 +++--- src/client/options/bulk_write.rs | 18 +++++++++++++----- src/operation.rs | 2 +- src/operation/aggregate/change_stream.rs | 2 +- src/serde_util.rs | 14 ++++++++------ 5 files changed, 26 insertions(+), 16 deletions(-) diff --git a/src/action/bulk_write.rs b/src/action/bulk_write.rs index f0f37b1de..0ecf2caba 100644 --- a/src/action/bulk_write.rs +++ b/src/action/bulk_write.rs @@ -16,20 +16,20 @@ use super::{action_impl, option_setters}; impl Client { pub fn bulk_write(&self, models: impl IntoIterator) -> BulkWrite { - BulkWrite::new(self.clone(), models.into_iter().collect()) + BulkWrite::new(self, models.into_iter().collect()) } } #[must_use] pub struct BulkWrite<'a> { - client: Client, + client: &'a Client, models: Vec, options: Option, session: Option<&'a mut ClientSession>, } impl<'a> BulkWrite<'a> { - fn new(client: Client, models: Vec) -> Self { + fn new(client: &'a Client, models: Vec) -> Self { Self { client, models, diff --git a/src/client/options/bulk_write.rs b/src/client/options/bulk_write.rs index ca61bce3b..4093aed95 100644 --- a/src/client/options/bulk_write.rs +++ b/src/client/options/bulk_write.rs @@ -29,25 +29,33 @@ impl Serialize for BulkWriteOptions { where S: serde::Serializer, { + let BulkWriteOptions { + ordered, + bypass_document_validation, + comment, + let_vars, + verbose_results, + } = self; + let mut map_serializer = serializer.serialize_map(None)?; - let ordered = self.ordered.unwrap_or(true); + let ordered = ordered.unwrap_or(true); map_serializer.serialize_entry("ordered", &ordered)?; - if let Some(bypass_document_validation) = self.bypass_document_validation { + if let Some(bypass_document_validation) = bypass_document_validation { map_serializer .serialize_entry("bypassDocumentValidation", &bypass_document_validation)?; } - if let Some(ref comment) = self.comment { + if let Some(ref comment) = comment { map_serializer.serialize_entry("comment", comment)?; } - if let Some(ref let_vars) = self.let_vars { + if let Some(ref let_vars) = let_vars { map_serializer.serialize_entry("let", let_vars)?; } - let errors_only = self.verbose_results.map(|b| !b).unwrap_or(true); + let 
errors_only = verbose_results.map(|b| !b).unwrap_or(true); map_serializer.serialize_entry("errorsOnly", &errors_only)?; map_serializer.end() diff --git a/src/operation.rs b/src/operation.rs index e80b0c4b8..b017fdcd3 100644 --- a/src/operation.rs +++ b/src/operation.rs @@ -83,7 +83,7 @@ pub(crate) enum OperationResponse<'a, O> { impl<'a, O> OperationResponse<'a, O> { /// Returns the sync result contained within this `OperationResponse`. Use responsibly, when it /// is known that the response is not async. - fn get_sync_result(self) -> Result { + fn as_sync_result(self) -> Result { match self { Self::Sync(result) => result, Self::Async(_) => Err(Error::internal( diff --git a/src/operation/aggregate/change_stream.rs b/src/operation/aggregate/change_stream.rs index 19ab5c0b3..4fe63858a 100644 --- a/src/operation/aggregate/change_stream.rs +++ b/src/operation/aggregate/change_stream.rs @@ -97,7 +97,7 @@ impl OperationWithDefaults for ChangeStreamAggregate { let spec = self .inner .handle_response(response, description, session) - .get_sync_result()?; + .as_sync_result()?; let mut data = ChangeStreamData { resume_token: ResumeToken::initial(self.args.options.as_ref(), &spec), diff --git a/src/serde_util.rs b/src/serde_util.rs index fdf69092c..fc1f5b721 100644 --- a/src/serde_util.rs +++ b/src/serde_util.rs @@ -221,10 +221,12 @@ where use std::str::FromStr; let string_map: HashMap = HashMap::deserialize(deserializer)?; - Ok(Some( - string_map - .into_iter() - .map(|(index, t)| (usize::from_str(&index).unwrap(), t)) - .collect(), - )) + Ok(Some(string_map.into_iter().try_fold( + HashMap::new(), + |mut map, (index, t)| { + let index = usize::from_str(&index).map_err(serde::de::Error::custom)?; + map.insert(index, t); + Ok(map) + }, + )?)) } From 31d1c9e21be9075c25fc42742e73baef1ea61a24 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Tue, 2 Apr 2024 13:57:48 -0600 Subject: [PATCH 19/75] add network errors test --- .../client-bulkWrite-clientErrors.json | 350 ++++++++++++++++++ 1 file changed, 350 insertions(+) create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-clientErrors.json diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-clientErrors.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-clientErrors.json new file mode 100644 index 000000000..64b1988ff --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-clientErrors.json @@ -0,0 +1,350 @@ +{ + "description": "client bulkWrite retryable writes with client errors", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with one network error succeeds after retry", + "operations": [ + { + "object": "testRunner", + "name": 
"failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with two network errors fails after retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "isClientError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 
1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} From 199163f98aea2ed7794873a3db660dbfc8230152 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Tue, 2 Apr 2024 14:07:20 -0600 Subject: [PATCH 20/75] upserted id changes --- src/results.rs | 2 ++ .../client-bulkWrite-mixed-namespaces.json | 8 +++-- .../client-bulkWrite-mixed-namespaces.yml | 4 +-- .../client-bulkWrite-options.json | 4 ++- .../client-bulkWrite-options.yml | 2 +- .../client-bulkWrite-results.json | 8 +++-- .../client-bulkWrite-results.yml | 4 +-- .../client-bulkWrite-update-options.json | 32 ++++++++++++++----- .../client-bulkWrite-update-options.yml | 16 +++++----- 9 files changed, 54 insertions(+), 26 deletions(-) diff --git a/src/results.rs b/src/results.rs index 8c0a65ccb..0d63df542 100644 --- a/src/results.rs +++ b/src/results.rs @@ -16,6 +16,7 @@ use bson::{Binary, RawDocumentBuf}; use serde::{Deserialize, Serialize}; pub use bulk_write::*; +use serde_with::skip_serializing_none; /// The result of a [`Collection::insert_one`](../struct.Collection.html#method.insert_one) /// operation. @@ -55,6 +56,7 @@ impl InsertManyResult { /// The result of a [`Collection::update_one`](../struct.Collection.html#method.update_one) or /// [`Collection::update_many`](../struct.Collection.html#method.update_many) operation. +#[skip_serializing_none] #[derive(Clone, Debug, Serialize)] #[serde(rename_all = "camelCase")] #[non_exhaustive] diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json index 33df3257c..f90755dc8 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json @@ -177,12 +177,16 @@ "2": { "matchedCount": 1, "modifiedCount": 1, - "upsertedId": null + "upsertedId": { + "$$exists": false + } }, "5": { "matchedCount": 1, "modifiedCount": 1, - "upsertedId": null + "upsertedId": { + "$$exists": false + } } }, "deleteResults": { diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.yml index a34870e05..4e4cb01e1 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.yml +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.yml @@ -91,11 +91,11 @@ tests: 2: matchedCount: 1 modifiedCount: 1 - upsertedId: null + upsertedId: { $$exists: false } 5: matchedCount: 1 modifiedCount: 1 - upsertedId: null + upsertedId: { $$exists: false } deleteResults: 3: deletedCount: 1 diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json index fd1a39300..56e0cb5cf 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json @@ -300,7 +300,9 @@ "0": { "matchedCount": 1, "modifiedCount": 1, - "upsertedId": null + "upsertedId": { + "$$exists": false + } } }, "deleteResults": { diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml index 1ef6e3192..9c3883542 100644 --- 
a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml @@ -150,7 +150,7 @@ tests: 0: matchedCount: 1 modifiedCount: 1 - upsertedId: null + upsertedId: { $$exists: false } deleteResults: 1: deletedCount: 1 diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json index 726d15ffd..97a9e50b2 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json @@ -177,12 +177,16 @@ "1": { "matchedCount": 1, "modifiedCount": 1, - "upsertedId": null + "upsertedId": { + "$$exists": false + } }, "2": { "matchedCount": 2, "modifiedCount": 2, - "upsertedId": null + "upsertedId": { + "$$exists": false + } }, "3": { "matchedCount": 1, diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.yml index b4731f193..eb001bbb4 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.yml +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.yml @@ -75,11 +75,11 @@ tests: 1: matchedCount: 1 modifiedCount: 1 - upsertedId: null + upsertedId: { $$exists: false } 2: matchedCount: 2 modifiedCount: 2 - upsertedId: null + upsertedId: { $$exists: false } 3: matchedCount: 1 modifiedCount: 0 diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json index 5d5386402..93a2774e5 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json @@ -151,12 +151,16 @@ "0": { "matchedCount": 1, "modifiedCount": 1, - "upsertedId": null + "upsertedId": { + "$$exists": false + } }, "1": { "matchedCount": 2, "modifiedCount": 2, - "upsertedId": null + "upsertedId": { + "$$exists": false + } } }, "deleteResults": {} @@ -369,17 +373,23 @@ "0": { "matchedCount": 1, "modifiedCount": 1, - "upsertedId": null + "upsertedId": { + "$$exists": false + } }, "1": { "matchedCount": 2, "modifiedCount": 2, - "upsertedId": null + "upsertedId": { + "$$exists": false + } }, "2": { "matchedCount": 1, "modifiedCount": 1, - "upsertedId": null + "upsertedId": { + "$$exists": false + } } }, "deleteResults": {} @@ -603,17 +613,23 @@ "0": { "matchedCount": 1, "modifiedCount": 1, - "upsertedId": null + "upsertedId": { + "$$exists": false + } }, "1": { "matchedCount": 2, "modifiedCount": 2, - "upsertedId": null + "upsertedId": { + "$$exists": false + } }, "2": { "matchedCount": 1, "modifiedCount": 1, - "upsertedId": null + "upsertedId": { + "$$exists": false + } } }, "deleteResults": {} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.yml index a04cedea6..fe188a490 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.yml +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.yml @@ -64,11 +64,11 @@ tests: 0: matchedCount: 1 modifiedCount: 1 - upsertedId: null + upsertedId: { $$exists: false } 1: matchedCount: 2 modifiedCount: 2 - upsertedId: null + 
upsertedId: { $$exists: false } deleteResults: {} expectEvents: - client: *client0 @@ -140,15 +140,15 @@ tests: 0: matchedCount: 1 modifiedCount: 1 - upsertedId: null + upsertedId: { $$exists: false } 1: matchedCount: 2 modifiedCount: 2 - upsertedId: null + upsertedId: { $$exists: false } 2: matchedCount: 1 modifiedCount: 1 - upsertedId: null + upsertedId: { $$exists: false } deleteResults: {} expectEvents: - client: *client0 @@ -221,15 +221,15 @@ tests: 0: matchedCount: 1 modifiedCount: 1 - upsertedId: null + upsertedId: { $$exists: false } 1: matchedCount: 2 modifiedCount: 2 - upsertedId: null + upsertedId: { $$exists: false } 2: matchedCount: 1 modifiedCount: 1 - upsertedId: null + upsertedId: { $$exists: false } deleteResults: {} expectEvents: - client: *client0 From 77daa8e46d64d46405d2506e38c1ce4e5505d9a2 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Tue, 2 Apr 2024 14:19:50 -0600 Subject: [PATCH 21/75] rename write concern error message --- src/error.rs | 2 +- .../new-bulk-write/client-bulkWrite-errors.json | 8 +++----- .../new-bulk-write/client-bulkWrite-errors.yml | 13 ++++++++----- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/src/error.rs b/src/error.rs index 12fd53333..300092a53 100644 --- a/src/error.rs +++ b/src/error.rs @@ -749,7 +749,7 @@ pub struct WriteConcernError { pub code_name: String, /// A description of the error that occurred. - #[serde(rename = "errmsg", default = "String::new")] + #[serde(alias = "errmsg", default = "String::new")] pub message: String, /// A document identifying the write concern setting related to the error. diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json index 1404fad42..a33dffd50 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json @@ -56,10 +56,8 @@ ], "_yamlAnchors": { "namespace": "crud-tests.coll0", - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - }, + "writeConcernErrorCode": 91, + "writeConcernErrorMessage": "Replication is being shut down", "undefinedVarCode": 17276 }, "tests": [ @@ -429,7 +427,7 @@ "writeConcernErrors": [ { "code": 91, - "errmsg": "Replication is being shut down" + "message": "Replication is being shut down" } ] } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml index e05bef220..7c587f824 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml @@ -29,9 +29,8 @@ initialData: _yamlAnchors: namespace: &namespace "crud-tests.coll0" - writeConcernError: &writeConcernError - code: 91 - errmsg: "Replication is being shut down" + writeConcernErrorCode: &writeConcernErrorCode 91 + writeConcernErrorMessage: &writeConcernErrorMessage "Replication is being shut down" undefinedVarCode: &undefinedVarCode 17276 # Use of an undefined variable tests: @@ -204,7 +203,9 @@ tests: data: failCommands: - bulkWrite - writeConcernError: *writeConcernError + writeConcernError: + code: *writeConcernErrorCode + errmsg: *writeConcernErrorMessage - object: *client0 name: clientBulkWrite arguments: @@ -225,4 +226,6 @@ tests: insertedId: 10 updateResults: {} deleteResults: {} - writeConcernErrors: [ *writeConcernError ] + 
writeConcernErrors: + - code: *writeConcernErrorCode + message: *writeConcernErrorMessage From 1193a5c183f06831f189a2655d5f5cbbf219d246 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Tue, 2 Apr 2024 16:00:44 -0600 Subject: [PATCH 22/75] add cursor iteration test --- src/test/bulk_write.rs | 57 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index cb5da622c..a73294f33 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -1,7 +1,13 @@ use std::{sync::Arc, time::Duration}; +use rand::{ + distributions::{Alphanumeric, DistString}, + thread_rng, +}; + use crate::{ - bson::doc, + bson::{doc, Document}, + error::ErrorKind, options::WriteModel, test::{log_uncaptured, spec::unified_runner::run_unified_tests, EventHandler}, Client, @@ -131,3 +137,52 @@ async fn max_message_size_bytes_batching() { assert_eq!(first_ops_len + second_ops_len, num_models); } + +#[tokio::test] +async fn cursor_iteration() { + let handler = Arc::new(EventHandler::new()); + let client = Client::test_builder() + .event_handler(handler.clone()) + .build() + .await; + let mut subscriber = handler.subscribe(); + + let max_bson_object_size = client.server_info.max_bson_object_size as usize; + // 8.0+ servers always report this value + let max_write_batch_size = client.server_info.max_write_batch_size.unwrap() as usize; + let id_size = max_bson_object_size / max_write_batch_size; + + let document = doc! { "_id": Alphanumeric.sample_string(&mut thread_rng(), id_size) }; + client + .database("bulk") + .collection::("write") + .insert_one(&document, None) + .await + .unwrap(); + + let models = vec![ + WriteModel::InsertOne { + namespace: Namespace::new("bulk", "write"), + document + }; + max_write_batch_size + ]; + let error = client.bulk_write(models).ordered(false).await.unwrap_err(); + + assert!(error.source.is_none()); + + let ErrorKind::ClientBulkWrite(bulk_write_error) = *error.kind else { + panic!("Expected bulk write error, got {:?}", error); + }; + + assert!(bulk_write_error.write_concern_errors.is_empty()); + // assert!(bulk_write_error.partial_result.is_none()); + + let write_errors = bulk_write_error.write_errors; + assert_eq!(write_errors.len(), max_write_batch_size); + + subscriber + .wait_for_successful_command_execution(Duration::from_millis(500), "getMore") + .await + .expect("no getMore observed"); +} From d62beda7f00b9f72ff690b792f31b9a373720b1c Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Wed, 3 Apr 2024 13:44:53 -0600 Subject: [PATCH 23/75] sync retryability tests --- .../client-bulkWrite-clientErrors.yml | 172 ++++ .../client-bulkWrite-serverErrors.json | 778 ++++++++++++++++++ .../client-bulkWrite-serverErrors.yml | 367 +++++++++ 3 files changed, 1317 insertions(+) create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-clientErrors.yml create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-serverErrors.json create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-serverErrors.yml diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-clientErrors.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-clientErrors.yml new file mode 100644 index 000000000..d1b08ec37 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-clientErrors.yml @@ -0,0 +1,172 @@ +description: "client bulkWrite retryable writes with client errors" 
+schemaVersion: "1.20" +runOnRequirements: + - minServerVersion: "8.0" + topologies: + - replicaset + - sharded + - load-balanced + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + useMultipleMongoses: false + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name retryable-writes-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "retryable-writes-tests.coll0" + +tests: + - description: "client bulkWrite with one network error succeeds after retry" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + closeConnection: true + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 4 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 4, x: 44 } + - description: "client bulkWrite with two network errors fails after retry" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 2 + data: + failCommands: [ bulkWrite ] + closeConnection: true + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + verboseResults: true + expectError: + isClientError: true + errorLabelsContain: ["RetryableWriteError"] # Error label added by driver. 
+ expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-serverErrors.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-serverErrors.json new file mode 100644 index 000000000..fd517adb5 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-serverErrors.json @@ -0,0 +1,778 @@ +{ + "description": "client bulkWrite retryable writes", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, 
+ "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 222 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "errorCode": 189, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, 
+ "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": 
{ + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "writeConcernErrors": [ + { + "code": 91, + "message": "Replication is being shut down" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-serverErrors.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-serverErrors.yml new file mode 100644 index 000000000..e5022870c --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-serverErrors.yml @@ -0,0 +1,367 @@ +description: "client bulkWrite retryable writes" +schemaVersion: "1.20" +runOnRequirements: + - minServerVersion: "8.0" + topologies: + - replicaset + - sharded + - load-balanced + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + useMultipleMongoses: false + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name retryable-writes-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "retryable-writes-tests.coll0" + +tests: + - description: "client bulkWrite with no multi: true operations succeeds after retryable top-level error" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorCode: 189 # PrimarySteppedDown + errorLabels: [ RetryableWriteError ] + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - replaceOne: + namespace: *namespace + filter: { _id: 2 } + replacement: { x: 222 } + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 1 + insertResults: + 0: + insertedId: 4 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: + 3: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + - commandStartedEvent: + 
commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 222 } + - { _id: 4, x: 44 } + - description: "client bulkWrite with multi: true operations fails after retryable top-level error" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorCode: 189 # PrimarySteppedDown + errorLabels: [ RetryableWriteError ] + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateMany: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - deleteMany: + namespace: *namespace + filter: { _id: 3 } + expectError: + errorCode: 189 + errorLabelsContain: [ RetryableWriteError ] + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: true + - delete: 0 + filter: { _id: 3 } + multi: true + nsInfo: + - ns: *namespace + - description: "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorLabels: [ RetryableWriteError ] + writeConcernError: + code: 91 + errmsg: "Replication is being shut down" + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - replaceOne: + namespace: *namespace + filter: { _id: 2 } + replacement: { x: 222 } + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 1 + insertResults: + 0: + insertedId: 4 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: + 3: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: 
{ x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + - description: "client bulkWrite with multi: true operations fails after retryable writeConcernError" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorLabels: [ RetryableWriteError ] + writeConcernError: + code: 91 + errmsg: "Replication is being shut down" + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateMany: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - deleteMany: + namespace: *namespace + filter: { _id: 3 } + expectError: + writeConcernErrors: + - code: 91 + message: "Replication is being shut down" + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: true + - delete: 0 + filter: { _id: 3 } + multi: true + nsInfo: + - ns: *namespace From 427ec5145db80e4c261f7e0fe6ec828452c9bdd9 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 4 Apr 2024 10:16:13 -0600 Subject: [PATCH 24/75] write concern error prose test, refactor fail point --- src/action/bulk_write.rs | 4 +- src/cmap/test/integration.rs | 35 ++- src/operation/bulk_write/server_responses.rs | 4 +- .../server_selection/test/in_window.rs | 19 +- src/sdam/description/topology/test/sdam.rs | 17 +- src/sdam/test.rs | 47 ++-- src/test.rs | 1 - src/test/bulk_write.rs | 86 ++++--- src/test/change_stream.rs | 57 +---- src/test/client.rs | 27 +- src/test/spec/gridfs.rs | 21 +- .../client-bulkWrite-serverErrors.json | 54 +++- .../unified/client-bulkWrite-serverErrors.yml | 131 ++++++---- src/test/spec/retryable_reads.rs | 34 +-- src/test/spec/retryable_writes.rs | 91 +++---- src/test/spec/sdam.rs | 13 +- src/test/spec/transactions.rs | 31 +-- src/test/spec/unified_runner/operation.rs | 20 +- src/test/spec/v2_runner.rs | 26 +- src/test/spec/v2_runner/operation.rs | 8 +- src/test/util.rs | 11 +- src/test/util/failpoint.rs | 239 ++++++++++-------- 22 files changed, 472 insertions(+), 504 deletions(-) diff --git a/src/action/bulk_write.rs b/src/action/bulk_write.rs index 0ecf2caba..b9383b224 100644 --- a/src/action/bulk_write.rs +++ b/src/action/bulk_write.rs @@ -193,8 +193,8 @@ impl ExecutionStatus { ErrorKind::ClientBulkWrite(ref bulk_write_error) => { // A top-level error is always fatal. If an individual operation fails // during an ordered bulk write, no more batches should be executed. - error.source.is_some() - || (ordered && !bulk_write_error.write_errors.is_empty()) + !(error.source.is_some() + || (ordered && !bulk_write_error.write_errors.is_empty())) } // A top-level error is always fatal. 
_ => false, diff --git a/src/cmap/test/integration.rs b/src/cmap/test/integration.rs index b33ed936e..3efb76068 100644 --- a/src/cmap/test/integration.rs +++ b/src/cmap/test/integration.rs @@ -15,14 +15,7 @@ use crate::{ runtime, sdam::TopologyUpdater, selection_criteria::ReadPreference, - test::{ - get_client_options, - log_uncaptured, - FailCommandOptions, - FailPoint, - FailPointMode, - TestClient, - }, + test::{get_client_options, log_uncaptured, FailPoint, FailPointMode, TestClient}, }; use semver::VersionReq; use std::{sync::Arc, time::Duration}; @@ -188,13 +181,16 @@ async fn connection_error_during_establishment() { return; } - let options = FailCommandOptions::builder().error_code(1234).build(); - let failpoint = FailPoint::fail_command( - &[LEGACY_HELLO_COMMAND_NAME, "hello"], - FailPointMode::Times(10), - Some(options), - ); - let _fp_guard = client.enable_failpoint(failpoint, None).await.unwrap(); + let _guard = client + .configure_fail_point( + FailPoint::new( + &[LEGACY_HELLO_COMMAND_NAME, "hello"], + FailPointMode::Times(10), + ) + .error_code(1234), + ) + .await + .unwrap(); let handler = Arc::new(TestEventHandler::new()); let mut subscriber = handler.subscribe(); @@ -243,9 +239,12 @@ async fn connection_error_during_operation() { return; } - let options = FailCommandOptions::builder().close_connection(true).build(); - let failpoint = FailPoint::fail_command(&["ping"], FailPointMode::Times(10), Some(options)); - let _fp_guard = client.enable_failpoint(failpoint, None).await.unwrap(); + let _guard = client + .configure_fail_point( + FailPoint::new(&["ping"], FailPointMode::Times(10)).close_connection(true), + ) + .await + .unwrap(); let mut subscriber = handler.subscribe(); diff --git a/src/operation/bulk_write/server_responses.rs b/src/operation/bulk_write/server_responses.rs index a786d8e69..15c70b3ab 100644 --- a/src/operation/bulk_write/server_responses.rs +++ b/src/operation/bulk_write/server_responses.rs @@ -3,7 +3,7 @@ use serde::Deserialize; use crate::{bson::Bson, error::WriteError, operation::CursorInfo, results::BulkWriteResult}; /// The top-level response to the bulkWrite command. -#[derive(Deserialize)] +#[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] pub(super) struct Response { pub(super) cursor: CursorInfo, @@ -12,7 +12,7 @@ pub(super) struct Response { } /// The summary information contained within the top-level response to the bulkWrite command. 
-#[derive(Deserialize)] +#[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] pub(super) struct SummaryInfo { pub(super) n_errors: i64, diff --git a/src/sdam/description/topology/server_selection/test/in_window.rs b/src/sdam/description/topology/server_selection/test/in_window.rs index 7422aa801..4cc651f13 100644 --- a/src/sdam/description/topology/server_selection/test/in_window.rs +++ b/src/sdam/description/topology/server_selection/test/in_window.rs @@ -21,7 +21,6 @@ use crate::{ run_spec_test, Event, EventHandler, - FailCommandOptions, FailPoint, FailPointMode, TestClient, @@ -253,22 +252,18 @@ async fn load_balancing_test() { drop(subscriber); // enable a failpoint on one of the mongoses to slow it down - let options = FailCommandOptions::builder() - .block_connection(Duration::from_millis(500)) - .build(); - let failpoint = FailPoint::fail_command(&["find"], FailPointMode::AlwaysOn, options); - let slow_host = get_client_options().await.hosts[0].clone(); - let criteria = SelectionCriteria::Predicate(Arc::new(move |si| si.address() == &slow_host)); - let fp_guard = setup_client - .enable_failpoint(failpoint, criteria) - .await - .expect("enabling failpoint should succeed"); + let slow_host_criteria = + SelectionCriteria::Predicate(Arc::new(move |si| si.address() == &slow_host)); + let fail_point = FailPoint::new(&["find"], FailPointMode::AlwaysOn) + .block_connection(Duration::from_millis(500)) + .selection_criteria(slow_host_criteria); + let guard = setup_client.configure_fail_point(fail_point).await.unwrap(); // verify that the lesser picked server (slower one) was picked less than 25% of the time. do_test(&client, &mut handler, 0.05, 0.25, 10).await; // disable failpoint and rerun, should be back to even split - drop(fp_guard); + drop(guard); do_test(&client, &mut handler, 0.40, 0.50, 100).await; } diff --git a/src/sdam/description/topology/test/sdam.rs b/src/sdam/description/topology/test/sdam.rs index cd45346b7..2488c8c6e 100644 --- a/src/sdam/description/topology/test/sdam.rs +++ b/src/sdam/description/topology/test/sdam.rs @@ -31,7 +31,6 @@ use crate::{ Event, EventClient, EventHandler, - FailCommandOptions, FailPoint, FailPointMode, TestClient, @@ -673,19 +672,13 @@ async fn heartbeat_events() { options.heartbeat_freq = None; let fp_client = TestClient::with_options(Some(options)).await; - let fp_options = FailCommandOptions::builder() - .error_code(1234) - .app_name("heartbeat_events".to_string()) - .build(); - let failpoint = FailPoint::fail_command( + let fail_point = FailPoint::new( &[LEGACY_HELLO_COMMAND_NAME, "hello"], FailPointMode::AlwaysOn, - fp_options, - ); - let _fp_guard = fp_client - .enable_failpoint(failpoint, None) - .await - .expect("enabling failpoint should succeed"); + ) + .app_name("heartbeat_events") + .error_code(1234); + let _guard = fp_client.configure_fail_point(fail_point).await.unwrap(); subscriber .wait_for_event(Duration::from_millis(500), |event| { diff --git a/src/sdam/test.rs b/src/sdam/test.rs index 7425108cb..7b293fea4 100644 --- a/src/sdam/test.rs +++ b/src/sdam/test.rs @@ -20,7 +20,6 @@ use crate::{ Event, EventClient, EventHandler, - FailCommandOptions, FailPoint, FailPointMode, TestClient, @@ -48,20 +47,17 @@ async fn min_heartbeat_frequency() { return; } - let fp_options = FailCommandOptions::builder() - .app_name("SDAMMinHeartbeatFrequencyTest".to_string()) - .error_code(1234) - .build(); - let failpoint = FailPoint::fail_command( - &[LEGACY_HELLO_COMMAND_NAME, "hello"], - FailPointMode::Times(5), - fp_options, - ); - - let 
_fp_guard = setup_client - .enable_failpoint(failpoint, None) + let _guard = setup_client + .configure_fail_point( + FailPoint::new( + &[LEGACY_HELLO_COMMAND_NAME, "hello"], + FailPointMode::Times(5), + ) + .app_name("SDAMMinHeartbeatFrequencyTest") + .error_code(1234), + ) .await - .expect("enabling failpoint should succeed"); + .unwrap(); let mut options = setup_client_options; options.app_name = Some("SDAMMinHeartbeatFrequencyTest".to_string()); @@ -135,20 +131,17 @@ async fn sdam_pool_management() { .await .expect("should see server heartbeat succeeded event"); - let fp_options = FailCommandOptions::builder() - .app_name("SDAMPoolManagementTest".to_string()) - .error_code(1234) - .build(); - let failpoint = FailPoint::fail_command( - &[LEGACY_HELLO_COMMAND_NAME, "hello"], - FailPointMode::Times(4), - fp_options, - ); - - let _fp_guard = client - .enable_failpoint(failpoint, None) + let _guard = client + .configure_fail_point( + FailPoint::new( + &[LEGACY_HELLO_COMMAND_NAME, "hello"], + FailPointMode::Times(4), + ) + .app_name("SDAMPoolManagementTest") + .error_code(1234), + ) .await - .expect("enabling failpoint should succeed"); + .unwrap(); // Since there is no deterministic ordering, simply collect all the events and check for their // presence. diff --git a/src/test.rs b/src/test.rs index 9acb760ae..945617534 100644 --- a/src/test.rs +++ b/src/test.rs @@ -29,7 +29,6 @@ pub(crate) use self::{ Event, EventClient, EventHandler, - FailCommandOptions, FailPoint, FailPointMode, MatchErrExt, diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index a73294f33..82afed90b 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -9,7 +9,14 @@ use crate::{ bson::{doc, Document}, error::ErrorKind, options::WriteModel, - test::{log_uncaptured, spec::unified_runner::run_unified_tests, EventHandler}, + test::{ + get_client_options, + log_uncaptured, + spec::unified_runner::run_unified_tests, + EventHandler, + FailPoint, + FailPointMode, + }, Client, Namespace, }; @@ -59,42 +66,6 @@ async fn max_write_batch_size_batching() { assert_eq!(second_len, 1); } -#[tokio::test] -async fn max_bson_object_size_with_document_sequences() { - let handler = Arc::new(EventHandler::new()); - let client = Client::test_builder() - .event_handler(handler.clone()) - .build() - .await; - let mut subscriber = handler.subscribe(); - - if client.server_version_lt(8, 0) { - log_uncaptured( - "skipping max_bson_object_size_with_document_sequences: bulkWrite requires 8.0+", - ); - return; - } - - let max_bson_object_size = client.server_info.max_bson_object_size as usize; - - let document = doc! 
{ "a": "b".repeat(max_bson_object_size / 2) }; - let model = WriteModel::InsertOne { - namespace: Namespace::new("db", "coll"), - document, - }; - let models = vec![model; 2]; - - let result = client.bulk_write(models).await.unwrap(); - assert_eq!(result.inserted_count as usize, 2); - - let (started, _) = subscriber - .wait_for_successful_command_execution(Duration::from_millis(500), "bulkWrite") - .await - .expect("no events observed"); - let len = started.command.get_array("ops").unwrap().len(); - assert_eq!(len, 2); -} - #[tokio::test] async fn max_message_size_bytes_batching() { let handler = Arc::new(EventHandler::new()); @@ -127,15 +98,15 @@ async fn max_message_size_bytes_batching() { .wait_for_successful_command_execution(Duration::from_millis(500), "bulkWrite") .await .expect("no events observed"); - let first_ops_len = first_started.command.get_array("ops").unwrap().len(); + let first_len = first_started.command.get_array("ops").unwrap().len(); + assert_eq!(first_len, num_models - 1); let (second_started, _) = subscriber .wait_for_successful_command_execution(Duration::from_millis(500), "bulkWrite") .await .expect("no events observed"); - let second_ops_len = second_started.command.get_array("ops").unwrap().len(); - - assert_eq!(first_ops_len + second_ops_len, num_models); + let second_len = second_started.command.get_array("ops").unwrap().len(); + assert_eq!(second_len, 1); } #[tokio::test] @@ -186,3 +157,36 @@ async fn cursor_iteration() { .await .expect("no getMore observed"); } + +#[tokio::test(flavor = "multi_thread")] +async fn write_concern_errors_are_collected() { + let mut options = get_client_options().await.clone(); + options.retry_writes = Some(false); + let client = Client::test_builder().options(options).build().await; + + if client.server_version_lt(8, 0) { + log_uncaptured("skipping max_write_batch_size_batching: bulkWrite requires 8.0+"); + return; + } + + let max_write_batch_size = client.server_info.max_write_batch_size.unwrap() as usize; + + let fail_point = FailPoint::new(&["bulkWrite"], FailPointMode::Times(2)) + .write_concern_error(doc! { "code": 91, "errmsg": "Replication is being shut down" }); + let _guard = client.configure_fail_point(fail_point).await.unwrap(); + + let models = vec![ + WriteModel::InsertOne { + namespace: Namespace::new("db", "coll"), + document: doc! { "a": "b" } + }; + max_write_batch_size + 1 + ]; + let error = client.bulk_write(models).ordered(false).await.unwrap_err(); + + let ErrorKind::ClientBulkWrite(bulk_write_error) = *error.kind else { + panic!("Expected bulk write error, got {:?}", error); + }; + + assert_eq!(bulk_write_error.write_concern_errors.len(), 2); +} diff --git a/src/test/change_stream.rs b/src/test/change_stream.rs index 53b2f4c5e..377020fea 100644 --- a/src/test/change_stream.rs +++ b/src/test/change_stream.rs @@ -12,7 +12,7 @@ use crate::{ db::options::ChangeStreamPreAndPostImages, event::command::{CommandEvent, CommandStartedEvent, CommandSucceededEvent}, options::{Acknowledgment, WriteConcern}, - test::{FailCommandOptions, FailPoint, FailPointMode}, + test::{FailPoint, FailPointMode}, Client, Collection, }; @@ -164,13 +164,8 @@ async fn resumes_on_error() -> Result<()> { }) if key == doc! 
{ "_id": 1 } )); - let _guard = FailPoint::fail_command( - &["getMore"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(43).build(), - ) - .enable(&client, None) - .await?; + let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(43); + let _guard = client.configure_fail_point(fail_point).await?; coll.insert_one(doc! { "_id": 2 }, None).await?; assert!(matches!(stream.next().await.transpose()?, @@ -197,13 +192,8 @@ async fn does_not_resume_aggregate() -> Result<()> { None => return Ok(()), }; - let _guard = FailPoint::fail_command( - &["aggregate"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(43).build(), - ) - .enable(&client, None) - .await?; + let fail_point = FailPoint::new(&["aggregate"], FailPointMode::Times(1)).error_code(43); + let _guard = client.configure_fail_point(fail_point).await?; assert!(coll.watch().await.is_err()); @@ -265,13 +255,9 @@ async fn resume_kill_cursor_error_suppressed() -> Result<()> { }) if key == doc! { "_id": 1 } )); - let _guard = FailPoint::fail_command( - &["getMore", "killCursors"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(43).build(), - ) - .enable(&client, None) - .await?; + let fail_point = + FailPoint::new(&["getMore", "killCursors"], FailPointMode::Times(1)).error_code(43); + let _guard = client.configure_fail_point(fail_point).await?; coll.insert_one(doc! { "_id": 2 }, None).await?; assert!(matches!(stream.next().await.transpose()?, @@ -310,13 +296,8 @@ async fn resume_start_at_operation_time() -> Result<()> { return Ok(()); } - let _guard = FailPoint::fail_command( - &["getMore"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(43).build(), - ) - .enable(&client, None) - .await?; + let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(43); + let _guard = client.configure_fail_point(fail_point).await?; coll.insert_one(doc! { "_id": 2 }, None).await?; stream.next().await.transpose()?; @@ -518,13 +499,8 @@ async fn resume_uses_start_after() -> Result<()> { // Create an event, and synthesize a resumable error when calling `getMore` for that event. coll.insert_one(doc! {}, None).await?; - let _guard = FailPoint::fail_command( - &["getMore"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(43).build(), - ) - .enable(&client, None) - .await?; + let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(43); + let _guard = client.configure_fail_point(fail_point).await?; stream.next().await.transpose()?; let commands = client.get_command_started_events(&["aggregate"]); @@ -578,13 +554,8 @@ async fn resume_uses_resume_after() -> Result<()> { // Create an event, and synthesize a resumable error when calling `getMore` for that event. coll.insert_one(doc! 
{}, None).await?; - let _guard = FailPoint::fail_command( - &["getMore"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(43).build(), - ) - .enable(&client, None) - .await?; + let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(43); + let _guard = client.configure_fail_point(fail_point).await?; stream.next().await.transpose()?; let commands = client.get_command_started_events(&["aggregate"]); diff --git a/src/test/client.rs b/src/test/client.rs index 4ffb1e54a..aa7e9b288 100644 --- a/src/test/client.rs +++ b/src/test/client.rs @@ -18,7 +18,6 @@ use crate::{ util::TestClient, Event, EventHandler, - FailCommandOptions, FailPoint, FailPointMode, SERVER_API, @@ -706,14 +705,10 @@ async fn retry_commit_txn_check_out() { .await .unwrap(); - // enable a fail point that clears the connection pools so that - // commitTransaction will create a new connection during check out. - let fp = FailPoint::fail_command( - &["ping"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(11600).build(), - ); - let _guard = setup_client.enable_failpoint(fp, None).await.unwrap(); + // Enable a fail point that clears the connection pools so that commitTransaction will create a + // new connection during checkout. + let fail_point = FailPoint::new(&["ping"], FailPointMode::Times(1)).error_code(11600); + let _guard = setup_client.configure_fail_point(fail_point).await.unwrap(); let mut subscriber = handler.subscribe(); client @@ -756,17 +751,13 @@ async fn retry_commit_txn_check_out() { .await .expect("should see mark available event"); - // enable a failpoint on the handshake to cause check_out - // to fail with a retryable error - let fp = FailPoint::fail_command( + let fail_point = FailPoint::new( &[LEGACY_HELLO_COMMAND_NAME, "hello"], FailPointMode::Times(1), - FailCommandOptions::builder() - .error_code(11600) - .app_name("retry_commit_txn_check_out".to_string()) - .build(), - ); - let _guard2 = setup_client.enable_failpoint(fp, None).await.unwrap(); + ) + .error_code(11600) + .app_name("retry_commit_txn_check_out"); + let _guard2 = setup_client.configure_fail_point(fail_point).await.unwrap(); // finally, attempt the commit. 
// this should succeed due to retry diff --git a/src/test/spec/gridfs.rs b/src/test/spec/gridfs.rs index bf55830a6..bc29758d9 100644 --- a/src/test/spec/gridfs.rs +++ b/src/test/spec/gridfs.rs @@ -11,7 +11,6 @@ use crate::{ test::{ get_client_options, spec::unified_runner::run_unified_tests, - FailCommandOptions, FailPoint, FailPointMode, TestClient, @@ -210,14 +209,8 @@ async fn upload_stream_errors() { GridFsUploadOptions::builder().chunk_size_bytes(1).build(), ); - let _fp_guard = FailPoint::fail_command( - &["insert"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(1234).build(), - ) - .enable(&client, None) - .await - .unwrap(); + let fail_point = FailPoint::new(&["insert"], FailPointMode::Times(1)).error_code(1234); + let _guard = client.configure_fail_point(fail_point).await.unwrap(); let error = get_mongo_error(upload_stream.write_all(&[11]).await); assert_eq!(error.sdam_code(), Some(1234)); @@ -232,14 +225,8 @@ async fn upload_stream_errors() { upload_stream.write_all(&[11]).await.unwrap(); - let _fp_guard = FailPoint::fail_command( - &["insert"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(1234).build(), - ) - .enable(&client, None) - .await - .unwrap(); + let fail_point = FailPoint::new(&["insert"], FailPointMode::Times(1)).error_code(1234); + let _guard = client.configure_fail_point(fail_point).await.unwrap(); let error = get_mongo_error(upload_stream.close().await); assert_eq!(error.sdam_code(), Some(1234)); diff --git a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json index caab0b546..fd517adb5 100644 --- a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json +++ b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json @@ -5,7 +5,9 @@ { "minServerVersion": "8.0", "topologies": [ - "replicaset" + "replicaset", + "sharded", + "load-balanced" ] } ], @@ -147,12 +149,16 @@ "1": { "matchedCount": 1, "modifiedCount": 1, - "upsertedId": null + "upsertedId": { + "$$exists": false + } }, "2": { "matchedCount": 1, "modifiedCount": 1, - "upsertedId": null + "upsertedId": { + "$$exists": false + } } }, "deleteResults": { @@ -217,7 +223,13 @@ { "ns": "retryable-writes-tests.coll0" } - ] + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } } } }, @@ -271,7 +283,13 @@ { "ns": "retryable-writes-tests.coll0" } - ] + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } } } } @@ -497,12 +515,16 @@ "1": { "matchedCount": 1, "modifiedCount": 1, - "upsertedId": null + "upsertedId": { + "$$exists": false + } }, "2": { "matchedCount": 1, "modifiedCount": 1, - "upsertedId": null + "upsertedId": { + "$$exists": false + } } }, "deleteResults": { @@ -567,7 +589,13 @@ { "ns": "retryable-writes-tests.coll0" } - ] + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } } } }, @@ -621,7 +649,13 @@ { "ns": "retryable-writes-tests.coll0" } - ] + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } } } } @@ -689,7 +723,7 @@ "writeConcernErrors": [ { "code": 91, - "errmsg": "Replication is being shut down" + "message": "Replication is being shut down" } ] } diff --git a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.yml b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.yml index a882a8f6c..e5022870c 100644 --- 
a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.yml +++ b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.yml @@ -1,13 +1,17 @@ description: "client bulkWrite retryable writes" -schemaVersion: "1.18" +schemaVersion: "1.20" runOnRequirements: - minServerVersion: "8.0" - topologies: [ replicaset ] + topologies: + - replicaset + - sharded + - load-balanced createEntities: - client: id: &client0 client0 observeEvents: [ commandStartedEvent ] + useMultipleMongoses: false - database: id: &database0 database0 client: *client0 @@ -25,6 +29,9 @@ initialData: - { _id: 2, x: 22 } - { _id: 3, x: 33 } +_yamlAnchors: + namespace: &namespace "retryable-writes-tests.coll0" + tests: - description: "client bulkWrite with no multi: true operations succeeds after retryable top-level error" operations: @@ -45,28 +52,21 @@ tests: arguments: models: - insertOne: - namespace: - db: *database0Name - coll: *collection0Name + namespace: *namespace document: { _id: 4, x: 44 } - updateOne: - namespace: - db: *database0Name - coll: *collection0Name + namespace: *namespace filter: { _id: 1 } update: $inc: { x: 1 } - replaceOne: - namespace: - db: *database0Name - coll: *collection0Name + namespace: *namespace filter: { _id: 2 } replacement: { x: 222 } - deleteOne: - namespace: - db: *database0Name - coll: *collection0Name + namespace: *namespace filter: { _id: 3 } + verboseResults: true expectResult: insertedCount: 1 upsertedCount: 0 @@ -74,11 +74,20 @@ tests: modifiedCount: 2 deletedCount: 1 insertResults: - $$unsetOrMatches: {} + 0: + insertedId: 4 updateResults: - $$unsetOrMatches: {} + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } deleteResults: - $$unsetOrMatches: {} + 3: + deletedCount: 1 expectEvents: - client: *client0 events: @@ -86,6 +95,9 @@ tests: commandName: bulkWrite databaseName: admin command: + bulkWrite: 1 + errorsOnly: false + ordered: true ops: - insert: 0 document: { _id: 4, x: 44 } @@ -102,11 +114,16 @@ tests: filter: { _id: 3 } multi: false nsInfo: - - ns: retryable-writes-tests.coll0 + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } - commandStartedEvent: commandName: bulkWrite databaseName: admin command: + bulkWrite: 1 + errorsOnly: false + ordered: true ops: - insert: 0 document: { _id: 4, x: 44 } @@ -123,7 +140,9 @@ tests: filter: { _id: 3 } multi: false nsInfo: - - ns: retryable-writes-tests.coll0 + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } outcome: - collectionName: *collection0Name databaseName: *database0Name @@ -150,16 +169,12 @@ tests: arguments: models: - updateMany: - namespace: - db: *database0Name - coll: *collection0Name + namespace: *namespace filter: { _id: 1 } update: $inc: { x: 1 } - deleteMany: - namespace: - db: *database0Name - coll: *collection0Name + namespace: *namespace filter: { _id: 3 } expectError: errorCode: 189 @@ -171,6 +186,9 @@ tests: commandName: bulkWrite databaseName: admin command: + bulkWrite: 1 + errorsOnly: true + ordered: true ops: - update: 0 filter: { _id: 1 } @@ -181,7 +199,7 @@ tests: filter: { _id: 3 } multi: true nsInfo: - - ns: retryable-writes-tests.coll0 + - ns: *namespace - description: "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError" operations: - object: testRunner @@ -203,28 +221,21 @@ tests: arguments: models: - insertOne: - namespace: - db: *database0Name - coll: *collection0Name 
+ namespace: *namespace document: { _id: 4, x: 44 } - updateOne: - namespace: - db: *database0Name - coll: *collection0Name + namespace: *namespace filter: { _id: 1 } update: $inc: { x: 1 } - replaceOne: - namespace: - db: *database0Name - coll: *collection0Name + namespace: *namespace filter: { _id: 2 } replacement: { x: 222 } - deleteOne: - namespace: - db: *database0Name - coll: *collection0Name + namespace: *namespace filter: { _id: 3 } + verboseResults: true expectResult: insertedCount: 1 upsertedCount: 0 @@ -232,11 +243,20 @@ tests: modifiedCount: 2 deletedCount: 1 insertResults: - $$unsetOrMatches: {} + 0: + insertedId: 4 updateResults: - $$unsetOrMatches: {} + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } deleteResults: - $$unsetOrMatches: {} + 3: + deletedCount: 1 expectEvents: - client: *client0 events: @@ -244,6 +264,9 @@ tests: commandName: bulkWrite databaseName: admin command: + bulkWrite: 1 + errorsOnly: false + ordered: true ops: - insert: 0 document: { _id: 4, x: 44 } @@ -260,11 +283,16 @@ tests: filter: { _id: 3 } multi: false nsInfo: - - ns: retryable-writes-tests.coll0 + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } - commandStartedEvent: commandName: bulkWrite databaseName: admin command: + bulkWrite: 1 + errorsOnly: false + ordered: true ops: - insert: 0 document: { _id: 4, x: 44 } @@ -281,7 +309,9 @@ tests: filter: { _id: 3 } multi: false nsInfo: - - ns: retryable-writes-tests.coll0 + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } - description: "client bulkWrite with multi: true operations fails after retryable writeConcernError" operations: - object: testRunner @@ -303,21 +333,17 @@ tests: arguments: models: - updateMany: - namespace: - db: *database0Name - coll: *collection0Name + namespace: *namespace filter: { _id: 1 } update: $inc: { x: 1 } - deleteMany: - namespace: - db: *database0Name - coll: *collection0Name + namespace: *namespace filter: { _id: 3 } expectError: writeConcernErrors: - code: 91 - errmsg: "Replication is being shut down" + message: "Replication is being shut down" expectEvents: - client: *client0 events: @@ -325,6 +351,9 @@ tests: commandName: bulkWrite databaseName: admin command: + bulkWrite: 1 + errorsOnly: true + ordered: true ops: - update: 0 filter: { _id: 1 } @@ -335,4 +364,4 @@ tests: filter: { _id: 3 } multi: true nsInfo: - - ns: retryable-writes-tests.coll0 + - ns: *namespace diff --git a/src/test/spec/retryable_reads.rs b/src/test/spec/retryable_reads.rs index c680c2b42..a8d0cfe68 100644 --- a/src/test/spec/retryable_reads.rs +++ b/src/test/spec/retryable_reads.rs @@ -16,7 +16,6 @@ use crate::{ spec::{unified_runner::run_unified_tests, v2_runner::run_v2_tests}, Event, EventHandler, - FailCommandOptions, FailPoint, FailPointMode, TestClient, @@ -54,11 +53,8 @@ async fn retry_releases_connection() { .collection("retry_releases_connection"); collection.insert_one(doc! { "x": 1 }, None).await.unwrap(); - // Use a connection error to ensure streaming monitor checks get cancelled. Otherwise, we'd have - // to wait for the entire heartbeatFrequencyMS before the find succeeds. 
- let options = FailCommandOptions::builder().close_connection(true).build(); - let failpoint = FailPoint::fail_command(&["find"], FailPointMode::Times(1), Some(options)); - let _fp_guard = client.enable_failpoint(failpoint, None).await.unwrap(); + let fail_point = FailPoint::new(&["find"], FailPointMode::Times(1)).close_connection(true); + let _guard = client.configure_fail_point(fail_point).await.unwrap(); runtime::timeout(Duration::from_secs(1), collection.find_one(doc! {}, None)) .await @@ -98,12 +94,10 @@ async fn retry_read_pool_cleared() { .collection("retry_read_pool_cleared"); collection.insert_one(doc! { "x": 1 }, None).await.unwrap(); - let options = FailCommandOptions::builder() + let fail_point = FailPoint::new(&["find"], FailPointMode::Times(1)) .error_code(91) - .block_connection(Duration::from_secs(1)) - .build(); - let failpoint = FailPoint::fail_command(&["find"], FailPointMode::Times(1), Some(options)); - let _fp_guard = client.enable_failpoint(failpoint, None).await.unwrap(); + .block_connection(Duration::from_secs(1)); + let _guard = client.configure_fail_point(fail_point).await.unwrap(); let mut subscriber = handler.subscribe(); @@ -181,12 +175,11 @@ async fn retry_read_different_mongos() { log_uncaptured("skipping retry_read_different_mongos: requires failCommand"); return; } - let fail_opts = FailCommandOptions::builder() + + let fail_point = FailPoint::new(&["find"], FailPointMode::Times(1)) .error_code(6) - .close_connection(true) - .build(); - let fp = FailPoint::fail_command(&["find"], FailPointMode::Times(1), Some(fail_opts)); - guards.push(client.enable_failpoint(fp, None).await.unwrap()); + .close_connection(true); + guards.push(client.configure_fail_point(fail_point).await.unwrap()); } let client = Client::test_builder() @@ -238,12 +231,11 @@ async fn retry_read_same_mongos() { let mut client_options = client_options.clone(); client_options.direct_connection = Some(true); let client = Client::test_builder().options(client_options).build().await; - let fail_opts = FailCommandOptions::builder() + + let fail_point = FailPoint::new(&["find"], FailPointMode::Times(1)) .error_code(6) - .close_connection(true) - .build(); - let fp = FailPoint::fail_command(&["find"], FailPointMode::Times(1), Some(fail_opts)); - client.enable_failpoint(fp, None).await.unwrap() + .close_connection(true); + client.configure_fail_point(fail_point).await.unwrap() }; let client = Client::test_builder() diff --git a/src/test/spec/retryable_writes.rs b/src/test/spec/retryable_writes.rs index 085ea4ab5..c94cbfac7 100644 --- a/src/test/spec/retryable_writes.rs +++ b/src/test/spec/retryable_writes.rs @@ -30,7 +30,6 @@ use crate::{ Event, EventClient, EventHandler, - FailCommandOptions, FailPoint, FailPointMode, TestClient, @@ -46,7 +45,7 @@ async fn run_unified() { #[tokio::test(flavor = "multi_thread")] async fn run_legacy() { async fn run_test(test_file: TestFile) { - for mut test_case in test_file.tests { + for test_case in test_file.tests { if test_case.operation.name == "bulkWrite" { continue; } @@ -83,13 +82,13 @@ async fn run_legacy() { .expect(&test_case.description); } - let _fp_guard = if let Some(ref mut fail_point) = test_case.fail_point { - Some(fail_point.enable(&client, None).await.unwrap_or_else(|e| { - panic!( - "{}: error enabling failpoint: {:#?}", - test_case.description, e - ) - })) + let guard = if let Some(fail_point) = test_case.fail_point { + Some( + client + .configure_fail_point(fail_point) + .await + .expect(&test_case.description), + ) } else { None }; @@ -98,7 
+97,7 @@ async fn run_legacy() { let result = test_case.operation.execute_on_collection(&coll, None).await; // Disable the failpoint, if any. - drop(_fp_guard); + drop(guard); if let Some(error) = test_case.outcome.error { assert_eq!( @@ -415,13 +414,11 @@ async fn retry_write_pool_cleared() { .database("retry_write_pool_cleared") .collection("retry_write_pool_cleared"); - let options = FailCommandOptions::builder() + let fail_point = FailPoint::new(&["insert"], FailPointMode::Times(1)) .error_code(91) .block_connection(Duration::from_secs(1)) - .error_labels(vec![RETRYABLE_WRITE_ERROR.to_string()]) - .build(); - let failpoint = FailPoint::fail_command(&["insert"], FailPointMode::Times(1), Some(options)); - let _fp_guard = client.enable_failpoint(failpoint, None).await.unwrap(); + .error_labels(vec![RETRYABLE_WRITE_ERROR]); + let _guard = client.configure_fail_point(fail_point).await.unwrap(); let mut subscriber = handler.subscribe(); @@ -504,20 +501,19 @@ async fn retry_write_retryable_write_error() { // Enable the failpoint. let fp_guard = { let client = client.lock().await; - FailPoint::fail_command( - &["insert"], - FailPointMode::Times(1), - FailCommandOptions::builder() + let fail_point = + FailPoint::new(&["insert"], FailPointMode::Times(1)) .error_code(10107) .error_labels(vec![ - "RetryableWriteError".to_string(), - "NoWritesPerformed".to_string(), - ]) - .build(), - ) - .enable(client.as_ref().unwrap(), None) - .await - .unwrap() + "RetryableWriteError", + "NoWritesPerformed", + ]); + client + .as_ref() + .unwrap() + .configure_fail_point(fail_point) + .await + .unwrap() }; fp_tx.send(fp_guard).unwrap(); // Defer acknowledging the message until the failpoint has been set @@ -539,19 +535,12 @@ async fn retry_write_retryable_write_error() { return; } - let _fp_guard = FailPoint::fail_command( - &["insert"], - FailPointMode::Times(1), - FailCommandOptions::builder() - .write_concern_error(doc! { - "code": 91, - "errorLabels": ["RetryableWriteError"], - }) - .build(), - ) - .enable(&client, None) - .await - .unwrap(); + let fail_point = + FailPoint::new(&["insert"], FailPointMode::Times(1)).write_concern_error(doc! 
{ + "code": 91, + "errorLabels": ["RetryableWriteError"], + }); + let _guard = client.configure_fail_point(fail_point).await.unwrap(); let result = client .database("test") @@ -588,13 +577,12 @@ async fn retry_write_different_mongos() { log_uncaptured("skipping retry_write_different_mongos: requires failCommand"); return; } - let fail_opts = FailCommandOptions::builder() + + let fail_point = FailPoint::new(&["insert"], FailPointMode::Times(1)) .error_code(6) - .error_labels(vec!["RetryableWriteError".to_string()]) - .close_connection(true) - .build(); - let fp = FailPoint::fail_command(&["insert"], FailPointMode::Times(1), Some(fail_opts)); - guards.push(client.enable_failpoint(fp, None).await.unwrap()); + .error_labels(vec![RETRYABLE_WRITE_ERROR]) + .close_connection(true); + guards.push(client.configure_fail_point(fail_point).await.unwrap()); } let client = Client::test_builder() @@ -646,13 +634,12 @@ async fn retry_write_same_mongos() { let mut client_options = client_options.clone(); client_options.direct_connection = Some(true); let client = Client::test_builder().options(client_options).build().await; - let fail_opts = FailCommandOptions::builder() + + let fail_point = FailPoint::new(&["insert"], FailPointMode::Times(1)) .error_code(6) - .error_labels(vec!["RetryableWriteError".to_string()]) - .close_connection(true) - .build(); - let fp = FailPoint::fail_command(&["insert"], FailPointMode::Times(1), Some(fail_opts)); - client.enable_failpoint(fp, None).await.unwrap() + .error_labels(vec![RETRYABLE_WRITE_ERROR]) + .close_connection(true); + client.configure_fail_point(fail_point).await.unwrap() }; let client = Client::test_builder() diff --git a/src/test/spec/sdam.rs b/src/test/spec/sdam.rs index e72fd3377..547713cab 100644 --- a/src/test/spec/sdam.rs +++ b/src/test/spec/sdam.rs @@ -12,7 +12,6 @@ use crate::{ spec::unified_runner::run_unified_tests, Event, EventHandler, - FailCommandOptions, FailPoint, FailPointMode, TestClient, @@ -201,15 +200,13 @@ async fn rtt_is_updated() { assert!(events.len() > 2); // configure a failpoint that blocks hello commands - let fp = FailPoint::fail_command( + let fail_point = FailPoint::new( &["hello", LEGACY_HELLO_COMMAND_NAME], FailPointMode::Times(1000), - FailCommandOptions::builder() - .block_connection(Duration::from_millis(500)) - .app_name(app_name.to_string()) - .build(), - ); - let _gp_guard = fp.enable(&client, None).await.unwrap(); + ) + .block_connection(Duration::from_millis(500)) + .app_name(app_name); + let _guard = client.configure_fail_point(fail_point).await.unwrap(); let mut watcher = client.topology().watch(); runtime::timeout(Duration::from_secs(10), async move { diff --git a/src/test/spec/transactions.rs b/src/test/spec/transactions.rs index 5fd40d39f..fcb9710d3 100644 --- a/src/test/spec/transactions.rs +++ b/src/test/spec/transactions.rs @@ -10,7 +10,6 @@ use crate::{ get_client_options, log_uncaptured, spec::{unified_runner::run_unified_tests, v2_runner::run_v2_tests}, - FailCommandOptions, FailPoint, FailPointMode, TestClient, @@ -215,17 +214,10 @@ async fn convenient_api_retry_timeout_commit_unknown() { .database("test_convenient") .collection::("test_convenient"); - let _fp = FailPoint::fail_command( - &["commitTransaction"], - FailPointMode::Times(1), - FailCommandOptions::builder() - .error_code(251) - .error_labels(vec![UNKNOWN_TRANSACTION_COMMIT_RESULT.to_string()]) - .build(), - ) - .enable(&client, None) - .await - .unwrap(); + let fail_point = FailPoint::new(&["commitTransaction"], FailPointMode::Times(1)) + 
.error_code(251) + .error_labels(vec![UNKNOWN_TRANSACTION_COMMIT_RESULT]); + let _guard = client.configure_fail_point(fail_point).await.unwrap(); let result = session .with_transaction( @@ -269,17 +261,10 @@ async fn convenient_api_retry_timeout_commit_transient() { .database("test_convenient") .collection::("test_convenient"); - let _fp = FailPoint::fail_command( - &["commitTransaction"], - FailPointMode::Times(1), - FailCommandOptions::builder() - .error_code(251) - .error_labels(vec![TRANSIENT_TRANSACTION_ERROR.to_string()]) - .build(), - ) - .enable(&client, None) - .await - .unwrap(); + let fail_point = FailPoint::new(&["commitTransaction"], FailPointMode::Times(1)) + .error_code(251) + .error_labels(vec![TRANSIENT_TRANSACTION_ERROR]); + let _guard = client.configure_fail_point(fail_point).await.unwrap(); let result = session .with_transaction( diff --git a/src/test/spec/unified_runner/operation.rs b/src/test/spec/unified_runner/operation.rs index b808c9803..701dacd05 100644 --- a/src/test/spec/unified_runner/operation.rs +++ b/src/test/spec/unified_runner/operation.rs @@ -73,7 +73,6 @@ use crate::{ UpdateOptions, }, runtime, - selection_criteria::ReadPreference, serde_util, test::FailPoint, Collection, @@ -1381,9 +1380,8 @@ impl TestOperation for FailPointCommand { ) -> BoxFuture<'a, ()> { async move { let client = test_runner.get_client(&self.client).await; - let guard = self - .fail_point - .enable(&client, Some(ReadPreference::Primary.into())) + let guard = client + .configure_fail_point(self.fail_point.clone()) .await .unwrap(); test_runner.fail_point_guards.write().await.push(guard); @@ -1414,16 +1412,16 @@ impl TestOperation for TargetedFailPoint { .unwrap_or_else(|| panic!("ClientSession not pinned")) }) .await; - let fail_point_guard = test_runner + let guard = test_runner .internal_client - .enable_failpoint(self.fail_point.clone(), Some(selection_criteria)) + .configure_fail_point( + self.fail_point + .clone() + .selection_criteria(selection_criteria), + ) .await .unwrap(); - test_runner - .fail_point_guards - .write() - .await - .push(fail_point_guard); + test_runner.fail_point_guards.write().await.push(guard); } .boxed() } diff --git a/src/test/spec/v2_runner.rs b/src/test/spec/v2_runner.rs index 88066d181..5e445212b 100644 --- a/src/test/spec/v2_runner.rs +++ b/src/test/spec/v2_runner.rs @@ -4,7 +4,7 @@ pub(crate) mod operation; pub(crate) mod test_event; pub(crate) mod test_file; -use std::{future::IntoFuture, ops::Deref, sync::Arc, time::Duration}; +use std::{future::IntoFuture, sync::Arc, time::Duration}; use futures::{future::BoxFuture, FutureExt}; use semver::VersionReq; @@ -23,6 +23,7 @@ use crate::{ spec::deserialize_spec_tests, util::{get_default_name, FailPointGuard}, EventClient, + FailPoint, TestClient, SERVERLESS, }, @@ -228,8 +229,12 @@ impl TestContext { // Persist fail point guards so they disable post-test. 
let mut fail_point_guards: Vec = Vec::new(); - if let Some(fail_point) = &test.fail_point { - fail_point_guards.push(fail_point.enable(client.deref(), None).await.unwrap()); + if let Some(ref fail_point) = test.fail_point { + let guard = client + .configure_fail_point(fail_point.clone()) + .await + .unwrap(); + fail_point_guards.push(guard); } // Start the test sessions @@ -392,7 +397,7 @@ impl<'a> OpRunner<'a> { .unwrap(); } "targetedFailPoint" => { - let fail_point = from_bson( + let fail_point: FailPoint = from_bson( operation .execute_on_client(&self.internal_client) .await @@ -408,13 +413,12 @@ impl<'a> OpRunner<'a> { .cloned() .unwrap_or_else(|| panic!("ClientSession is not pinned")); - self.fail_point_guards.push( - self.client - .deref() - .enable_failpoint(fail_point, Some(selection_criteria)) - .await - .unwrap(), - ); + let guard = self + .client + .configure_fail_point(fail_point.selection_criteria(selection_criteria)) + .await + .unwrap(); + self.fail_point_guards.push(guard); } other => panic!("unknown operation: {}", other), } diff --git a/src/test/spec/v2_runner/operation.rs b/src/test/spec/v2_runner/operation.rs index 644c0794f..1cc92a03a 100644 --- a/src/test/spec/v2_runner/operation.rs +++ b/src/test/spec/v2_runner/operation.rs @@ -5,7 +5,7 @@ use serde::{de::Deserializer, Deserialize}; use crate::{ action::Action, - bson::{doc, to_bson, Bson, Deserializer as BsonDeserializer, Document}, + bson::{doc, Bson, Deserializer as BsonDeserializer, Document}, client::session::TransactionState, db::options::ListCollectionsOptions, error::Result, @@ -942,7 +942,11 @@ pub(super) struct TargetedFailPoint { impl TestOperation for TargetedFailPoint { fn execute_on_client<'a>(&'a self, _client: &'a TestClient) -> BoxFuture>> { - async move { Ok(Some(to_bson(&self.fail_point)?)) }.boxed() + async move { + let command_document = bson::to_document(&self.fail_point).unwrap(); + Ok(Some(command_document.into())) + } + .boxed() } } diff --git a/src/test/util.rs b/src/test/util.rs index af72dc51c..98c8ebd63 100644 --- a/src/test/util.rs +++ b/src/test/util.rs @@ -7,7 +7,7 @@ mod trace; pub(crate) use self::{ event::{Event, EventClient, EventHandler}, - failpoint::{FailCommandOptions, FailPoint, FailPointGuard, FailPointMode}, + failpoint::{FailPoint, FailPointGuard, FailPointMode}, matchable::{assert_matches, eq_matches, is_expected_type, MatchErrExt, Matchable}, subscriber::EventSubscriber, }; @@ -28,7 +28,6 @@ use crate::{ bson::{doc, Bson}, client::options::ServerAddress, hello::{hello_command, HelloCommandResponse}, - selection_criteria::SelectionCriteria, }; use bson::Document; use semver::{Version, VersionReq}; @@ -366,14 +365,6 @@ impl TestClient { self.server_info.topology_version.is_some() } - pub(crate) async fn enable_failpoint( - &self, - fp: FailPoint, - criteria: impl Into>, - ) -> Result { - fp.enable(self, criteria).await - } - pub(crate) fn auth_enabled(&self) -> bool { self.client.options().credential.is_some() } diff --git a/src/test/util/failpoint.rs b/src/test/util/failpoint.rs index 345c2948a..db18ea47d 100644 --- a/src/test/util/failpoint.rs +++ b/src/test/util/failpoint.rs @@ -1,81 +1,149 @@ -use bson::{doc, Document}; -use serde::{Deserialize, Serialize, Serializer}; use std::time::Duration; -use typed_builder::TypedBuilder; + +use serde::{Deserialize, Serialize}; use crate::{ - action::Action, + bson::{doc, Document}, error::Result, - operation::append_options, - selection_criteria::SelectionCriteria, + selection_criteria::{ReadPreference, SelectionCriteria}, + 
test::log_uncaptured, Client, }; -// If you write a tokio test that uses this, make sure to annotate it with -// tokio::test(flavor = "multi_thread"). -// TODO RUST-1530 Make the error message here better. +impl Client { + /// Configure a fail point on this client. Any test that calls this method must use the + /// #[tokio::test(flavor = "multi_thread")] test annotation. The guard returned from this + /// method should remain in scope while the fail point is intended for use. Upon drop, the + /// guard will disable the fail point on the server. + pub(crate) async fn configure_fail_point( + &self, + fail_point: FailPoint, + ) -> Result<FailPointGuard> { + let command = bson::to_document(&fail_point)?; + self.database("admin") + .run_command(command) + .selection_criteria(fail_point.selection_criteria.clone()) + .await?; + + Ok(FailPointGuard { + client: self.clone(), + failure_type: fail_point.failure_type, + selection_criteria: fail_point.selection_criteria, + }) + } +} + #[derive(Clone, Debug, Deserialize, Serialize)] -pub struct FailPoint { - #[serde(flatten)] - command: Document, +pub(crate) struct FailPoint { + /// The type of failure to configure. The current valid values are "failCommand" and + /// "failGetMoreAfterCursorCheckout". + #[serde(rename = "configureFailPoint")] + failure_type: String, + + /// The fail point's mode. + mode: FailPointMode, + + /// The data associated with the fail point. This includes the commands that should fail and + /// the error information that should be returned. + #[serde(default)] + data: Document, + + /// The selection criteria to use when configuring this fail point. + #[serde(skip, default = "primary_selection_criteria")] + selection_criteria: SelectionCriteria, +} + +fn primary_selection_criteria() -> SelectionCriteria { + ReadPreference::Primary.into() } impl FailPoint { - fn name(&self) -> &str { - self.command.get_str("configureFailPoint").unwrap() + /// Creates a new failCommand FailPoint. Call the various builder methods on the returned + /// FailPoint to configure the type of failure that should occur. + pub(crate) fn new(command_names: &[&str], mode: FailPointMode) -> Self { + let data = doc! { "failCommands": command_names }; + Self { + failure_type: "failCommand".to_string(), + mode, + data, + selection_criteria: ReadPreference::Primary.into(), + } } - /// Create a failCommand failpoint. - /// See for more info. - pub fn fail_command( - fail_commands: &[&str], - mode: FailPointMode, - options: impl Into<Option<FailCommandOptions>>, - ) -> FailPoint { - let options = options.into(); - let mut data = doc! { - "failCommands": fail_commands.iter().map(|s| s.to_string()).collect::<Vec<String>>(), - }; - append_options(&mut data, options.as_ref()).unwrap(); - - let command = doc! { - "configureFailPoint": "failCommand", - "mode": bson::to_bson(&mode).unwrap(), - "data": data, - }; - FailPoint { command } + /// The appName that a client must use to hit this fail point. + pub(crate) fn app_name(mut self, app_name: impl Into<String>) -> Self { + self.data.insert("appName", app_name.into()); + self } - pub async fn enable( - &self, - client: &Client, - criteria: impl Into<Option<SelectionCriteria>>, - ) -> Result<FailPointGuard> { - let criteria = criteria.into(); - client - .database("admin") - .run_command(self.command.clone()) - .optional(criteria.clone(), |a, c| a.selection_criteria(c)) - .await?; - Ok(FailPointGuard { - failpoint_name: self.name().to_string(), - client: client.clone(), - criteria, - }) + /// How long the server should block the affected commands. Only available on 4.2.9+ servers.
+ pub(crate) fn block_connection(mut self, block_connection_duration: Duration) -> Self { + self.data.insert("blockConnection", true); + self.data + .insert("blockTimeMS", block_connection_duration.as_millis() as i64); + self + } + + /// Whether the server should close the connection when the client sends an affected command. + /// Defaults to false. + pub(crate) fn close_connection(mut self, close_connection: bool) -> Self { + self.data.insert("closeConnection", close_connection); + self + } + + /// The error code to include in the server's reply to an affected command. + pub(crate) fn error_code(mut self, error_code: i64) -> Self { + self.data.insert("errorCode", error_code); + self } + + /// The error labels to include in the server's reply to an affected command. Note that the + /// value passed to this method will completely override the labels that the server would + /// otherwise return. Only available on 4.4+ servers. + pub(crate) fn error_labels( + mut self, + error_labels: impl IntoIterator<Item = impl Into<String>>, + ) -> Self { + let error_labels: Vec<String> = error_labels.into_iter().map(Into::into).collect(); + self.data.insert("errorLabels", error_labels); + self + } + + /// The write concern error to include in the server's reply to an affected command. + pub(crate) fn write_concern_error(mut self, write_concern_error: Document) -> Self { + self.data.insert("writeConcernError", write_concern_error); + self + } + + /// The selection criteria to use when enabling this fail point. Defaults to a primary read + /// preference if unspecified. + pub(crate) fn selection_criteria(mut self, selection_criteria: SelectionCriteria) -> Self { + self.selection_criteria = selection_criteria; + self + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(unused)] +pub(crate) enum FailPointMode { + AlwaysOn, + Times(i32), + Skip(i32), + Off, + ActivationProbability(f32), } #[derive(Debug)] -pub struct FailPointGuard { +pub(crate) struct FailPointGuard { client: Client, - failpoint_name: String, - criteria: Option<SelectionCriteria>, + failure_type: String, + selection_criteria: SelectionCriteria, } impl Drop for FailPointGuard { fn drop(&mut self) { let client = self.client.clone(); - let name = self.failpoint_name.clone(); // This forces the Tokio runtime to not finish shutdown until this future has completed. // Unfortunately, this also means that tests using FailPointGuards have to use the @@ -84,69 +152,16 @@ impl Drop for FailPointGuard { futures::executor::block_on(async move { client .database("admin") - .run_command(doc! { "configureFailPoint": name, "mode": "off" }) - .optional(self.criteria.clone(), |a, c| a.selection_criteria(c)) + .run_command( + doc! { "configureFailPoint": self.failure_type.clone(), "mode": "off" }, + ) + .selection_criteria(self.selection_criteria.clone()) + .await }) }); - if let Err(e) = result { - println!("failed disabling failpoint: {:?}", e); - } - } -} - -#[derive(Serialize)] -#[serde(rename_all = "camelCase")] -#[allow(unused)] -pub enum FailPointMode { - AlwaysOn, - Times(i32), - Skip(i32), - Off, - ActivationProbability(f32), -} - -#[serde_with::skip_serializing_none] -#[derive(Debug, Default, TypedBuilder, Serialize)] -#[builder(field_defaults(default, setter(into)))] -#[serde(rename_all = "camelCase")] -pub struct FailCommandOptions { - /// The appName that a client must use in order to hit this fail point. - app_name: Option<String>, - - /// If non-null, how long the server should block the affected commands. - /// Only available in 4.2.9+.
- #[serde(serialize_with = "serialize_block_connection")] - #[serde(flatten)] - block_connection: Option, - - /// Whether the server should hang up when the client sends an affected command - close_connection: Option, - - /// The error code to include in the server's reply to an affected command. - error_code: Option, - - /// Array of error labels to be included in the server's reply to an affected command. Passing - /// in an empty array suppresses all error labels that would otherwise be returned by the - /// server. The existence of the "errorLabels" field in the failCommand failpoint completely - /// overrides the server's normal error labels adding behaviors for the affected commands. - /// Only available in 4.4+. - error_labels: Option>, - - /// Document to be returned as a write concern error. - write_concern_error: Option, -} - -fn serialize_block_connection( - val: &Option, - serializer: S, -) -> std::result::Result { - match val { - Some(duration) => { - (doc! { "blockConnection": true, "blockTimeMS": duration.as_millis() as i64}) - .serialize(serializer) + if let Err(error) = result { + log_uncaptured(format!("failed disabling failpoint: {:?}", error)); } - None => serializer.serialize_none(), } } From a1375a882b984fb23f542b5681e3b599cc282048 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 4 Apr 2024 10:16:27 -0600 Subject: [PATCH 25/75] resync retryable writes --- .../client-bulkWrite-clientErrors.json | 350 ++++++++++++++++++ .../unified/client-bulkWrite-clientErrors.yml | 172 +++++++++ 2 files changed, 522 insertions(+) create mode 100644 src/test/spec/json/retryable-writes/unified/client-bulkWrite-clientErrors.json create mode 100644 src/test/spec/json/retryable-writes/unified/client-bulkWrite-clientErrors.yml diff --git a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-clientErrors.json b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-clientErrors.json new file mode 100644 index 000000000..64b1988ff --- /dev/null +++ b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-clientErrors.json @@ -0,0 +1,350 @@ +{ + "description": "client bulkWrite retryable writes with client errors", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with one network error succeeds after retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + 
"document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with two network errors fails after retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "isClientError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-clientErrors.yml b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-clientErrors.yml new file mode 100644 index 000000000..d1b08ec37 --- /dev/null +++ 
b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-clientErrors.yml @@ -0,0 +1,172 @@ +description: "client bulkWrite retryable writes with client errors" +schemaVersion: "1.20" +runOnRequirements: + - minServerVersion: "8.0" + topologies: + - replicaset + - sharded + - load-balanced + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + useMultipleMongoses: false + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name retryable-writes-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "retryable-writes-tests.coll0" + +tests: + - description: "client bulkWrite with one network error succeeds after retry" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + closeConnection: true + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 4 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 4, x: 44 } + - description: "client bulkWrite with two network errors fails after retry" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 2 + data: + failCommands: [ bulkWrite ] + closeConnection: true + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + verboseResults: true + expectError: + isClientError: true + errorLabelsContain: ["RetryableWriteError"] # Error label added by driver. 
+ expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } From 6d2b8f2298eec02025fa2d5713ab5953d745aab5 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 4 Apr 2024 10:21:08 -0600 Subject: [PATCH 26/75] sync bypassdocumentvalidation test --- .../client-bulkWrite-options.json | 91 +++++++++++++++++++ .../client-bulkWrite-options.yml | 45 +++++++++ 2 files changed, 136 insertions(+) diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json index 56e0cb5cf..e0e1aa225 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json @@ -383,6 +383,97 @@ ] } ] + }, + { + "description": "client bulkWrite bypassDocumentValidation: false is sent", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "bypassDocumentValidation": false, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "bypassDocumentValidation": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] } ] } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml index 9c3883542..a82108c08 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml @@ -184,3 +184,48 @@ tests: collectionName: *collection0Name documents: - { _id: 1, x: 12 } + - description: "client bulkWrite bypassDocumentValidation: false is sent" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + 
bypassDocumentValidation: false + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + bypassDocumentValidation: false + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } From b80ac981546069ce6b3949305747f4b749611d6b Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 4 Apr 2024 10:33:26 -0600 Subject: [PATCH 27/75] add partial result check --- src/test/bulk_write.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 82afed90b..0c84156ab 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -189,4 +189,10 @@ async fn write_concern_errors_are_collected() { }; assert_eq!(bulk_write_error.write_concern_errors.len(), 2); + + let partial_result = bulk_write_error.partial_result.unwrap(); + assert_eq!( + partial_result.inserted_count as usize, + max_write_batch_size + 1 + ); } From d566c998ec01bc97c9ab175d6d3e960604461b98 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 4 Apr 2024 11:34:10 -0600 Subject: [PATCH 28/75] prose test updates --- src/action/bulk_write.rs | 11 +-- src/operation/bulk_write.rs | 12 ++-- src/test/bulk_write.rs | 136 +++++++++++++++++++++++++----------- 3 files changed, 108 insertions(+), 51 deletions(-) diff --git a/src/action/bulk_write.rs b/src/action/bulk_write.rs index b9383b224..def9c4877 100644 --- a/src/action/bulk_write.rs +++ b/src/action/bulk_write.rs @@ -191,10 +191,13 @@ impl ExecutionStatus { Self::Error(ref error) => { match *error.kind { ErrorKind::ClientBulkWrite(ref bulk_write_error) => { - // A top-level error is always fatal. If an individual operation fails - // during an ordered bulk write, no more batches should be executed. - !(error.source.is_some() - || (ordered && !bulk_write_error.write_errors.is_empty())) + // A top-level error is always fatal. + let top_level_error_occurred = error.source.is_some(); + // A write error occurring during an ordered bulk write is fatal. + let terminal_write_error_occurred = + ordered && !bulk_write_error.write_errors.is_empty(); + + !top_level_error_occurred && !terminal_write_error_occurred } // A top-level error is always fatal. _ => false, diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index d401c8c8f..08be24680 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -77,21 +77,21 @@ impl<'a> BulkWrite<'a> { let result = &mut error.partial_result; while let Some(response) = stream.try_next().await? 
{ + let index = response.index + self.offset; match response.result { SingleOperationResult::Success { n, n_modified, upserted, } => { - let result_index = response.index + self.offset; let model = self.get_model(response.index)?; match model.operation_type() { OperationType::Insert => { - let inserted_id = self.get_inserted_id(result_index)?; + let inserted_id = self.get_inserted_id(index)?; let insert_result = InsertOneResult { inserted_id }; result .get_or_insert_with(|| BulkWriteResult::new(self.is_verbose())) - .add_insert_result(result_index, insert_result); + .add_insert_result(index, insert_result); } OperationType::Update => { let modified_count = @@ -107,18 +107,18 @@ impl<'a> BulkWrite<'a> { }; result .get_or_insert_with(|| BulkWriteResult::new(self.is_verbose())) - .add_update_result(result_index, update_result); + .add_update_result(index, update_result); } OperationType::Delete => { let delete_result = DeleteResult { deleted_count: n }; result .get_or_insert_with(|| BulkWriteResult::new(self.is_verbose())) - .add_delete_result(result_index, delete_result); + .add_delete_result(index, delete_result); } } } SingleOperationResult::Error(write_error) => { - error.write_errors.insert(response.index, write_error); + error.write_errors.insert(index, write_error); } } } diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 0c84156ab..6e36c15d2 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -16,6 +16,7 @@ use crate::{ EventHandler, FailPoint, FailPointMode, + TestClient, }, Client, Namespace, @@ -109,6 +110,100 @@ async fn max_message_size_bytes_batching() { assert_eq!(second_len, 1); } +#[tokio::test(flavor = "multi_thread")] +async fn write_concern_error_batches() { + let mut options = get_client_options().await.clone(); + options.retry_writes = Some(false); + + let handler = Arc::new(EventHandler::new()); + let client = Client::test_builder() + .options(options) + .event_handler(handler.clone()) + .build() + .await; + let mut subscriber = handler.subscribe(); + + if client.server_version_lt(8, 0) { + log_uncaptured("skipping write_concern_error_batches: bulkWrite requires 8.0+"); + return; + } + + let max_write_batch_size = client.server_info.max_write_batch_size.unwrap() as usize; + + let fail_point = FailPoint::new(&["bulkWrite"], FailPointMode::Times(2)) + .write_concern_error(doc! { "code": 91, "errmsg": "Replication is being shut down" }); + let _guard = client.configure_fail_point(fail_point).await.unwrap(); + + let models = vec![ + WriteModel::InsertOne { + namespace: Namespace::new("db", "coll"), + document: doc! { "a": "b" } + }; + max_write_batch_size + 1 + ]; + let error = client.bulk_write(models).ordered(false).await.unwrap_err(); + + let ErrorKind::ClientBulkWrite(bulk_write_error) = *error.kind else { + panic!("Expected bulk write error, got {:?}", error); + }; + + assert_eq!(bulk_write_error.write_concern_errors.len(), 2); + + let partial_result = bulk_write_error.partial_result.unwrap(); + assert_eq!( + partial_result.inserted_count as usize, + max_write_batch_size + 1 + ); +} + +#[tokio::test] +async fn write_error_batches() { + let client = TestClient::new().await; + + if client.server_version_lt(8, 0) { + log_uncaptured("skipping write_error_batches: bulkWrite requires 8.0+"); + return; + } + + let max_write_batch_size = client.server_info.max_write_batch_size.unwrap() as usize; + + let document = doc! 
{ "_id": 1 }; + let collection = client.database("db").collection("coll"); + collection.drop().await.unwrap(); + collection.insert_one(document.clone(), None).await.unwrap(); + + let models = vec![ + WriteModel::InsertOne { + namespace: collection.namespace(), + document, + }; + max_write_batch_size + 1 + ]; + + let error = client + .bulk_write(models.clone()) + .ordered(false) + .await + .unwrap_err(); + + let ErrorKind::ClientBulkWrite(bulk_write_error) = *error.kind else { + panic!("Expected bulk write error, got {:?}", error); + }; + + assert_eq!( + bulk_write_error.write_errors.len(), + max_write_batch_size + 1 + ); + + let error = client.bulk_write(models).await.unwrap_err(); + + let ErrorKind::ClientBulkWrite(bulk_write_error) = *error.kind else { + panic!("Expected bulk write error, got {:?}", error); + }; + + assert_eq!(bulk_write_error.write_errors.len(), 1); +} + #[tokio::test] async fn cursor_iteration() { let handler = Arc::new(EventHandler::new()); @@ -119,7 +214,6 @@ async fn cursor_iteration() { let mut subscriber = handler.subscribe(); let max_bson_object_size = client.server_info.max_bson_object_size as usize; - // 8.0+ servers always report this value let max_write_batch_size = client.server_info.max_write_batch_size.unwrap() as usize; let id_size = max_bson_object_size / max_write_batch_size; @@ -147,7 +241,6 @@ async fn cursor_iteration() { }; assert!(bulk_write_error.write_concern_errors.is_empty()); - // assert!(bulk_write_error.partial_result.is_none()); let write_errors = bulk_write_error.write_errors; assert_eq!(write_errors.len(), max_write_batch_size); @@ -157,42 +250,3 @@ async fn cursor_iteration() { .await .expect("no getMore observed"); } - -#[tokio::test(flavor = "multi_thread")] -async fn write_concern_errors_are_collected() { - let mut options = get_client_options().await.clone(); - options.retry_writes = Some(false); - let client = Client::test_builder().options(options).build().await; - - if client.server_version_lt(8, 0) { - log_uncaptured("skipping max_write_batch_size_batching: bulkWrite requires 8.0+"); - return; - } - - let max_write_batch_size = client.server_info.max_write_batch_size.unwrap() as usize; - - let fail_point = FailPoint::new(&["bulkWrite"], FailPointMode::Times(2)) - .write_concern_error(doc! { "code": 91, "errmsg": "Replication is being shut down" }); - let _guard = client.configure_fail_point(fail_point).await.unwrap(); - - let models = vec![ - WriteModel::InsertOne { - namespace: Namespace::new("db", "coll"), - document: doc! 
{ "a": "b" } - }; - max_write_batch_size + 1 - ]; - let error = client.bulk_write(models).ordered(false).await.unwrap_err(); - - let ErrorKind::ClientBulkWrite(bulk_write_error) = *error.kind else { - panic!("Expected bulk write error, got {:?}", error); - }; - - assert_eq!(bulk_write_error.write_concern_errors.len(), 2); - - let partial_result = bulk_write_error.partial_result.unwrap(); - assert_eq!( - partial_result.inserted_count as usize, - max_write_batch_size + 1 - ); -} From 37e0a7341c660f83875bc4eb437bd5d2023dc46e Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 4 Apr 2024 13:28:57 -0600 Subject: [PATCH 29/75] Merge branch 'main' into bulk-write-merge --- .evergreen/config.yml | 36 +- .evergreen/run-tests.sh | 12 +- Cargo.toml | 1 + action_macro/.gitignore | 1 + action_macro/Cargo.toml | 15 + action_macro/src/lib.rs | 326 ++++++++++ manual/src/README.md | 2 +- manual/src/encryption.md | 30 +- manual/src/reading.md | 9 +- manual/src/tracing.md | 4 +- rustfmt.toml | 3 +- src/action.rs | 125 ++-- src/action/aggregate.rs | 110 ++-- src/action/bulk_write.rs | 77 ++- src/action/count.rs | 66 +- src/action/create_collection.rs | 18 +- src/action/create_index.rs | 70 +- src/action/csfle/create_data_key.rs | 5 +- .../csfle/create_encrypted_collection.rs | 85 +-- src/action/csfle/encrypt.rs | 8 +- src/action/delete.rs | 43 +- src/action/distinct.rs | 49 +- src/action/drop.rs | 45 +- src/action/drop_index.rs | 61 +- src/action/find.rs | 213 ++++++ src/action/find_and_modify.rs | 318 +++++++++ src/action/gridfs.rs | 15 + src/action/gridfs/delete.rs | 68 ++ src/action/gridfs/download.rs | 168 +++++ src/action/gridfs/drop.rs | 39 ++ src/action/gridfs/find.rs | 126 ++++ src/action/gridfs/rename.rs | 53 ++ src/action/gridfs/upload.rs | 87 +++ src/action/insert_many.rs | 198 ++++++ src/action/insert_one.rs | 114 ++++ src/action/list_collections.rs | 133 ++-- src/action/list_databases.rs | 86 +-- src/action/list_indexes.rs | 138 ++-- src/action/perf.rs | 2 +- src/action/replace_one.rs | 107 +++ src/action/run_command.rs | 171 ++--- src/action/search_index.rs | 283 ++++++++ src/action/session.rs | 29 +- src/action/shutdown.rs | 16 +- src/action/transaction.rs | 235 +++++++ src/action/update.rs | 62 +- src/action/watch.rs | 119 ++-- src/bson_util.rs | 7 +- src/change_stream.rs | 2 +- src/change_stream/session.rs | 2 +- src/checked.rs | 502 ++++++++++++++ src/client/action/perf.rs | 29 +- src/client/action/shutdown.rs | 45 +- src/client/auth.rs | 75 ++- src/client/auth/oidc.rs | 458 +++++++++++-- src/client/csfle/client_encryption.rs | 9 +- .../client_encryption/create_data_key.rs | 35 +- src/client/csfle/client_encryption/encrypt.rs | 62 +- src/client/csfle/state_machine.rs | 9 +- src/client/executor.rs | 27 +- src/client/options.rs | 161 +++-- src/client/options/test.rs | 342 +++++----- src/client/session.rs | 364 +---------- src/client/session/action.rs | 388 +++++++++++ src/client/session/test.rs | 131 ++-- src/client/session/test/causal_consistency.rs | 125 ++-- src/cmap/conn.rs | 71 +- src/cmap/conn/wire/message.rs | 173 +++-- src/cmap/establish.rs | 5 + src/cmap/establish/handshake.rs | 69 +- src/cmap/establish/handshake/test.rs | 10 + src/cmap/test.rs | 38 +- src/cmap/test/event.rs | 83 +-- src/cmap/test/integration.rs | 33 +- src/coll.rs | 549 +--------------- src/coll/action/drop.rs | 32 +- src/coll/options.rs | 9 + src/collation.rs | 2 +- src/compression.rs | 322 +-------- src/compression/compress.rs | 80 +++ src/compression/compressors.rs | 140 ++++ src/compression/decompress.rs | 
70 ++ src/compression/test.rs | 113 ---- src/concern/test.rs | 372 +++++------ src/cursor.rs | 18 +- src/cursor/session.rs | 30 +- src/db.rs | 4 +- src/db/action/create_collection.rs | 77 +-- src/error.rs | 15 +- src/gridfs.rs | 90 +-- src/gridfs/download.rs | 231 +------ src/gridfs/upload.rs | 259 ++------ src/hello.rs | 8 +- src/lib.rs | 19 +- src/operation.rs | 2 +- src/operation/bulk_write.rs | 4 +- src/operation/find.rs | 12 +- src/operation/find_and_modify.rs | 98 +-- src/operation/find_and_modify/options.rs | 16 +- src/operation/get_more.rs | 5 +- src/operation/insert.rs | 80 +-- src/operation/insert/test.rs | 153 ----- src/operation/update.rs | 66 +- src/options.rs | 7 +- .../description/topology/server_selection.rs | 48 +- .../server_selection/test/in_window.rs | 59 +- .../topology/server_selection/test/logic.rs | 37 +- src/sdam/description/topology/test.rs | 1 + src/sdam/description/topology/test/sdam.rs | 38 +- src/sdam/test.rs | 28 +- src/search_index.rs | 82 +-- src/selection_criteria.rs | 216 +++---- src/serde_util.rs | 22 +- src/sync/change_stream.rs | 4 +- src/sync/client/session.rs | 172 +---- src/sync/coll.rs | 348 +--------- src/sync/cursor.rs | 38 +- src/sync/db.rs | 4 +- src/sync/gridfs.rs | 123 +--- src/sync/test.rs | 64 +- src/test.rs | 83 +-- src/test/atlas_connectivity.rs | 2 +- src/test/auth_aws.rs | 22 +- src/test/bulk_write.rs | 98 +-- src/test/change_stream.rs | 93 ++- src/test/client.rs | 88 ++- src/test/coll.rs | 221 +++---- src/test/compression.rs | 28 + src/test/csfle.rs | 223 +++---- src/test/cursor.rs | 115 ++-- src/test/db.rs | 14 +- src/test/documentation_examples.rs | 611 +++++++----------- .../aggregation_data.rs | 455 +++++++------ src/test/index_management.rs | 71 +- src/test/spec/auth.rs | 2 +- src/test/spec/connection_stepdown.rs | 55 +- src/test/spec/crud_v1.rs | 5 +- src/test/spec/crud_v1/aggregate.rs | 2 +- src/test/spec/crud_v1/count.rs | 2 +- src/test/spec/crud_v1/delete_many.rs | 2 +- src/test/spec/crud_v1/delete_one.rs | 2 +- src/test/spec/crud_v1/distinct.rs | 2 +- src/test/spec/crud_v1/find.rs | 5 +- src/test/spec/crud_v1/find_one_and_delete.rs | 5 +- src/test/spec/crud_v1/find_one_and_replace.rs | 5 +- src/test/spec/crud_v1/find_one_and_update.rs | 5 +- src/test/spec/crud_v1/insert_many.rs | 11 +- src/test/spec/crud_v1/insert_one.rs | 4 +- src/test/spec/crud_v1/replace_one.rs | 5 +- src/test/spec/crud_v1/update_many.rs | 2 +- src/test/spec/crud_v1/update_one.rs | 2 +- src/test/spec/gridfs.rs | 79 ++- src/test/spec/index_management.rs | 49 +- .../legacy/fle2v2-Range-Date-Aggregate.json | 3 +- .../legacy/fle2v2-Range-Date-Aggregate.yml | 2 + .../legacy/fle2v2-Range-Date-Correctness.json | 3 +- .../legacy/fle2v2-Range-Date-Correctness.yml | 2 + .../legacy/fle2v2-Range-Date-Delete.json | 3 +- .../legacy/fle2v2-Range-Date-Delete.yml | 2 + .../fle2v2-Range-Date-FindOneAndUpdate.json | 3 +- .../fle2v2-Range-Date-FindOneAndUpdate.yml | 2 + .../legacy/fle2v2-Range-Date-InsertFind.json | 3 +- .../legacy/fle2v2-Range-Date-InsertFind.yml | 2 + .../legacy/fle2v2-Range-Date-Update.json | 3 +- .../legacy/fle2v2-Range-Date-Update.yml | 2 + .../fle2v2-Range-Decimal-Aggregate.json | 3 +- .../legacy/fle2v2-Range-Decimal-Aggregate.yml | 2 + .../fle2v2-Range-Decimal-Correctness.json | 3 +- .../fle2v2-Range-Decimal-Correctness.yml | 2 + .../legacy/fle2v2-Range-Decimal-Delete.json | 3 +- .../legacy/fle2v2-Range-Decimal-Delete.yml | 2 + ...fle2v2-Range-Decimal-FindOneAndUpdate.json | 3 +- .../fle2v2-Range-Decimal-FindOneAndUpdate.yml | 2 + 
.../fle2v2-Range-Decimal-InsertFind.json | 3 +- .../fle2v2-Range-Decimal-InsertFind.yml | 2 + .../legacy/fle2v2-Range-Decimal-Update.json | 3 +- .../legacy/fle2v2-Range-Decimal-Update.yml | 2 + ...e2v2-Range-DecimalPrecision-Aggregate.json | 3 +- ...le2v2-Range-DecimalPrecision-Aggregate.yml | 2 + ...v2-Range-DecimalPrecision-Correctness.json | 3 +- ...2v2-Range-DecimalPrecision-Correctness.yml | 2 + .../fle2v2-Range-DecimalPrecision-Delete.json | 3 +- .../fle2v2-Range-DecimalPrecision-Delete.yml | 2 + ...nge-DecimalPrecision-FindOneAndUpdate.json | 3 +- ...ange-DecimalPrecision-FindOneAndUpdate.yml | 2 + ...2v2-Range-DecimalPrecision-InsertFind.json | 3 +- ...e2v2-Range-DecimalPrecision-InsertFind.yml | 2 + .../fle2v2-Range-DecimalPrecision-Update.json | 3 +- .../fle2v2-Range-DecimalPrecision-Update.yml | 2 + .../legacy/fle2v2-Range-Double-Aggregate.json | 3 +- .../legacy/fle2v2-Range-Double-Aggregate.yml | 2 + .../fle2v2-Range-Double-Correctness.json | 3 +- .../fle2v2-Range-Double-Correctness.yml | 2 + .../legacy/fle2v2-Range-Double-Delete.json | 3 +- .../legacy/fle2v2-Range-Double-Delete.yml | 2 + .../fle2v2-Range-Double-FindOneAndUpdate.json | 3 +- .../fle2v2-Range-Double-FindOneAndUpdate.yml | 2 + .../fle2v2-Range-Double-InsertFind.json | 3 +- .../legacy/fle2v2-Range-Double-InsertFind.yml | 2 + .../legacy/fle2v2-Range-Double-Update.json | 3 +- .../legacy/fle2v2-Range-Double-Update.yml | 2 + ...le2v2-Range-DoublePrecision-Aggregate.json | 3 +- ...fle2v2-Range-DoublePrecision-Aggregate.yml | 2 + ...2v2-Range-DoublePrecision-Correctness.json | 3 +- ...e2v2-Range-DoublePrecision-Correctness.yml | 2 + .../fle2v2-Range-DoublePrecision-Delete.json | 3 +- .../fle2v2-Range-DoublePrecision-Delete.yml | 2 + ...ange-DoublePrecision-FindOneAndUpdate.json | 3 +- ...Range-DoublePrecision-FindOneAndUpdate.yml | 2 + ...e2v2-Range-DoublePrecision-InsertFind.json | 3 +- ...le2v2-Range-DoublePrecision-InsertFind.yml | 2 + .../fle2v2-Range-DoublePrecision-Update.json | 3 +- .../fle2v2-Range-DoublePrecision-Update.yml | 2 + .../legacy/fle2v2-Range-Int-Aggregate.json | 3 +- .../legacy/fle2v2-Range-Int-Aggregate.yml | 2 + .../legacy/fle2v2-Range-Int-Correctness.json | 3 +- .../legacy/fle2v2-Range-Int-Correctness.yml | 2 + .../legacy/fle2v2-Range-Int-Delete.json | 3 +- .../legacy/fle2v2-Range-Int-Delete.yml | 2 + .../fle2v2-Range-Int-FindOneAndUpdate.json | 3 +- .../fle2v2-Range-Int-FindOneAndUpdate.yml | 2 + .../legacy/fle2v2-Range-Int-InsertFind.json | 3 +- .../legacy/fle2v2-Range-Int-InsertFind.yml | 2 + .../legacy/fle2v2-Range-Int-Update.json | 3 +- .../legacy/fle2v2-Range-Int-Update.yml | 2 + .../legacy/fle2v2-Range-Long-Aggregate.json | 3 +- .../legacy/fle2v2-Range-Long-Aggregate.yml | 2 + .../legacy/fle2v2-Range-Long-Correctness.json | 3 +- .../legacy/fle2v2-Range-Long-Correctness.yml | 2 + .../legacy/fle2v2-Range-Long-Delete.json | 3 +- .../legacy/fle2v2-Range-Long-Delete.yml | 2 + .../fle2v2-Range-Long-FindOneAndUpdate.json | 3 +- .../fle2v2-Range-Long-FindOneAndUpdate.yml | 2 + .../legacy/fle2v2-Range-Long-InsertFind.json | 3 +- .../legacy/fle2v2-Range-Long-InsertFind.yml | 2 + .../legacy/fle2v2-Range-Long-Update.json | 3 +- .../legacy/fle2v2-Range-Long-Update.yml | 2 + .../legacy/fle2v2-Range-WrongType.json | 3 +- .../legacy/fle2v2-Range-WrongType.yml | 2 + src/test/spec/oidc.rs | 83 ++- src/test/spec/retryable_reads.rs | 53 +- src/test/spec/retryable_writes.rs | 88 ++- src/test/spec/sdam.rs | 91 ++- src/test/spec/sessions.rs | 39 +- src/test/spec/trace.rs | 47 +- src/test/spec/transactions.rs | 
113 ++-- src/test/spec/unified_runner.rs | 1 - src/test/spec/unified_runner/entity.rs | 126 +++- src/test/spec/unified_runner/observer.rs | 105 --- src/test/spec/unified_runner/operation.rs | 144 ++--- .../unified_runner/operation/search_index.rs | 24 +- src/test/spec/unified_runner/test_file.rs | 27 +- src/test/spec/unified_runner/test_runner.rs | 81 ++- src/test/spec/v2_runner.rs | 22 +- src/test/spec/v2_runner/csfle.rs | 2 +- src/test/spec/v2_runner/operation.rs | 159 ++--- src/test/spec/v2_runner/test_file.rs | 8 +- src/test/spec/write_error.rs | 13 +- src/test/util.rs | 79 +-- src/test/util/event.rs | 590 ++--------------- src/test/util/event_buffer.rs | 484 ++++++++++++++ src/test/util/subscriber.rs | 118 ---- src/test/util/trace.rs | 21 +- tests/readme_examples.rs | 13 +- tests/transactions_example.rs | 24 +- 265 files changed, 9030 insertions(+), 7511 deletions(-) create mode 100644 action_macro/.gitignore create mode 100644 action_macro/Cargo.toml create mode 100644 action_macro/src/lib.rs create mode 100644 src/action/find.rs create mode 100644 src/action/find_and_modify.rs create mode 100644 src/action/gridfs.rs create mode 100644 src/action/gridfs/delete.rs create mode 100644 src/action/gridfs/download.rs create mode 100644 src/action/gridfs/drop.rs create mode 100644 src/action/gridfs/find.rs create mode 100644 src/action/gridfs/rename.rs create mode 100644 src/action/gridfs/upload.rs create mode 100644 src/action/insert_many.rs create mode 100644 src/action/insert_one.rs create mode 100644 src/action/replace_one.rs create mode 100644 src/action/search_index.rs create mode 100644 src/action/transaction.rs create mode 100644 src/checked.rs create mode 100644 src/client/session/action.rs create mode 100644 src/compression/compress.rs create mode 100644 src/compression/compressors.rs create mode 100644 src/compression/decompress.rs delete mode 100644 src/compression/test.rs delete mode 100644 src/operation/insert/test.rs create mode 100644 src/test/compression.rs delete mode 100644 src/test/spec/unified_runner/observer.rs create mode 100644 src/test/util/event_buffer.rs delete mode 100644 src/test/util/subscriber.rs diff --git a/.evergreen/config.yml b/.evergreen/config.yml index 5f16a6e58..165d95c20 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -171,11 +171,10 @@ buildvariants: run_on: - rhel87-small expansions: - COMPRESSION: true AUTH: auth SSL: ssl tasks: - - .rapid .replicaset + - .compression - name: stable-api display_name: "Stable API V1" @@ -297,7 +296,7 @@ buildvariants: display_name: OIDC patchable: false run_on: - - rhel87-small + - ubuntu2204-small expansions: AUTH: auth SSL: ssl @@ -872,7 +871,30 @@ tasks: TOPOLOGY: sharded_cluster - func: "run driver test suite" - - name: test-compression + - name: test-zstd-compression + tags: [compression] + commands: + - func: "bootstrap mongo-orchestration" + vars: + MONGODB_VERSION: rapid + TOPOLOGY: replica_set + - func: "run driver test suite" + vars: + ZSTD: true + + - name: test-zlib-compression + tags: [compression] + commands: + - func: "bootstrap mongo-orchestration" + vars: + MONGODB_VERSION: rapid + TOPOLOGY: replica_set + - func: "run driver test suite" + vars: + ZLIB: true + + - name: test-snappy-compression + tags: [compression] commands: - func: "bootstrap mongo-orchestration" vars: @@ -880,7 +902,7 @@ tasks: TOPOLOGY: replica_set - func: "run driver test suite" vars: - COMPRESSION: true + SNAPPY: true - name: test-aws-auth-regular-credentials tags: [aws-auth] @@ -1485,10 +1507,12 @@ functions: 
include_expansions_in_env: - PROJECT_DIRECTORY - OPENSSL - - COMPRESSION - MONGODB_URI - MONGODB_API_VERSION - PATH + - ZSTD + - ZLIB + - SNAPPY "run sync tests": - command: subprocess.exec diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index f89213d63..41df7ef0d 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -12,8 +12,16 @@ if [ "$OPENSSL" = true ]; then FEATURE_FLAGS+=("openssl-tls") fi -if [ "$COMPRESSION" = true ]; then - FEATURE_FLAGS+=("snappy-compression", "zlib-compression", "zstd-compression") +if [ "$ZSTD" = true ]; then + FEATURE_FLAGS+=("zstd-compression") +fi + +if [ "$ZLIB" = true ]; then + FEATURE_FLAGS+=("zlib-compression") +fi + +if [ "$SNAPPY" = true ]; then + FEATURE_FLAGS+=("snappy-compression") fi export SESSION_TEST_REQUIRE_MONGOCRYPTD=true diff --git a/Cargo.toml b/Cargo.toml index 9d0ed742b..6bf1b3540 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -69,6 +69,7 @@ in-use-encryption-unstable = ["mongocrypt", "rayon", "num_cpus"] tracing-unstable = ["tracing", "log"] [dependencies] +action_macro = { path = "action_macro" } async-trait = "0.1.42" base64 = "0.13.0" bitflags = "1.1.0" diff --git a/action_macro/.gitignore b/action_macro/.gitignore new file mode 100644 index 000000000..b83d22266 --- /dev/null +++ b/action_macro/.gitignore @@ -0,0 +1 @@ +/target/ diff --git a/action_macro/Cargo.toml b/action_macro/Cargo.toml new file mode 100644 index 000000000..5d3a69a54 --- /dev/null +++ b/action_macro/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "action_macro" +version = "0.1.0" +edition = "2021" +license = "Apache-2.0" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +proc-macro2 = "1.0.78" +quote = "1.0.35" +syn = { version = "2.0.52", features = ["full", "parsing", "proc-macro", "extra-traits"] } + +[lib] +proc-macro = true diff --git a/action_macro/src/lib.rs b/action_macro/src/lib.rs new file mode 100644 index 000000000..f68edbce3 --- /dev/null +++ b/action_macro/src/lib.rs @@ -0,0 +1,326 @@ +extern crate proc_macro; + +use proc_macro2::Span; +use quote::{quote, ToTokens}; +use syn::{ + braced, + parenthesized, + parse::{Parse, ParseStream}, + parse_macro_input, + parse_quote, + parse_quote_spanned, + spanned::Spanned, + visit_mut::VisitMut, + Block, + Error, + Expr, + Generics, + Ident, + ImplItemFn, + Lifetime, + Lit, + Meta, + PathArguments, + Token, + Type, +}; + +/// Generates: +/// * an `IntoFuture` executing the given method body +/// * an opaque wrapper type for the future in case we want to do something more fancy than +/// BoxFuture. 
+/// * a `run` method for sync execution, optionally with a wrapper function +#[proc_macro_attribute] +pub fn action_impl( + attrs: proc_macro::TokenStream, + input: proc_macro::TokenStream, +) -> proc_macro::TokenStream { + let ActionImplAttrs { sync_type } = parse_macro_input!(attrs as ActionImplAttrs); + let ActionImpl { + generics, + lifetime, + action, + future_name, + exec_self_mut, + exec_output, + exec_body, + } = parse_macro_input!(input as ActionImpl); + + let mut unbounded_generics = generics.clone(); + for lt in unbounded_generics.lifetimes_mut() { + lt.bounds.clear(); + } + for ty in unbounded_generics.type_params_mut() { + ty.bounds.clear(); + } + + let sync_run = if let Some(sync_type) = sync_type { + // In expression position, the type needs to be of the form Foo::, not Foo + let mut formal = sync_type.clone(); + struct Visitor; + impl VisitMut for Visitor { + fn visit_path_segment_mut(&mut self, segment: &mut syn::PathSegment) { + if let PathArguments::AngleBracketed(args) = &mut segment.arguments { + if args.colon2_token.is_none() { + args.colon2_token = Some(Token![::](Span::call_site())); + } + } + } + } + syn::visit_mut::visit_type_mut(&mut Visitor, &mut formal); + quote! { + /// Synchronously execute this action. + pub fn run(self) -> Result<#sync_type> { + crate::sync::TOKIO_RUNTIME.block_on(std::future::IntoFuture::into_future(self)).map(#formal::new) + } + } + } else { + quote! { + /// Synchronously execute this action. + pub fn run(self) -> #exec_output { + crate::sync::TOKIO_RUNTIME.block_on(std::future::IntoFuture::into_future(self)) + } + } + }; + + quote! { + impl #generics crate::action::private::Sealed for #action { } + + impl #generics crate::action::Action for #action { } + + impl #generics std::future::IntoFuture for #action { + type Output = #exec_output; + type IntoFuture = #future_name #unbounded_generics; + + fn into_future(#exec_self_mut self) -> Self::IntoFuture { + #future_name (Box::pin(async move { + #exec_body + })) + } + } + + pub struct #future_name #generics (crate::BoxFuture<#lifetime, #exec_output>); + + impl #generics std::future::Future for #future_name #unbounded_generics { + type Output = #exec_output; + + fn poll(mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> std::task::Poll { + self.0.as_mut().poll(cx) + } + } + + #[cfg(feature = "sync")] + impl #generics #action { + #sync_run + } + }.into() +} + +// impl Action for ActionType { +// type Future = FutureName; +// async fn execute([mut] self) -> OutType { } +// [SyncWrap] +// } +struct ActionImpl { + generics: Generics, + lifetime: Lifetime, + action: Type, + future_name: Ident, + exec_self_mut: Option, + exec_output: Type, + exec_body: Block, +} + +impl Parse for ActionImpl { + fn parse(input: ParseStream) -> syn::Result { + // impl Action for ActionType + input.parse::()?; + let generics: Generics = input.parse()?; + let mut lifetime = None; + for lt in generics.lifetimes() { + if lifetime.is_some() { + return Err(input.error("only one lifetime argument permitted")); + } + lifetime = Some(lt); + } + let lifetime = match lifetime { + Some(lt) => lt.lifetime.clone(), + None => parse_quote_spanned! 
{ generics.span() => 'static }, + }; + parse_name(input, "Action")?; + input.parse::()?; + let action = input.parse()?; + + let impl_body; + braced!(impl_body in input); + + // type Future = FutureName; + impl_body.parse::()?; + parse_name(&impl_body, "Future")?; + impl_body.parse::()?; + let future_name = impl_body.parse()?; + impl_body.parse::()?; + + // async fn execute([mut] self) -> OutType { } + impl_body.parse::()?; + impl_body.parse::()?; + parse_name(&impl_body, "execute")?; + let exec_args; + parenthesized!(exec_args in impl_body); + let exec_self_mut = exec_args.parse()?; + exec_args.parse::()?; + if !exec_args.is_empty() { + return Err(exec_args.error("unexpected token")); + } + impl_body.parse::]>()?; + let exec_output = impl_body.parse()?; + let exec_body = impl_body.parse()?; + + if !impl_body.is_empty() { + return Err(exec_args.error("unexpected token")); + } + + Ok(ActionImpl { + generics, + lifetime, + action, + future_name, + exec_self_mut, + exec_output, + exec_body, + }) + } +} + +struct ActionImplAttrs { + sync_type: Option, +} + +impl Parse for ActionImplAttrs { + fn parse(input: ParseStream) -> syn::Result { + let mut out = Self { sync_type: None }; + if input.is_empty() { + return Ok(out); + } + + parse_name(input, "sync")?; + input.parse::()?; + out.sync_type = Some(input.parse()?); + Ok(out) + } +} + +/// Parse an identifier with a specific expected value. +fn parse_name(input: ParseStream, name: &str) -> syn::Result<()> { + let ident = input.parse::()?; + if ident.to_string() != name { + return Err(Error::new( + ident.span(), + format!("expected '{}', got '{}'", name, ident), + )); + } + Ok(()) +} + +/// Enables rustdoc links to types that link individually to each type +/// component. +#[proc_macro_attribute] +pub fn deeplink( + _attr: proc_macro::TokenStream, + item: proc_macro::TokenStream, +) -> proc_macro::TokenStream { + let mut impl_fn = parse_macro_input!(item as ImplItemFn); + + for attr in &mut impl_fn.attrs { + // Skip non-`doc` attrs + if attr.path() != &parse_quote! { doc } { + continue; + } + // Get the string literal value from #[doc = "lit"] + let mut text = match &mut attr.meta { + Meta::NameValue(nv) => match &mut nv.value { + Expr::Lit(el) => match &mut el.lit { + Lit::Str(ls) => ls.value(), + _ => continue, + }, + _ => continue, + }, + _ => continue, + }; + // Process substrings delimited by "d[...]" + while let Some(ix) = text.find("d[") { + let pre = &text[..ix]; + let rest = &text[ix + 2..]; + let end = match rest.find(']') { + Some(v) => v, + None => { + return Error::new(attr.span(), "unterminated d[") + .into_compile_error() + .into() + } + }; + let body = &rest[..end]; + let post = &rest[end + 1..]; + // Strip inner backticks, if any + let (fixed, body) = if body.starts_with('`') && body.ends_with('`') { + ( + true, + body.strip_prefix('`').unwrap().strip_suffix('`').unwrap(), + ) + } else { + (false, body) + }; + // Build new string + let mut new_text = pre.to_owned(); + if fixed { + new_text.push_str(""); + } + new_text.push_str(&text_link(body)); + if fixed { + new_text.push_str(""); + } + new_text.push_str(post); + text = new_text; + } + *attr = parse_quote! 
{ #[doc = #text] }; + } + + impl_fn.into_token_stream().into() +} + +fn text_link(text: &str) -> String { + // Break into segments delimited by '<' or '>' + let segments = text.split_inclusive(&['<', '>']) + // Put each delimiter in its own segment + .flat_map(|s| { + if s == "<" || s == ">" { + vec![s] + } else if let Some(sub) = s.strip_suffix(&['<', '>']) { + vec![sub, &s[sub.len()..]] + } else { + vec![s] + } + }); + + // Build output + let mut out = vec![]; + for segment in segments { + match segment { + // Escape angle brackets + "<" => out.push("<"), + ">" => out.push(">"), + // Don't link unit + "()" => out.push("()"), + // Link to types + _ => { + // Use the short name + let short = segment + .rsplit_once("::") + .map(|(_, short)| short) + .unwrap_or(segment); + out.extend(["[", short, "](", segment, ")"]); + } + } + } + out.concat() +} diff --git a/manual/src/README.md b/manual/src/README.md index 2e8cbfa02..b4def29c7 100644 --- a/manual/src/README.md +++ b/manual/src/README.md @@ -23,7 +23,7 @@ e.g. # let client = Client::with_uri_str("mongodb://example.com").await?; let collection = client.database("foo").collection("bar"); let handle = tokio::task::spawn(async move { - collection.insert_one(doc! { "x": 1 }, None).await + collection.insert_one(doc! { "x": 1 }).await }); tokio::time::timeout(Duration::from_secs(5), handle).await???; diff --git a/manual/src/encryption.md b/manual/src/encryption.md index 4c9e4e1f8..0fbfd9a79 100644 --- a/manual/src/encryption.md +++ b/manual/src/encryption.md @@ -187,16 +187,16 @@ async fn main() -> Result<()> { // Clear old data. coll.drop().await?; - coll.insert_one(doc! { "encryptedField": "123456789" }, None) + coll.insert_one(doc! { "encryptedField": "123456789" }) .await?; - println!("Decrypted document: {:?}", coll.find_one(None, None).await?); + println!("Decrypted document: {:?}", coll.find_one(doc! {}).await?); let unencrypted_coll = Client::with_uri_str(URI) .await? .database(&encrypted_namespace.db) .collection::(&encrypted_namespace.coll); println!( "Encrypted document: {:?}", - unencrypted_coll.find_one(None, None).await? + unencrypted_coll.find_one(doc! {}).await? ); Ok(()) @@ -294,19 +294,19 @@ async fn main() -> Result<()> { .validator(doc! { "$jsonSchema": schema }) .await?; - coll.insert_one(doc! { "encryptedField": "123456789" }, None) + coll.insert_one(doc! { "encryptedField": "123456789" }) .await?; - println!("Decrypted document: {:?}", coll.find_one(None, None).await?); + println!("Decrypted document: {:?}", coll.find_one(doc! {}).await?); let unencrypted_coll = Client::with_uri_str(URI) .await? .database(&encrypted_namespace.db) .collection::(&encrypted_namespace.coll); println!( "Encrypted document: {:?}", - unencrypted_coll.find_one(None, None).await? + unencrypted_coll.find_one(doc! {}).await? ); // This would return a Write error with the message "Document failed validation". - // unencrypted_coll.insert_one(doc! { "encryptedField": "123456789" }, None) + // unencrypted_coll.insert_one(doc! { "encryptedField": "123456789" }) // .await?; Ok(()) @@ -407,11 +407,10 @@ async fn main() -> Result<()> { db.create_collection("encryptedCollection").await?; coll.insert_one( doc! { "_id": 1, "firstName": "Jane", "lastName": "Doe" }, - None, ) .await?; let docs: Vec<_> = coll - .find(doc! {"firstName": "Jane"}, None) + .find(doc! {"firstName": "Jane"}) .await? 
.try_collect() .await?; @@ -540,7 +539,6 @@ async fn main() -> Result<()> { "encryptedIndexed": insert_payload_indexed, "encryptedUnindexed": insert_payload_unindexed, }, - None, ) .await?; @@ -556,7 +554,7 @@ async fn main() -> Result<()> { // Find the document we inserted using the encrypted payload. // The returned document is automatically decrypted. let doc = coll - .find_one(doc! { "encryptedIndexed": find_payload }, None) + .find_one(doc! { "encryptedIndexed": find_payload }) .await?; println!("Returned document: {:?}", doc); @@ -634,9 +632,9 @@ async fn main() -> Result<()> { Algorithm::AeadAes256CbcHmacSha512Deterministic, ) .await?; - coll.insert_one(doc! { "encryptedField": encrypted_field }, None) + coll.insert_one(doc! { "encryptedField": encrypted_field }) .await?; - let mut doc = coll.find_one(None, None).await?.unwrap(); + let mut doc = coll.find_one(doc! {}).await?.unwrap(); println!("Encrypted document: {:?}", doc); // Explicitly decrypt the field: @@ -735,10 +733,10 @@ async fn main() -> Result<()> { Algorithm::AeadAes256CbcHmacSha512Deterministic, ) .await?; - coll.insert_one(doc! { "encryptedField": encrypted_field }, None) + coll.insert_one(doc! { "encryptedField": encrypted_field }) .await?; // Automatically decrypts any encrypted fields. - let doc = coll.find_one(None, None).await?.unwrap(); + let doc = coll.find_one(doc! {}).await?.unwrap(); println!("Decrypted document: {:?}", doc); let unencrypted_coll = Client::with_uri_str(URI) .await? @@ -746,7 +744,7 @@ async fn main() -> Result<()> { .collection::("coll"); println!( "Encrypted document: {:?}", - unencrypted_coll.find_one(None, None).await? + unencrypted_coll.find_one(doc! {}).await? ); Ok(()) diff --git a/manual/src/reading.md b/manual/src/reading.md index cf064d821..b54ba6b4c 100644 --- a/manual/src/reading.md +++ b/manual/src/reading.md @@ -58,7 +58,7 @@ let coll = client.database("items").collection::("in_stock"); for i in 0..5 { // Perform operations that work with directly our model. - coll.insert_one(Item { id: i }, None).await; + coll.insert_one(Item { id: i }).await; } # # Ok(()) @@ -89,9 +89,10 @@ use futures::stream::TryStreamExt; use mongodb::{bson::doc, options::FindOptions}; // Query the books in the collection with a filter and an option. -let filter = doc! { "author": "George Orwell" }; -let find_options = FindOptions::builder().sort(doc! { "title": 1 }).build(); -let mut cursor = typed_collection.find(filter, find_options).await?; +let mut cursor = typed_collection + .find(doc! { "author": "George Orwell" }) + .sort(doc! { "title": 1 }) + .await?; // Iterate over the results of the cursor. while let Some(book) = cursor.try_next().await? { diff --git a/manual/src/tracing.md b/manual/src/tracing.md index f981edef6..2e6ca01cd 100644 --- a/manual/src/tracing.md +++ b/manual/src/tracing.md @@ -62,7 +62,7 @@ async fn main() -> Result<()> { // Insert a document. let coll = client.database("test").collection("test_coll"); - coll.insert_one(doc! { "x" : 1 }, None).await?; + coll.insert_one(doc! { "x" : 1 }).await?; Ok(()) } @@ -114,7 +114,7 @@ async fn main() -> Result<()> { // Insert a document. let coll = client.database("test").collection("test_coll"); - coll.insert_one(doc! { "x" : 1 }, None).await?; + coll.insert_one(doc! 
{ "x" : 1 }).await?; Ok(()) } diff --git a/rustfmt.toml b/rustfmt.toml index 38aa12156..9a0e68abc 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -1,9 +1,8 @@ -edition = "2018" +edition = "2021" combine_control_expr = false comment_width = 100 condense_wildcard_suffixes = true format_strings = true -normalize_comments = true use_try_shorthand = true wrap_comments = true imports_layout = "HorizontalVertical" diff --git a/src/action.rs b/src/action.rs index f09c8a2ec..98bfc28e7 100644 --- a/src/action.rs +++ b/src/action.rs @@ -11,17 +11,25 @@ mod delete; mod distinct; mod drop; mod drop_index; +mod find; +mod find_and_modify; +pub mod gridfs; +mod insert_many; +mod insert_one; mod list_collections; mod list_databases; mod list_indexes; mod perf; +mod replace_one; mod run_command; +mod search_index; mod session; mod shutdown; +pub(crate) mod transaction; mod update; mod watch; -use std::{marker::PhantomData, ops::Deref}; +use std::{future::IntoFuture, marker::PhantomData, ops::Deref}; use crate::bson::Document; @@ -34,13 +42,20 @@ pub use delete::Delete; pub use distinct::Distinct; pub use drop::{DropCollection, DropDatabase}; pub use drop_index::DropIndex; +pub use find::{Find, FindOne}; +pub use find_and_modify::{FindOneAndDelete, FindOneAndReplace, FindOneAndUpdate}; +pub use insert_many::InsertMany; +pub use insert_one::InsertOne; pub use list_collections::ListCollections; pub use list_databases::ListDatabases; pub use list_indexes::ListIndexes; pub use perf::WarmConnectionPool; +pub use replace_one::ReplaceOne; pub use run_command::{RunCommand, RunCursorCommand}; +pub use search_index::{CreateSearchIndex, DropSearchIndex, ListSearchIndexes, UpdateSearchIndex}; pub use session::StartSession; pub use shutdown::Shutdown; +pub use transaction::{AbortTransaction, CommitTransaction, StartTransaction}; pub use update::Update; pub use watch::Watch; @@ -60,6 +75,7 @@ pub struct Single; pub struct Multiple; macro_rules! option_setters { + // Include options aggregate accessors. ( $opt_field:ident: $opt_field_ty:ty; $( @@ -67,16 +83,32 @@ macro_rules! option_setters { $opt_name:ident: $opt_ty:ty, )* ) => { + #[allow(unused)] fn options(&mut self) -> &mut $opt_field_ty { self.$opt_field.get_or_insert_with(<$opt_field_ty>::default) } /// Set all options. Note that this will replace all previous values set. pub fn with_options(mut self, value: impl Into>) -> Self { - self.options = value.into(); + self.$opt_field = value.into(); self } + crate::action::option_setters!($opt_field_ty; + $( + $(#[$($attrss)*])* + $opt_name: $opt_ty, + )* + ); + }; + // Just generate field setters. + ( + $opt_field_ty:ty; + $( + $(#[$($attrss:tt)*])* + $opt_name:ident: $opt_ty:ty, + )* + ) => { $( #[doc = concat!("Set the [`", stringify!($opt_field_ty), "::", stringify!($opt_name), "`] option.")] $(#[$($attrss)*])* @@ -89,12 +121,13 @@ macro_rules! option_setters { } use option_setters; +pub(crate) mod private { + pub trait Sealed {} +} + /// A pending action to execute on the server. The action can be configured via chained methods and /// executed via `await` (or `run` if using the sync client). -pub trait Action { - /// The type of the value produced by execution. - type Output; - +pub trait Action: private::Sealed + IntoFuture { /// If the value is `Some`, call the provided function on `self`. Convenient for chained /// updates with values that need to be set conditionally. 
For example: /// ```rust @@ -119,83 +152,7 @@ pub trait Action { } } -/// Generates: -/// * an `IntoFuture` executing the given method body -/// * an opaque wrapper type for the future in case we want to do something more fancy than -/// BoxFuture. -/// * a `run` method for sync execution, optionally with a wrapper function -macro_rules! action_impl { - // Generate with no sync type conversion - ( - impl$(<$lt:lifetime $(, $($at:ident),+)?>)? Action for $action:ty { - type Future = $f_ty:ident; - async fn execute($($args:ident)+) -> $out:ty $code:block - } - ) => { - crate::action::action_impl! { - impl$(<$lt $(, $($at),+)?>)? Action for $action { - type Future = $f_ty; - async fn execute($($args)+) -> $out $code - fn sync_wrap(out) -> $out { out } - } - } - }; - // Generate with a sync type conversion - ( - impl$(<$lt:lifetime $(, $($at:ident),+)?>)? Action for $action:ty { - type Future = $f_ty:ident; - async fn execute($($args:ident)+) -> $out:ty $code:block - fn sync_wrap($($wrap_args:ident)+) -> $sync_out:ty $wrap_code:block - } - ) => { - impl$(<$lt $(, $($at),+)?>)? std::future::IntoFuture for $action { - type Output = $out; - type IntoFuture = $f_ty$(<$lt>)?; - - fn into_future($($args)+) -> Self::IntoFuture { - $f_ty(Box::pin(async move { - $code - })) - } - } - - impl$(<$lt $(, $($at),+)?>)? crate::action::Action for $action { - type Output = $out; - } - - crate::action::action_impl_future_wrapper!($($lt)?, $f_ty, $out); - - impl$(<$lt>)? std::future::Future for $f_ty$(<$lt>)? { - type Output = $out; - - fn poll(mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> std::task::Poll { - self.0.as_mut().poll(cx) - } - } - - #[cfg(feature = "sync")] - impl$(<$lt $(, $($at),+)?>)? $action { - /// Synchronously execute this action. - pub fn run(self) -> $sync_out { - let $($wrap_args)+ = crate::sync::TOKIO_RUNTIME.block_on(std::future::IntoFuture::into_future(self)); - return $wrap_code - } - } - } -} -pub(crate) use action_impl; - -macro_rules! action_impl_future_wrapper { - (, $f_ty:ident, $out:ty) => { - /// Opaque future type for action execution. - pub struct $f_ty(crate::BoxFuture<'static, $out>); - }; - ($lt:lifetime, $f_ty:ident, $out:ty) => { - /// Opaque future type for action execution. - pub struct $f_ty<$lt>(crate::BoxFuture<$lt, $out>); - }; -} -pub(crate) use action_impl_future_wrapper; +pub(crate) use action_macro::{action_impl, deeplink}; use crate::Collection; @@ -205,7 +162,7 @@ pub(crate) struct CollRef<'a> { } impl<'a> CollRef<'a> { - fn new(coll: &'a Collection) -> Self { + fn new(coll: &'a Collection) -> Self { Self { inner: coll.clone_with_type(), _ref: PhantomData, diff --git a/src/action/aggregate.rs b/src/action/aggregate.rs index aa364af82..7872ace6f 100644 --- a/src/action/aggregate.rs +++ b/src/action/aggregate.rs @@ -16,7 +16,7 @@ use crate::{ SessionCursor, }; -use super::{action_impl, option_setters, CollRef, ExplicitSession, ImplicitSession}; +use super::{action_impl, deeplink, option_setters, CollRef, ExplicitSession, ImplicitSession}; impl Database { /// Runs an aggregation operation. @@ -24,8 +24,9 @@ impl Database { /// See the documentation [here](https://www.mongodb.com/docs/manual/aggregation/) for more /// information on aggregations. /// - /// `await` will return `Result<`[`Cursor`]`>` or `Result>` if + /// `await` will return d[`Result>`] or d[`Result>`] if /// a `ClientSession` is provided. 
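For a sense of how the reworked aggregate entry point reads once this lands, here is a minimal sketch of running a pipeline to completion; the database, collection, and pipeline contents are invented, and `try_collect` comes from the same `futures` import the manual already uses.

```rust
use futures::stream::TryStreamExt;
use mongodb::{
    bson::{doc, Document},
    error::Result,
    Client,
};

async fn top_authors(client: &Client) -> Result<Vec<Document>> {
    let coll = client.database("library").collection::<Document>("books");

    let pipeline = vec![
        doc! { "$group": { "_id": "$author", "count": { "$sum": 1 } } },
        doc! { "$sort": { "count": -1 } },
    ];

    // `aggregate` returns an action; `await` executes it and yields a `Cursor<Document>`.
    let cursor = coll.aggregate(pipeline).await?;
    cursor.try_collect().await
}
```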
+ #[deeplink] pub fn aggregate(&self, pipeline: impl IntoIterator) -> Aggregate { Aggregate { target: AggregateTargetRef::Database(self), @@ -36,14 +37,18 @@ impl Database { } } -impl Collection { +impl Collection +where + T: Send + Sync, +{ /// Runs an aggregation operation. /// /// See the documentation [here](https://www.mongodb.com/docs/manual/aggregation/) for more /// information on aggregations. /// - /// `await` will return `Result>` or `Result>` if - /// a `ClientSession` is provided. + /// `await` will return d[`Result>`] or d[`Result>`] if + /// a [`ClientSession`] is provided. + #[deeplink] pub fn aggregate(&self, pipeline: impl IntoIterator) -> Aggregate { Aggregate { target: AggregateTargetRef::Collection(CollRef::new(self)), @@ -61,28 +66,33 @@ impl crate::sync::Database { /// See the documentation [here](https://www.mongodb.com/docs/manual/aggregation/) for more /// information on aggregations. /// - /// [`run`](Aggregate::run) will return `Result<`[`Cursor`]`>` or - /// `Result>` if a `ClientSession` is provided. + /// [`run`](Aggregate::run) will return d[`Result>`] or + /// d[`Result>`] if a [`ClientSession`] is provided. + #[deeplink] pub fn aggregate(&self, pipeline: impl IntoIterator) -> Aggregate { self.async_database.aggregate(pipeline) } } #[cfg(feature = "sync")] -impl crate::sync::Collection { +impl crate::sync::Collection +where + T: Send + Sync, +{ /// Runs an aggregation operation. /// /// See the documentation [here](https://www.mongodb.com/docs/manual/aggregation/) for more /// information on aggregations. /// - /// [`run`](Aggregate::run) will return `Result>` or - /// `Result>` if a `ClientSession` is provided. + /// [`run`](Aggregate::run) will return d[`Result>`] or + /// d[`Result>`] if a `ClientSession` is provided. + #[deeplink] pub fn aggregate(&self, pipeline: impl IntoIterator) -> Aggregate { self.async_collection.aggregate(pipeline) } } -/// Run an aggregation operation. Create by calling [`Database::aggregate`] or +/// Run an aggregation operation. Construct with [`Database::aggregate`] or /// [`Collection::aggregate`]. #[must_use] pub struct Aggregate<'a, Session = ImplicitSession> { @@ -110,7 +120,7 @@ impl<'a, Session> Aggregate<'a, Session> { } impl<'a> Aggregate<'a, ImplicitSession> { - /// Runs the operation using the provided session. + /// Use the provided session when running the operation. pub fn session( self, value: impl Into<&'a mut ClientSession>, @@ -124,45 +134,49 @@ impl<'a> Aggregate<'a, ImplicitSession> { } } -action_impl! 
{ - impl<'a> Action for Aggregate<'a, ImplicitSession> { - type Future = AggregateFuture; - - async fn execute(mut self) -> Result> { - resolve_options!( - self.target, - self.options, - [read_concern, write_concern, selection_criteria] - ); - - let aggregate = crate::operation::aggregate::Aggregate::new(self.target.target(), self.pipeline, self.options); - let client = self.target.client(); - client.execute_cursor_operation(aggregate).await - } - - fn sync_wrap(out) -> Result> { - out.map(crate::sync::Cursor::new) - } +#[action_impl(sync = crate::sync::Cursor)] +impl<'a> Action for Aggregate<'a, ImplicitSession> { + type Future = AggregateFuture; + + async fn execute(mut self) -> Result> { + resolve_options!( + self.target, + self.options, + [read_concern, write_concern, selection_criteria] + ); + + let aggregate = crate::operation::aggregate::Aggregate::new( + self.target.target(), + self.pipeline, + self.options, + ); + let client = self.target.client(); + client.execute_cursor_operation(aggregate).await } } -action_impl! { - impl<'a> Action for Aggregate<'a, ExplicitSession<'a>> { - type Future = AggregateSessionFuture; - - async fn execute(mut self) -> Result> { - resolve_read_concern_with_session!(self.target, self.options, Some(&mut *self.session.0))?; - resolve_write_concern_with_session!(self.target, self.options, Some(&mut *self.session.0))?; - resolve_selection_criteria_with_session!(self.target, self.options, Some(&mut *self.session.0))?; - - let aggregate = crate::operation::aggregate::Aggregate::new(self.target.target(), self.pipeline, self.options); - let client = self.target.client(); - client.execute_session_cursor_operation(aggregate, self.session.0).await - } - - fn sync_wrap(out) -> Result> { - out.map(crate::sync::SessionCursor::new) - } +#[action_impl(sync = crate::sync::SessionCursor)] +impl<'a> Action for Aggregate<'a, ExplicitSession<'a>> { + type Future = AggregateSessionFuture; + + async fn execute(mut self) -> Result> { + resolve_read_concern_with_session!(self.target, self.options, Some(&mut *self.session.0))?; + resolve_write_concern_with_session!(self.target, self.options, Some(&mut *self.session.0))?; + resolve_selection_criteria_with_session!( + self.target, + self.options, + Some(&mut *self.session.0) + )?; + + let aggregate = crate::operation::aggregate::Aggregate::new( + self.target.target(), + self.pipeline, + self.options, + ); + let client = self.target.client(); + client + .execute_session_cursor_operation(aggregate, self.session.0) + .await } } diff --git a/src/action/bulk_write.rs b/src/action/bulk_write.rs index def9c4877..a35a7f0bc 100644 --- a/src/action/bulk_write.rs +++ b/src/action/bulk_write.rs @@ -61,51 +61,50 @@ impl<'a> BulkWrite<'a> { } } -action_impl! 
{ - impl<'a> Action for BulkWrite<'a> { - type Future = BulkWriteFuture; - - async fn execute(mut self) -> Result { - let mut total_attempted = 0; - let mut execution_status = ExecutionStatus::None; - - while total_attempted < self.models.len() - && execution_status.should_continue(self.is_ordered()) - { - let mut operation = BulkWriteOperation::new( - self.client.clone(), - &self.models[total_attempted..], - total_attempted, - self.options.as_ref(), +#[action_impl] +impl<'a> Action for BulkWrite<'a> { + type Future = BulkWriteFuture; + + async fn execute(mut self) -> Result { + let mut total_attempted = 0; + let mut execution_status = ExecutionStatus::None; + + while total_attempted < self.models.len() + && execution_status.should_continue(self.is_ordered()) + { + let mut operation = BulkWriteOperation::new( + self.client.clone(), + &self.models[total_attempted..], + total_attempted, + self.options.as_ref(), + ) + .await; + let result = self + .client + .execute_operation::( + &mut operation, + self.session.as_deref_mut(), ) .await; - let result = self - .client - .execute_operation::( - &mut operation, - self.session.as_deref_mut(), - ) - .await; - total_attempted += operation.n_attempted; - - match result { - Ok(result) => { - execution_status = execution_status.with_success(result); - } - Err(error) => { - execution_status = execution_status.with_failure(error); - } + total_attempted += operation.n_attempted; + + match result { + Ok(result) => { + execution_status = execution_status.with_success(result); + } + Err(error) => { + execution_status = execution_status.with_failure(error); } } + } - match execution_status { - ExecutionStatus::Success(bulk_write_result) => Ok(bulk_write_result), - ExecutionStatus::Error(error) => Err(error), - ExecutionStatus::None => Err(ErrorKind::InvalidArgument { - message: "bulk_write must be provided at least one write operation".into(), - } - .into()), + match execution_status { + ExecutionStatus::Success(bulk_write_result) => Ok(bulk_write_result), + ExecutionStatus::Error(error) => Err(error), + ExecutionStatus::None => Err(ErrorKind::InvalidArgument { + message: "bulk_write must be provided at least one write operation".into(), } + .into()), } } } diff --git a/src/action/count.rs b/src/action/count.rs index e3bf1a613..e3fdea48b 100644 --- a/src/action/count.rs +++ b/src/action/count.rs @@ -7,9 +7,12 @@ use crate::{ Collection, }; -use super::{action_impl, option_setters, CollRef}; +use super::{action_impl, deeplink, option_setters, CollRef}; -impl Collection { +impl Collection +where + T: Send + Sync, +{ /// Estimates the number of documents in the collection using collection metadata. /// /// Due to an oversight in versions 5.0.0 - 5.0.7 of MongoDB, the `count` server command, @@ -22,7 +25,8 @@ impl Collection { /// For more information on the behavior of the `count` server command, see /// [Count: Behavior](https://www.mongodb.com/docs/manual/reference/command/count/#behavior). /// - /// `await` will return `Result`. + /// `await` will return d[`Result`]. + #[deeplink] pub fn estimated_document_count(&self) -> EstimatedDocumentCount { EstimatedDocumentCount { cr: CollRef::new(self), @@ -34,7 +38,8 @@ impl Collection { /// /// Note that this method returns an accurate count. /// - /// `await` will return `Result`. + /// `await` will return d[`Result`]. 
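A small usage sketch contrasting the two counting paths defined in this file; the database, collection, and filter are invented.

```rust
use mongodb::{
    bson::{doc, Document},
    error::Result,
    Client,
};

async fn report_counts(client: &Client) -> Result<()> {
    let coll = client.database("db").collection::<Document>("items");

    // Metadata-based estimate: fast, but may be out of date.
    let estimate = coll.estimated_document_count().await?;

    // Accurate count of documents matching a filter.
    let in_stock = coll.count_documents(doc! { "in_stock": true }).await?;

    println!("~{estimate} total, {in_stock} in stock");
    Ok(())
}
```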
+ #[deeplink] pub fn count_documents(&self, filter: Document) -> CountDocuments { CountDocuments { cr: CollRef::new(self), @@ -46,7 +51,10 @@ impl Collection { } #[cfg(feature = "sync")] -impl crate::sync::Collection { +impl crate::sync::Collection +where + T: Send + Sync, +{ /// Estimates the number of documents in the collection using collection metadata. /// /// Due to an oversight in versions 5.0.0 - 5.0.7 of MongoDB, the `count` server command, @@ -59,7 +67,8 @@ impl crate::sync::Collection { /// For more information on the behavior of the `count` server command, see /// [Count: Behavior](https://www.mongodb.com/docs/manual/reference/command/count/#behavior). /// - /// [`run`](EstimatedDocumentCount::run) will return `Result`. + /// [`run`](EstimatedDocumentCount::run) will return d[`Result`]. + #[deeplink] pub fn estimated_document_count(&self) -> EstimatedDocumentCount { self.async_collection.estimated_document_count() } @@ -68,13 +77,14 @@ impl crate::sync::Collection { /// /// Note that this method returns an accurate count. /// - /// [`run`](CountDocuments::run) will return `Result`. + /// [`run`](CountDocuments::run) will return d[`Result`]. + #[deeplink] pub fn count_documents(&self, filter: Document) -> CountDocuments { self.async_collection.count_documents(filter) } } -/// Gather an estimated document count. Create by calling [`Collection::estimated_document_count`]. +/// Gather an estimated document count. Construct with [`Collection::estimated_document_count`]. #[must_use] pub struct EstimatedDocumentCount<'a> { cr: CollRef<'a>, @@ -90,19 +100,18 @@ impl<'a> EstimatedDocumentCount<'a> { ); } -action_impl! { - impl<'a> Action for EstimatedDocumentCount<'a> { - type Future = EstimatedDocumentCountFuture; +#[action_impl] +impl<'a> Action for EstimatedDocumentCount<'a> { + type Future = EstimatedDocumentCountFuture; - async fn execute(mut self) -> Result { - resolve_options!(self.cr, self.options, [read_concern, selection_criteria]); - let op = crate::operation::count::Count::new(self.cr.namespace(), self.options); - self.cr.client().execute_operation(op, None).await - } + async fn execute(mut self) -> Result { + resolve_options!(self.cr, self.options, [read_concern, selection_criteria]); + let op = crate::operation::count::Count::new(self.cr.namespace(), self.options); + self.cr.client().execute_operation(op, None).await } } -/// Get an accurate count of documents. Create by calling [`Collection::count_documents`]. +/// Get an accurate count of documents. Construct with [`Collection::count_documents`]. #[must_use] pub struct CountDocuments<'a> { cr: CollRef<'a>, @@ -123,23 +132,26 @@ impl<'a> CountDocuments<'a> { comment: bson::Bson, ); - /// Runs the operation using the provided session. + /// Use the provided session when running the operation. pub fn session(mut self, value: impl Into<&'a mut ClientSession>) -> Self { self.session = Some(value.into()); self } } -action_impl! 
{ - impl<'a> Action for CountDocuments<'a> { - type Future = CountDocumentsFuture; +#[action_impl] +impl<'a> Action for CountDocuments<'a> { + type Future = CountDocumentsFuture; - async fn execute(mut self) -> Result { - resolve_read_concern_with_session!(self.cr, self.options, self.session.as_ref())?; - resolve_selection_criteria_with_session!(self.cr, self.options, self.session.as_ref())?; + async fn execute(mut self) -> Result { + resolve_read_concern_with_session!(self.cr, self.options, self.session.as_ref())?; + resolve_selection_criteria_with_session!(self.cr, self.options, self.session.as_ref())?; - let op = crate::operation::count_documents::CountDocuments::new(self.cr.namespace(), self.filter, self.options)?; - self.cr.client().execute_operation(op, self.session).await - } + let op = crate::operation::count_documents::CountDocuments::new( + self.cr.namespace(), + self.filter, + self.options, + )?; + self.cr.client().execute_operation(op, self.session).await } } diff --git a/src/action/create_collection.rs b/src/action/create_collection.rs index 3d8827be4..8fb90b9c6 100644 --- a/src/action/create_collection.rs +++ b/src/action/create_collection.rs @@ -2,7 +2,7 @@ use bson::Document; use crate::{options::CreateCollectionOptions, ClientSession, Database}; -use crate::action::option_setters; +use crate::action::{deeplink, option_setters}; impl Database { /// Creates a new collection in the database with the given `name`. @@ -10,11 +10,12 @@ impl Database { /// Note that MongoDB creates collections implicitly when data is inserted, so this method is /// not needed if no special options are required. /// - /// `await` will return `Result<()>`. - pub fn create_collection(&self, name: impl AsRef) -> CreateCollection { + /// `await` will return d[`Result<()>`]. + #[deeplink] + pub fn create_collection(&self, name: impl Into) -> CreateCollection { CreateCollection { db: self, - name: name.as_ref().to_owned(), + name: name.into(), options: None, session: None, } @@ -28,13 +29,14 @@ impl crate::sync::Database { /// Note that MongoDB creates collections implicitly when data is inserted, so this method is /// not needed if no special options are required. /// - /// [`run`](CreateCollection::run) will return `Result<()>`. - pub fn create_collection(&self, name: impl AsRef) -> CreateCollection { + /// [`run`](CreateCollection::run) will return d[`Result<()>`]. + #[deeplink] + pub fn create_collection(&self, name: impl Into) -> CreateCollection { self.async_database.create_collection(name) } } -/// Creates a new collection. Create by calling [`Database::create_collection`]. +/// Creates a new collection. Construct with [`Database::create_collection`]. #[must_use] pub struct CreateCollection<'a> { pub(crate) db: &'a Database, @@ -66,7 +68,7 @@ impl<'a> CreateCollection<'a> { encrypted_fields: Document, ); - /// Runs the operation using the provided session. + /// Use the provided session when running the operation. 
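For reference, a sketch of the `create_collection` action with the new `impl Into<String>` name parameter, once without options and once tied to an explicit session; the database and collection names are invented.

```rust
use mongodb::{error::Result, Client, ClientSession};

async fn make_collections(client: &Client, session: &mut ClientSession) -> Result<()> {
    let db = client.database("app");

    // `name` is `impl Into<String>`, so both `&str` and `String` work.
    db.create_collection("events").await?;

    // The same action can run under an explicit session.
    db.create_collection("audit_log").session(session).await?;

    Ok(())
}
```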
pub fn session(mut self, value: impl Into<&'a mut ClientSession>) -> Self { self.session = Some(value.into()); self diff --git a/src/action/create_index.rs b/src/action/create_index.rs index 59bf4d848..37be8cc07 100644 --- a/src/action/create_index.rs +++ b/src/action/create_index.rs @@ -13,12 +13,16 @@ use crate::{ IndexModel, }; -use super::{action_impl, option_setters, CollRef, Multiple, Single}; +use super::{action_impl, deeplink, option_setters, CollRef, Multiple, Single}; -impl Collection { +impl Collection +where + T: Send + Sync, +{ /// Creates the given index on this collection. /// - /// `await` will return `Result`. + /// `await` will return d[`Result`]. + #[deeplink] pub fn create_index(&self, index: IndexModel) -> CreateIndex { CreateIndex { coll: CollRef::new(self), @@ -31,7 +35,8 @@ impl Collection { /// Creates the given indexes on this collection. /// - /// `await` will return `Result`. + /// `await` will return d[`Result`]. + #[deeplink] pub fn create_indexes( &self, indexes: impl IntoIterator, @@ -46,18 +51,23 @@ impl Collection { } } -#[cfg(any(feature = "sync", feature = "tokio-sync"))] -impl crate::sync::Collection { +#[cfg(feature = "sync")] +impl crate::sync::Collection +where + T: Send + Sync, +{ /// Creates the given index on this collection. /// - /// [`run`](CreateIndex::run) will return `Result`. + /// [`run`](CreateIndex::run) will return d[`Result`]. + #[deeplink] pub fn create_index(&self, index: IndexModel) -> CreateIndex { self.async_collection.create_index(index) } /// Creates the given indexes on this collection. /// - /// [`run`](CreateIndex::run) will return `Result`. + /// [`run`](CreateIndex::run) will return d[`Result`]. + #[deeplink] pub fn create_indexes( &self, indexes: impl IntoIterator, @@ -85,40 +95,38 @@ impl<'a, M> CreateIndex<'a, M> { comment: Bson, ); - /// Runs the operation using the provided session. + /// Use the provided session when running the operation. pub fn session(mut self, value: impl Into<&'a mut ClientSession>) -> Self { self.session = Some(value.into()); self } } -action_impl! { - impl<'a> Action for CreateIndex<'a, Single> { - type Future = CreateIndexFuture; +#[action_impl] +impl<'a> Action for CreateIndex<'a, Single> { + type Future = CreateIndexFuture; - async fn execute(self) -> Result { - let inner: CreateIndex<'a, Multiple> = CreateIndex { - coll: self.coll, - indexes: self.indexes, - options: self.options, - session: self.session, - _mode: PhantomData, - }; - let response = inner.await?; - Ok(response.into_create_index_result()) - } + async fn execute(self) -> Result { + let inner: CreateIndex<'a, Multiple> = CreateIndex { + coll: self.coll, + indexes: self.indexes, + options: self.options, + session: self.session, + _mode: PhantomData, + }; + let response = inner.await?; + Ok(response.into_create_index_result()) } } -action_impl! 
{ - impl<'a> Action for CreateIndex<'a, Multiple> { - type Future = CreateIndexesFuture; +#[action_impl] +impl<'a> Action for CreateIndex<'a, Multiple> { + type Future = CreateIndexesFuture; - async fn execute(mut self) -> Result { - resolve_write_concern_with_session!(self.coll, self.options, self.session.as_ref())?; + async fn execute(mut self) -> Result { + resolve_write_concern_with_session!(self.coll, self.options, self.session.as_ref())?; - let op = Op::new(self.coll.namespace(), self.indexes, self.options); - self.coll.client().execute_operation(op, self.session).await - } + let op = Op::new(self.coll.namespace(), self.indexes, self.options); + self.coll.client().execute_operation(op, self.session).await } } diff --git a/src/action/csfle/create_data_key.rs b/src/action/csfle/create_data_key.rs index 743a3ba66..ac7821664 100644 --- a/src/action/csfle/create_data_key.rs +++ b/src/action/csfle/create_data_key.rs @@ -1,12 +1,13 @@ use crate::client_encryption::{ClientEncryption, MasterKey}; -use super::super::option_setters; +use super::super::{deeplink, option_setters}; impl ClientEncryption { /// Creates a new key document and inserts into the key vault collection. /// - /// `await` will return `Result` (subtype 0x04) with the _id of the created + /// `await` will return d[`Result`] (subtype 0x04) with the _id of the created /// document as a UUID. + #[deeplink] pub fn create_data_key(&self, master_key: MasterKey) -> CreateDataKey { CreateDataKey { client_enc: self, diff --git a/src/action/csfle/create_encrypted_collection.rs b/src/action/csfle/create_encrypted_collection.rs index dcda971ef..d73a01c18 100644 --- a/src/action/csfle/create_encrypted_collection.rs +++ b/src/action/csfle/create_encrypted_collection.rs @@ -68,48 +68,57 @@ impl<'a> CreateEncryptedCollection<'a> { ); } -action_impl! { - impl<'a> Action for CreateEncryptedCollection<'a> { - type Future = CreateEncryptedCollectionFuture; +#[action_impl] +impl<'a> Action for CreateEncryptedCollection<'a> { + type Future = CreateEncryptedCollectionFuture; - async fn execute(self) -> (Document, Result<()>) { - let ef = match self.options.as_ref().and_then(|o| o.encrypted_fields.as_ref()) { - Some(ef) => ef, - None => { - return ( - doc! {}, - Err(Error::invalid_argument( - "no encrypted_fields defined for collection", - )), - ); - } - }; - let mut ef_prime = ef.clone(); - if let Ok(fields) = ef_prime.get_array_mut("fields") { - for f in fields { - let f_doc = if let Some(d) = f.as_document_mut() { - d - } else { - continue; + async fn execute(self) -> (Document, Result<()>) { + let ef = match self + .options + .as_ref() + .and_then(|o| o.encrypted_fields.as_ref()) + { + Some(ef) => ef, + None => { + return ( + doc! 
{}, + Err(Error::invalid_argument( + "no encrypted_fields defined for collection", + )), + ); + } + }; + let mut ef_prime = ef.clone(); + if let Ok(fields) = ef_prime.get_array_mut("fields") { + for f in fields { + let f_doc = if let Some(d) = f.as_document_mut() { + d + } else { + continue; + }; + if f_doc.get("keyId") == Some(&Bson::Null) { + let d = match self + .client_enc + .create_data_key(self.master_key.clone()) + .await + { + Ok(v) => v, + Err(e) => return (ef_prime, Err(e)), }; - if f_doc.get("keyId") == Some(&Bson::Null) { - let d = match self.client_enc.create_data_key(self.master_key.clone()).await { - Ok(v) => v, - Err(e) => return (ef_prime, Err(e)), - }; - f_doc.insert("keyId", d); - } + f_doc.insert("keyId", d); } } - // Unwrap safety: the check for `encrypted_fields` at the top won't succeed if `self.options` is `None`. - let mut opts_prime = self.options.unwrap(); - opts_prime.encrypted_fields = Some(ef_prime.clone()); - ( - ef_prime, - self.db.create_collection(self.name) - .with_options(opts_prime) - .await, - ) } + // Unwrap safety: the check for `encrypted_fields` at the top won't succeed if + // `self.options` is `None`. + let mut opts_prime = self.options.unwrap(); + opts_prime.encrypted_fields = Some(ef_prime.clone()); + ( + ef_prime, + self.db + .create_collection(self.name) + .with_options(opts_prime) + .await, + ) } } diff --git a/src/action/csfle/encrypt.rs b/src/action/csfle/encrypt.rs index 33737a46d..ed06457f4 100644 --- a/src/action/csfle/encrypt.rs +++ b/src/action/csfle/encrypt.rs @@ -4,7 +4,7 @@ use serde::Serialize; use serde_with::skip_serializing_none; use typed_builder::TypedBuilder; -use super::super::option_setters; +use super::super::{deeplink, option_setters}; use crate::client_encryption::ClientEncryption; impl ClientEncryption { @@ -14,7 +14,8 @@ impl ClientEncryption { /// `AutoEncryptionOptions`. `AutoEncryptionOptions.bypass_query_analysis` may be true. /// `AutoEncryptionOptions.bypass_auto_encryption` must be false. /// - /// `await` will return a `Result` (subtype 6) containing the encrypted value. + /// `await` will return a d[`Result`] (subtype 6) containing the encrypted value. + #[deeplink] pub fn encrypt( &self, value: impl Into, @@ -39,7 +40,8 @@ impl ClientEncryption { /// The expression will be encrypted using the [`Algorithm::RangePreview`] algorithm and the /// "rangePreview" query type. /// - /// `await` returns a `Result` containing the encrypted expression. + /// `await` will return a d[`Result`] containing the encrypted expression. + #[deeplink] pub fn encrypt_expression( &self, expression: RawDocumentBuf, diff --git a/src/action/delete.rs b/src/action/delete.rs index 8cd8ae584..a29919378 100644 --- a/src/action/delete.rs +++ b/src/action/delete.rs @@ -11,9 +11,12 @@ use crate::{ Collection, }; -use super::{action_impl, option_setters, CollRef}; +use super::{action_impl, deeplink, option_setters, CollRef}; -impl Collection { +impl Collection +where + T: Send + Sync, +{ /// Deletes up to one document found matching `query`. /// /// This operation will retry once upon failure if the connection and encountered error support @@ -21,7 +24,8 @@ impl Collection { /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on /// retryable writes. /// - /// `await` will return `Result`. + /// `await` will return d[`Result`]. 
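A rough sketch of what explicit encryption looks like through this action. The `EncryptKey` wrapper, the import paths, and the assumption that a data-key id is already at hand are taken from the existing manual examples rather than from this patch, so treat them as assumptions.

```rust
use mongodb::{
    bson::Binary,
    client_encryption::{ClientEncryption, EncryptKey},
    error::Result,
    mongocrypt::ctx::Algorithm,
};

// `key_id` would come from an earlier `create_data_key` call.
async fn encrypt_field(ce: &ClientEncryption, key_id: Binary) -> Result<()> {
    let ciphertext = ce
        .encrypt(
            "123456789",
            EncryptKey::Id(key_id),
            Algorithm::AeadAes256CbcHmacSha512Deterministic,
        )
        .await?;

    // `ciphertext` is a BSON binary of subtype 6, ready to store in a document.
    let _ = ciphertext;
    Ok(())
}
```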
+ #[deeplink] pub fn delete_one(&self, query: Document) -> Delete { Delete { coll: CollRef::new(self), @@ -34,7 +38,8 @@ impl Collection { /// Deletes all documents stored in the collection matching `query`. /// - /// `await` will return `Result`. + /// `await` will return d[`Result`]. + #[deeplink] pub fn delete_many(&self, query: Document) -> Delete { Delete { coll: CollRef::new(self), @@ -46,8 +51,11 @@ impl Collection { } } -#[cfg(any(feature = "sync", feature = "tokio-sync"))] -impl crate::sync::Collection { +#[cfg(feature = "sync")] +impl crate::sync::Collection +where + T: Send + Sync, +{ /// Deletes up to one document found matching `query`. /// /// This operation will retry once upon failure if the connection and encountered error support @@ -55,14 +63,16 @@ impl crate::sync::Collection { /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on /// retryable writes. /// - /// [`run`](Delete::run) will return `Result`. + /// [`run`](Delete::run) will return d[`Result`]. + #[deeplink] pub fn delete_one(&self, query: Document) -> Delete { self.async_collection.delete_one(query) } /// Deletes all documents stored in the collection matching `query`. /// - /// [`run`](Delete::run) will return `Result`. + /// [`run`](Delete::run) will return d[`Result`]. + #[deeplink] pub fn delete_many(&self, query: Document) -> Delete { self.async_collection.delete_many(query) } @@ -88,22 +98,21 @@ impl<'a> Delete<'a> { comment: Bson, ); - /// Runs the operation using the provided session. + /// Use the provided session when running the operation. pub fn session(mut self, value: impl Into<&'a mut ClientSession>) -> Self { self.session = Some(value.into()); self } } -action_impl! { - impl<'a> Action for Delete<'a> { - type Future = DeleteFuture; +#[action_impl] +impl<'a> Action for Delete<'a> { + type Future = DeleteFuture; - async fn execute(mut self) -> Result { - resolve_write_concern_with_session!(self.coll, self.options, self.session.as_ref())?; + async fn execute(mut self) -> Result { + resolve_write_concern_with_session!(self.coll, self.options, self.session.as_ref())?; - let op = Op::new(self.coll.namespace(), self.query, self.limit, self.options); - self.coll.client().execute_operation(op, self.session).await - } + let op = Op::new(self.coll.namespace(), self.query, self.limit, self.options); + self.coll.client().execute_operation(op, self.session).await } } diff --git a/src/action/distinct.rs b/src/action/distinct.rs index 59a19dcac..2c7b42fd5 100644 --- a/src/action/distinct.rs +++ b/src/action/distinct.rs @@ -13,12 +13,16 @@ use crate::{ Collection, }; -use super::{action_impl, option_setters, CollRef}; +use super::{action_impl, deeplink, option_setters, CollRef}; -impl Collection { +impl Collection +where + T: Send + Sync, +{ /// Finds the distinct values of the field specified by `field_name` across the collection. /// - /// `await` will return `Result>`. + /// `await` will return d[`Result>`]. + #[deeplink] pub fn distinct(&self, field_name: impl AsRef, filter: Document) -> Distinct { Distinct { coll: CollRef::new(self), @@ -30,11 +34,15 @@ impl Collection { } } -#[cfg(any(feature = "sync", feature = "tokio-sync"))] -impl crate::sync::Collection { +#[cfg(feature = "sync")] +impl crate::sync::Collection +where + T: Send + Sync, +{ /// Finds the distinct values of the field specified by `field_name` across the collection. /// - /// [`run`](Distinct::run) will return `Result>`. + /// [`run`](Distinct::run) will return d[`Result>`]. 
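For context, a small sketch of the `distinct` action defined above; the collection, field name, and filter are invented.

```rust
use mongodb::{
    bson::{doc, Document},
    error::Result,
    Client,
};

async fn list_authors(client: &Client) -> Result<()> {
    let coll = client.database("library").collection::<Document>("books");

    // Distinct values of `author` across documents matching the filter.
    let authors = coll.distinct("author", doc! { "in_print": true }).await?;
    for author in authors {
        println!("{author}");
    }
    Ok(())
}
```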
+ #[deeplink] pub fn distinct(&self, field_name: impl AsRef, filter: Document) -> Distinct { self.async_collection.distinct(field_name, filter) } @@ -59,28 +67,27 @@ impl<'a> Distinct<'a> { comment: Bson, ); - /// Runs the operation using the provided session. + /// Use the provided session when running the operation. pub fn session(mut self, value: impl Into<&'a mut ClientSession>) -> Self { self.session = Some(value.into()); self } } -action_impl! { - impl<'a> Action for Distinct<'a> { - type Future = DistinctFuture; +#[action_impl] +impl<'a> Action for Distinct<'a> { + type Future = DistinctFuture; - async fn execute(mut self) -> Result> { - resolve_read_concern_with_session!(self.coll, self.options, self.session.as_ref())?; - resolve_selection_criteria_with_session!(self.coll, self.options, self.session.as_ref())?; + async fn execute(mut self) -> Result> { + resolve_read_concern_with_session!(self.coll, self.options, self.session.as_ref())?; + resolve_selection_criteria_with_session!(self.coll, self.options, self.session.as_ref())?; - let op = Op::new( - self.coll.namespace(), - self.field_name, - self.filter, - self.options, - ); - self.coll.client().execute_operation(op, self.session).await - } + let op = Op::new( + self.coll.namespace(), + self.field_name, + self.filter, + self.options, + ); + self.coll.client().execute_operation(op, self.session).await } } diff --git a/src/action/drop.rs b/src/action/drop.rs index 94f0dbe1d..69e7bc0d6 100644 --- a/src/action/drop.rs +++ b/src/action/drop.rs @@ -9,12 +9,13 @@ use crate::{ Database, }; -use super::{action_impl, option_setters, CollRef}; +use super::{action_impl, deeplink, option_setters, CollRef}; impl Database { /// Drops the database, deleting all data, collections, and indexes stored in it. /// - /// `await` will return `Result<()>`. + /// `await` will return d[`Result<()>`]. + #[deeplink] pub fn drop(&self) -> DropDatabase { DropDatabase { db: self, @@ -28,13 +29,14 @@ impl Database { impl crate::sync::Database { /// Drops the database, deleting all data, collections, and indexes stored in it. /// - /// [`run`](DropDatabase::run) will return `Result<()>`. + /// [`run`](DropDatabase::run) will return d[`Result<()>`]. + #[deeplink] pub fn drop(&self) -> DropDatabase { self.async_database.drop() } } -/// Drops the database, deleting all data, collections, and indexes stored in it. Create by calling +/// Drops the database, deleting all data, collections, and indexes stored in it. Construct with /// [`Database::drop`]. #[must_use] pub struct DropDatabase<'a> { @@ -55,24 +57,25 @@ impl<'a> DropDatabase<'a> { } } -action_impl! { - impl<'a> Action for DropDatabase<'a> { - type Future = DropDatabaseFuture; +#[action_impl] +impl<'a> Action for DropDatabase<'a> { + type Future = DropDatabaseFuture; - async fn execute(mut self) -> Result<()> { - resolve_options!(self.db, self.options, [write_concern]); - let op = drop_database::DropDatabase::new(self.db.name().to_string(), self.options); - self.db.client() - .execute_operation(op, self.session) - .await - } + async fn execute(mut self) -> Result<()> { + resolve_options!(self.db, self.options, [write_concern]); + let op = drop_database::DropDatabase::new(self.db.name().to_string(), self.options); + self.db.client().execute_operation(op, self.session).await } } -impl Collection { +impl Collection +where + T: Send + Sync, +{ /// Drops the collection, deleting all data and indexes stored in it. /// - /// `await` will return `Result<()>`. + /// `await` will return d[`Result<()>`]. 
+ #[deeplink] pub fn drop(&self) -> DropCollection { DropCollection { cr: CollRef::new(self), @@ -83,16 +86,20 @@ impl Collection { } #[cfg(feature = "sync")] -impl crate::sync::Collection { +impl crate::sync::Collection +where + T: Send + Sync, +{ /// Drops the collection, deleting all data and indexes stored in it. /// - /// [`run`](DropCollection::run) will return `Result<()>`. + /// [`run`](DropCollection::run) will return d[`Result<()>`]. + #[deeplink] pub fn drop(&self) -> DropCollection { self.async_collection.drop() } } -/// Drops the collection, deleting all data and indexes stored in it. Create by calling +/// Drops the collection, deleting all data and indexes stored in it. Construct with /// [`Collection::drop`]. #[must_use] pub struct DropCollection<'a> { diff --git a/src/action/drop_index.rs b/src/action/drop_index.rs index 8536dc9f9..7089f80a2 100644 --- a/src/action/drop_index.rs +++ b/src/action/drop_index.rs @@ -2,7 +2,7 @@ use std::time::Duration; use bson::Bson; -use super::{action_impl, option_setters, CollRef}; +use super::{action_impl, deeplink, option_setters, CollRef}; use crate::{ coll::options::DropIndexOptions, error::{ErrorKind, Result}, @@ -12,10 +12,14 @@ use crate::{ Collection, }; -impl Collection { +impl Collection +where + T: Send + Sync, +{ /// Drops the index specified by `name` from this collection. /// - /// `await` will return `Result<()>`. + /// `await` will return d[`Result<()>`]. + #[deeplink] pub fn drop_index(&self, name: impl AsRef) -> DropIndex { DropIndex { coll: CollRef::new(self), @@ -27,7 +31,8 @@ impl Collection { /// Drops all indexes associated with this collection. /// - /// `await` will return `Result<()>`. + /// `await` will return d[`Result<()>`]. + #[deeplink] pub fn drop_indexes(&self) -> DropIndex { DropIndex { coll: CollRef::new(self), @@ -38,18 +43,23 @@ impl Collection { } } -#[cfg(any(feature = "sync", feature = "tokio-sync"))] -impl crate::sync::Collection { +#[cfg(feature = "sync")] +impl crate::sync::Collection +where + T: Send + Sync, +{ /// Drops the index specified by `name` from this collection. /// - /// [`run`](DropIndex::run) will return `Result<()>`. + /// [`run`](DropIndex::run) will return d[`Result<()>`]. + #[deeplink] pub fn drop_index(&self, name: impl AsRef) -> DropIndex { self.async_collection.drop_index(name) } /// Drops all indexes associated with this collection. /// - /// [`run`](DropIndex::run) will return `Result<()>`. + /// [`run`](DropIndex::run) will return d[`Result<()>`]. + #[deeplink] pub fn drop_indexes(&self) -> DropIndex { self.async_collection.drop_indexes() } @@ -72,33 +82,32 @@ impl<'a> DropIndex<'a> { comment: Bson, ); - /// Runs the operation using the provided session. + /// Use the provided session when running the operation. pub fn session(mut self, value: impl Into<&'a mut ClientSession>) -> Self { self.session = Some(value.into()); self } } -action_impl! { - impl<'a> Action for DropIndex<'a> { - type Future = DropIndexFuture; +#[action_impl] +impl<'a> Action for DropIndex<'a> { + type Future = DropIndexFuture; - async fn execute(mut self) -> Result<()> { - if matches!(self.name.as_deref(), Some("*")) { - return Err(ErrorKind::InvalidArgument { - message: "Cannot pass name \"*\" to drop_index since more than one index would be \ - dropped." 
- .to_string(), - } - .into()); + async fn execute(mut self) -> Result<()> { + if matches!(self.name.as_deref(), Some("*")) { + return Err(ErrorKind::InvalidArgument { + message: "Cannot pass name \"*\" to drop_index since more than one index would be \ + dropped." + .to_string(), } - resolve_write_concern_with_session!(self.coll, self.options, self.session.as_ref())?; + .into()); + } + resolve_write_concern_with_session!(self.coll, self.options, self.session.as_ref())?; - // If there is no provided name, that means we should drop all indexes. - let index_name = self.name.unwrap_or_else(|| "*".to_string()); + // If there is no provided name, that means we should drop all indexes. + let index_name = self.name.unwrap_or_else(|| "*".to_string()); - let op = Op::new(self.coll.namespace(), index_name, self.options); - self.coll.client().execute_operation(op, self.session).await - } + let op = Op::new(self.coll.namespace(), index_name, self.options); + self.coll.client().execute_operation(op, self.session).await } } diff --git a/src/action/find.rs b/src/action/find.rs new file mode 100644 index 000000000..4f8ca0173 --- /dev/null +++ b/src/action/find.rs @@ -0,0 +1,213 @@ +use std::time::Duration; + +use bson::{Bson, Document}; +use serde::de::DeserializeOwned; + +use crate::{ + coll::options::{CursorType, FindOneOptions, FindOptions, Hint}, + collation::Collation, + error::Result, + operation::Find as Op, + options::ReadConcern, + selection_criteria::SelectionCriteria, + ClientSession, + Collection, + Cursor, + SessionCursor, +}; + +use super::{action_impl, deeplink, option_setters, ExplicitSession, ImplicitSession}; + +impl Collection { + /// Finds the documents in the collection matching `filter`. + /// + /// `await` will return d[`Result>`] (or d[`Result>`] if a session is + /// provided). + #[deeplink] + pub fn find(&self, filter: Document) -> Find<'_, T> { + Find { + coll: self, + filter, + options: None, + session: ImplicitSession, + } + } +} + +impl Collection { + /// Finds a single document in the collection matching `filter`. + /// + /// `await` will return d[`Result>`]. + #[deeplink] + pub fn find_one(&self, filter: Document) -> FindOne<'_, T> { + FindOne { + coll: self, + filter, + options: None, + session: None, + } + } +} + +#[cfg(feature = "sync")] +impl crate::sync::Collection { + /// Finds the documents in the collection matching `filter`. + /// + /// [`run`](Find::run) will return d[`Result>`] (or + /// d[`Result>`] if a session is provided). + #[deeplink] + pub fn find(&self, filter: Document) -> Find<'_, T> { + self.async_collection.find(filter) + } +} + +#[cfg(feature = "sync")] +impl crate::sync::Collection { + /// Finds a single document in the collection matching `filter`. + /// + /// [`run`](Find::run) will return d[`Result>`]. + #[deeplink] + pub fn find_one(&self, filter: Document) -> FindOne<'_, T> { + self.async_collection.find_one(filter) + } +} + +/// Finds the documents in a collection matching a filter. Construct with [`Collection::find`]. 
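A usage sketch of the new required-filter `find` signature with chained options, matching the updated manual example earlier in this patch; the database, collection, filter, and sort are invented.

```rust
use futures::stream::TryStreamExt;
use mongodb::{
    bson::{doc, Document},
    error::Result,
    Client,
};

async fn recent_items(client: &Client) -> Result<()> {
    let coll = client.database("store").collection::<Document>("items");

    // The filter is now a required argument; options are chained onto the action.
    let mut cursor = coll
        .find(doc! { "in_stock": true })
        .sort(doc! { "added": -1 })
        .limit(20)
        .await?;

    while let Some(item) = cursor.try_next().await? {
        println!("{item}");
    }
    Ok(())
}
```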
+#[must_use] +pub struct Find<'a, T: Send + Sync, Session = ImplicitSession> { + coll: &'a Collection, + filter: Document, + options: Option, + session: Session, +} + +impl<'a, T: Send + Sync, Session> Find<'a, T, Session> { + option_setters!(options: FindOptions; + allow_disk_use: bool, + allow_partial_results: bool, + batch_size: u32, + comment: String, + comment_bson: Bson, + cursor_type: CursorType, + hint: Hint, + limit: i64, + max: Document, + max_await_time: Duration, + max_scan: u64, + max_time: Duration, + min: Document, + no_cursor_timeout: bool, + projection: Document, + read_concern: ReadConcern, + return_key: bool, + selection_criteria: SelectionCriteria, + show_record_id: bool, + skip: u64, + sort: Document, + collation: Collation, + let_vars: Document, + ); + + /// Use the provided session when running the operation. + pub fn session<'s>( + self, + value: impl Into<&'s mut ClientSession>, + ) -> Find<'a, T, ExplicitSession<'s>> { + Find { + coll: self.coll, + filter: self.filter, + options: self.options, + session: ExplicitSession(value.into()), + } + } +} + +#[action_impl(sync = crate::sync::Cursor)] +impl<'a, T: Send + Sync> Action for Find<'a, T, ImplicitSession> { + type Future = FindFuture; + + async fn execute(mut self) -> Result> { + resolve_options!(self.coll, self.options, [read_concern, selection_criteria]); + + let find = Op::new(self.coll.namespace(), self.filter, self.options); + self.coll.client().execute_cursor_operation(find).await + } +} + +#[action_impl(sync = crate::sync::SessionCursor)] +impl<'a, T: Send + Sync> Action for Find<'a, T, ExplicitSession<'a>> { + type Future = FindSessionFuture; + + async fn execute(mut self) -> Result> { + resolve_read_concern_with_session!(self.coll, self.options, Some(&mut *self.session.0))?; + resolve_selection_criteria_with_session!( + self.coll, + self.options, + Some(&mut *self.session.0) + )?; + + let find = Op::new(self.coll.namespace(), self.filter, self.options); + self.coll + .client() + .execute_session_cursor_operation(find, self.session.0) + .await + } +} + +/// Finds a single document in a collection matching a filter. Construct with +/// [`Collection::find_one`]. +#[must_use] +pub struct FindOne<'a, T: Send + Sync> { + coll: &'a Collection, + filter: Document, + options: Option, + session: Option<&'a mut ClientSession>, +} + +impl<'a, T: Send + Sync> FindOne<'a, T> { + option_setters! { options: FindOneOptions; + allow_partial_results: bool, + collation: Collation, + comment: String, + comment_bson: Bson, + hint: Hint, + max: Document, + max_scan: u64, + max_time: Duration, + min: Document, + projection: Document, + read_concern: ReadConcern, + return_key: bool, + selection_criteria: SelectionCriteria, + show_record_id: bool, + skip: u64, + sort: Document, + let_vars: Document, + } + + /// Use the provided session when running the operation. 
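And the single-document variant, with options chained the same way; names and values are invented.

```rust
use mongodb::{
    bson::{doc, Document},
    error::Result,
    Client,
};

async fn newest_item(client: &Client) -> Result<Option<Document>> {
    let coll = client.database("store").collection::<Document>("items");

    coll.find_one(doc! { "in_stock": true })
        .sort(doc! { "added": -1 })
        .projection(doc! { "name": 1, "price": 1 })
        .await
}
```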
+ pub fn session(mut self, value: impl Into<&'a mut ClientSession>) -> Self { + self.session = Some(value.into()); + self + } +} + +#[action_impl] +impl<'a, T: DeserializeOwned + Send + Sync> Action for FindOne<'a, T> { + type Future = FindOneFuture; + + async fn execute(self) -> Result> { + use futures_util::stream::StreamExt; + let mut options: FindOptions = self.options.unwrap_or_default().into(); + options.limit = Some(-1); + let find = self.coll.find(self.filter).with_options(options); + if let Some(session) = self.session { + let mut cursor = find.session(&mut *session).await?; + let mut stream = cursor.stream(session); + stream.next().await.transpose() + } else { + let mut cursor = find.await?; + cursor.next().await.transpose() + } + } +} diff --git a/src/action/find_and_modify.rs b/src/action/find_and_modify.rs new file mode 100644 index 000000000..b46bdab01 --- /dev/null +++ b/src/action/find_and_modify.rs @@ -0,0 +1,318 @@ +use std::{borrow::Borrow, time::Duration}; + +use bson::{Bson, Document, RawDocumentBuf}; +use serde::{de::DeserializeOwned, Serialize}; + +use crate::{ + coll::options::{ + FindOneAndDeleteOptions, + FindOneAndReplaceOptions, + FindOneAndUpdateOptions, + Hint, + ReturnDocument, + UpdateModifications, + }, + collation::Collation, + error::Result, + operation::{ + find_and_modify::options::{FindAndModifyOptions, Modification}, + FindAndModify as Op, + UpdateOrReplace, + }, + options::WriteConcern, + serde_util, + ClientSession, + Collection, +}; + +use super::{action_impl, deeplink, option_setters}; + +impl Collection { + async fn find_and_modify<'a>( + &self, + filter: Document, + modification: Modification, + mut options: Option, + session: Option<&'a mut ClientSession>, + ) -> Result> { + resolve_write_concern_with_session!(self, options, session.as_ref())?; + + let op = Op::::with_modification(self.namespace(), filter, modification, options)?; + self.client().execute_operation(op, session).await + } + + /// Atomically finds up to one document in the collection matching `filter` and deletes it. + /// + /// This operation will retry once upon failure if the connection and encountered error support + /// retryability. See the documentation + /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on + /// retryable writes. + /// + /// `await` will return d[`Result>`]. + #[deeplink] + pub fn find_one_and_delete(&self, filter: Document) -> FindOneAndDelete<'_, T> { + FindOneAndDelete { + coll: self, + filter, + options: None, + session: None, + } + } + + /// Atomically finds up to one document in the collection matching `filter` and updates it. + /// Both `Document` and `Vec` implement `Into`, so either can be + /// passed in place of constructing the enum case. Note: pipeline updates are only supported + /// in MongoDB 4.2+. + /// + /// This operation will retry once upon failure if the connection and encountered error support + /// retryability. See the documentation + /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on + /// retryable writes. + /// + /// `await` will return d[`Result>`]. + #[deeplink] + pub fn find_one_and_update( + &self, + filter: Document, + update: impl Into, + ) -> FindOneAndUpdate<'_, T> { + FindOneAndUpdate { + coll: self, + filter, + update: update.into(), + options: None, + session: None, + } + } +} + +impl Collection { + /// Atomically finds up to one document in the collection matching `filter` and replaces it with + /// `replacement`. 
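A sketch of the update variant at a call site, using the `return_document` setter listed below for `FindOneAndUpdate`; the collection name, filter, and update document are invented.

```rust
use mongodb::{
    bson::{doc, Document},
    error::Result,
    options::ReturnDocument,
    Client,
};

async fn bump_quantity(client: &Client) -> Result<Option<Document>> {
    let coll = client.database("store").collection::<Document>("items");

    // Atomically increment and return the post-update document.
    coll.find_one_and_update(
        doc! { "sku": "abc-123" },
        doc! { "$inc": { "quantity": 1 } },
    )
    .return_document(ReturnDocument::After)
    .await
}
```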
+ /// + /// This operation will retry once upon failure if the connection and encountered error support + /// retryability. See the documentation + /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on + /// retryable writes. + /// + /// `await` will return d[`Result>`]. + #[deeplink] + pub fn find_one_and_replace( + &self, + filter: Document, + replacement: impl Borrow, + ) -> FindOneAndReplace<'_, T> { + FindOneAndReplace { + coll: self, + filter, + replacement: serde_util::to_raw_document_buf_with_options( + replacement.borrow(), + self.human_readable_serialization(), + ), + options: None, + session: None, + } + } +} + +#[cfg(feature = "sync")] +impl crate::sync::Collection { + /// Atomically finds up to one document in the collection matching `filter` and deletes it. + /// + /// This operation will retry once upon failure if the connection and encountered error support + /// retryability. See the documentation + /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on + /// retryable writes. + /// + /// [`run`](FindOneAndDelete::run) will return d[`Result>`]. + #[deeplink] + pub fn find_one_and_delete(&self, filter: Document) -> FindOneAndDelete<'_, T> { + self.async_collection.find_one_and_delete(filter) + } + + /// Atomically finds up to one document in the collection matching `filter` and updates it. + /// Both `Document` and `Vec` implement `Into`, so either can be + /// passed in place of constructing the enum case. Note: pipeline updates are only supported + /// in MongoDB 4.2+. + /// + /// This operation will retry once upon failure if the connection and encountered error support + /// retryability. See the documentation + /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on + /// retryable writes. + /// + /// [`run`](FindOneAndDelete::run) will return d[`Result>`]. + #[deeplink] + pub fn find_one_and_update( + &self, + filter: Document, + update: impl Into, + ) -> FindOneAndUpdate<'_, T> { + self.async_collection.find_one_and_update(filter, update) + } +} + +#[cfg(feature = "sync")] +impl crate::sync::Collection { + /// Atomically finds up to one document in the collection matching `filter` and replaces it with + /// `replacement`. + /// + /// This operation will retry once upon failure if the connection and encountered error support + /// retryability. See the documentation + /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on + /// retryable writes. + /// + /// [`run`](FindOneAndReplace::run) will return d[`Result>`]. + #[deeplink] + pub fn find_one_and_replace( + &self, + filter: Document, + replacement: impl Borrow, + ) -> FindOneAndReplace<'_, T> { + self.async_collection + .find_one_and_replace(filter, replacement) + } +} + +/// Atomically finds up to one document in the collection matching a filter and deletes it. +/// Construct with [`Collection::find_one_and_delete`]. +#[must_use] +pub struct FindOneAndDelete<'a, T: Send + Sync> { + coll: &'a Collection, + filter: Document, + options: Option, + session: Option<&'a mut ClientSession>, +} + +impl<'a, T: Send + Sync> FindOneAndDelete<'a, T> { + option_setters! { options: FindOneAndDeleteOptions; + max_time: Duration, + projection: Document, + sort: Document, + write_concern: WriteConcern, + collation: Collation, + hint: Hint, + let_vars: Document, + comment: Bson, + } + + /// Use the provided session when running the operation. 
+ pub fn session(mut self, value: impl Into<&'a mut ClientSession>) -> Self { + self.session = Some(value.into()); + self + } +} + +#[action_impl] +impl<'a, T: DeserializeOwned + Send + Sync> Action for FindOneAndDelete<'a, T> { + type Future = FindOneAndDeleteFuture; + + async fn execute(self) -> Result> { + self.coll + .find_and_modify( + self.filter, + Modification::Delete, + self.options.map(FindAndModifyOptions::from), + self.session, + ) + .await + } +} + +/// Atomically finds up to one document in the collection matching a filter and updates it. +/// Construct with [`Collection::find_one_and_update`]. +#[must_use] +pub struct FindOneAndUpdate<'a, T: Send + Sync> { + coll: &'a Collection, + filter: Document, + update: UpdateModifications, + options: Option, + session: Option<&'a mut ClientSession>, +} + +impl<'a, T: Send + Sync> FindOneAndUpdate<'a, T> { + option_setters! { options: FindOneAndUpdateOptions; + array_filters: Vec, + bypass_document_validation: bool, + max_time: Duration, + projection: Document, + return_document: ReturnDocument, + sort: Document, + upsert: bool, + write_concern: WriteConcern, + collation: Collation, + hint: Hint, + let_vars: Document, + comment: Bson, + } + + /// Use the provided session when running the operation. + pub fn session(mut self, value: impl Into<&'a mut ClientSession>) -> Self { + self.session = Some(value.into()); + self + } +} + +#[action_impl] +impl<'a, T: DeserializeOwned + Send + Sync> Action for FindOneAndUpdate<'a, T> { + type Future = FindOneAndUpdateFuture; + + async fn execute(self) -> Result> { + self.coll + .find_and_modify( + self.filter, + Modification::Update(self.update.into()), + self.options.map(FindAndModifyOptions::from), + self.session, + ) + .await + } +} + +/// Atomically finds up to one document in the collection matching a filter and replaces it. +/// Construct with [`Collection::find_one_and_replace`]. +#[must_use] +pub struct FindOneAndReplace<'a, T: Send + Sync> { + coll: &'a Collection, + filter: Document, + replacement: Result, + options: Option, + session: Option<&'a mut ClientSession>, +} + +impl<'a, T: Send + Sync> FindOneAndReplace<'a, T> { + option_setters! { options: FindOneAndReplaceOptions; + bypass_document_validation: bool, + max_time: Duration, + projection: Document, + return_document: ReturnDocument, + sort: Document, + upsert: bool, + write_concern: WriteConcern, + collation: Collation, + hint: Hint, + let_vars: Document, + comment: Bson, + } + + /// Use the provided session when running the operation. + pub fn session(mut self, value: impl Into<&'a mut ClientSession>) -> Self { + self.session = Some(value.into()); + self + } +} + +#[action_impl] +impl<'a, T: DeserializeOwned + Send + Sync> Action for FindOneAndReplace<'a, T> { + type Future = FindOneAndReplaceFuture; + + async fn execute(self) -> Result> { + self.coll + .find_and_modify( + self.filter, + Modification::Update(UpdateOrReplace::Replacement(self.replacement?)), + self.options.map(FindAndModifyOptions::from), + self.session, + ) + .await + } +} diff --git a/src/action/gridfs.rs b/src/action/gridfs.rs new file mode 100644 index 000000000..bcbedd02a --- /dev/null +++ b/src/action/gridfs.rs @@ -0,0 +1,15 @@ +//! Action builders for gridfs. 
+ +mod delete; +mod download; +mod drop; +mod find; +mod rename; +mod upload; + +pub use delete::Delete; +pub use download::{OpenDownloadStream, OpenDownloadStreamByName}; +pub use drop::Drop; +pub use find::{Find, FindOne}; +pub use rename::Rename; +pub use upload::OpenUploadStream; diff --git a/src/action/gridfs/delete.rs b/src/action/gridfs/delete.rs new file mode 100644 index 000000000..9ac36d20e --- /dev/null +++ b/src/action/gridfs/delete.rs @@ -0,0 +1,68 @@ +use bson::{doc, Bson}; + +#[cfg(docsrs)] +use crate::gridfs::FilesCollectionDocument; +use crate::{ + action::action_impl, + error::{ErrorKind, GridFsErrorKind, GridFsFileIdentifier, Result}, + GridFsBucket, +}; + +impl GridFsBucket { + /// Deletes the [`FilesCollectionDocument`] with the given `id` and its associated chunks from + /// this bucket. This method returns an error if the `id` does not match any files in the + /// bucket. + /// + /// `await` will return [`Result<()>`]. + pub fn delete(&self, id: Bson) -> Delete { + Delete { bucket: self, id } + } +} + +#[cfg(feature = "sync")] +impl crate::sync::gridfs::GridFsBucket { + /// Deletes the [`FilesCollectionDocument`] with the given `id` and its associated chunks from + /// this bucket. This method returns an error if the `id` does not match any files in the + /// bucket. + /// + /// [`run`](Delete::run) will return [`Result<()>`]. + pub fn delete(&self, id: Bson) -> Delete { + self.async_bucket.delete(id) + } +} + +/// Deletes a specific [`FilesCollectionDocument`] and its associated chunks. Construct with +/// [`GridFsBucket::delete`]. +#[must_use] +pub struct Delete<'a> { + bucket: &'a GridFsBucket, + id: Bson, +} + +#[action_impl] +impl<'a> Action for Delete<'a> { + type Future = DeleteFuture; + + async fn execute(self) -> Result<()> { + let delete_result = self + .bucket + .files() + .delete_one(doc! { "_id": self.id.clone() }) + .await?; + // Delete chunks regardless of whether a file was found. This will remove any possibly + // orphaned chunks. + self.bucket + .chunks() + .delete_many(doc! { "files_id": self.id.clone() }) + .await?; + + if delete_result.deleted_count == 0 { + return Err(ErrorKind::GridFs(GridFsErrorKind::FileNotFound { + identifier: GridFsFileIdentifier::Id(self.id), + }) + .into()); + } + + Ok(()) + } +} diff --git a/src/action/gridfs/download.rs b/src/action/gridfs/download.rs new file mode 100644 index 000000000..520223ab6 --- /dev/null +++ b/src/action/gridfs/download.rs @@ -0,0 +1,168 @@ +use bson::{doc, Bson}; + +use crate::{ + action::{action_impl, deeplink, option_setters}, + error::{ErrorKind, GridFsErrorKind, GridFsFileIdentifier, Result}, + gridfs::{FilesCollectionDocument, GridFsDownloadByNameOptions}, + GridFsBucket, + GridFsDownloadStream, +}; + +impl GridFsBucket { + /// Opens and returns a [`GridFsDownloadStream`] from which the application can read + /// the contents of the stored file specified by `id`. + /// + /// `await` will return d[`Result`]. + #[deeplink] + pub fn open_download_stream(&self, id: Bson) -> OpenDownloadStream { + OpenDownloadStream { bucket: self, id } + } + + /// Opens and returns a [`GridFsDownloadStream`] from which the application can read + /// the contents of the stored file specified by `filename`. + /// + /// If there are multiple files in the bucket with the given filename, the `revision` in the + /// options provided is used to determine which one to download. See the documentation for + /// [`GridFsDownloadByNameOptions`] for details on how to specify a revision. 
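A usage sketch for the `GridFsBucket::delete` builder defined above; the URI, database name, and file id are placeholders.

```rust
use mongodb::{
    bson::{doc, Bson},
    Client,
};

async fn delete_file(id: Bson) -> mongodb::error::Result<()> {
    // Placeholder URI and database name.
    let client = Client::with_uri_str("mongodb://localhost:27017").await?;
    let bucket = client.database("app").gridfs_bucket(None);

    // Errors with a FileNotFound GridFS error if no file matches `id`;
    // any orphaned chunks are removed either way.
    bucket.delete(id).await?;
    Ok(())
}
```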
If no revision is + /// provided, the file with `filename` most recently uploaded will be downloaded. + /// + /// `await` will return d[`Result`]. + #[deeplink] + pub fn open_download_stream_by_name( + &self, + filename: impl Into, + ) -> OpenDownloadStreamByName { + OpenDownloadStreamByName { + bucket: self, + filename: filename.into(), + options: None, + } + } + + // Utility functions for finding files within the bucket. + + async fn find_file_by_id(&self, id: &Bson) -> Result { + match self.find_one(doc! { "_id": id }).await? { + Some(file) => Ok(file), + None => Err(ErrorKind::GridFs(GridFsErrorKind::FileNotFound { + identifier: GridFsFileIdentifier::Id(id.clone()), + }) + .into()), + } + } + + async fn find_file_by_name( + &self, + filename: &str, + options: Option, + ) -> Result { + let revision = options.and_then(|opts| opts.revision).unwrap_or(-1); + let (sort, skip) = if revision >= 0 { + (1, revision) + } else { + (-1, -revision - 1) + }; + + match self + .files() + .find_one(doc! { "filename": filename }) + .sort(doc! { "uploadDate": sort }) + .skip(skip as u64) + .await? + { + Some(fcd) => Ok(fcd), + None => { + if self + .files() + .find_one(doc! { "filename": filename }) + .await? + .is_some() + { + Err(ErrorKind::GridFs(GridFsErrorKind::RevisionNotFound { revision }).into()) + } else { + Err(ErrorKind::GridFs(GridFsErrorKind::FileNotFound { + identifier: GridFsFileIdentifier::Filename(filename.into()), + }) + .into()) + } + } + } + } +} + +#[cfg(feature = "sync")] +impl crate::sync::gridfs::GridFsBucket { + /// Opens and returns a [`GridFsDownloadStream`] from which the application can read + /// the contents of the stored file specified by `id`. + /// + /// [`run`](OpenDownloadStream::run) will return d[`Result`]. + #[deeplink] + pub fn open_download_stream(&self, id: Bson) -> OpenDownloadStream { + self.async_bucket.open_download_stream(id) + } + + /// Opens and returns a [`GridFsDownloadStream`] from which the application can read + /// the contents of the stored file specified by `filename`. + /// + /// If there are multiple files in the bucket with the given filename, the `revision` in the + /// options provided is used to determine which one to download. See the documentation for + /// [`GridFsDownloadByNameOptions`] for details on how to specify a revision. If no revision is + /// provided, the file with `filename` most recently uploaded will be downloaded. + /// + /// [`run`](OpenDownloadStreamByName::run) will return d[`Result`]. + #[deeplink] + pub fn open_download_stream_by_name( + &self, + filename: impl Into, + ) -> OpenDownloadStreamByName { + self.async_bucket.open_download_stream_by_name(filename) + } +} + +/// Opens and returns a [`GridFsDownloadStream`] from which the application can read +/// the contents of the stored file specified by an id. Construct with +/// [`GridFsBucket::open_download_stream`]. +#[must_use] +pub struct OpenDownloadStream<'a> { + bucket: &'a GridFsBucket, + id: Bson, +} + +#[action_impl(sync = crate::sync::gridfs::GridFsDownloadStream)] +impl<'a> Action for OpenDownloadStream<'a> { + type Future = OpenDownloadStreamFuture; + + async fn execute(self) -> Result { + let file = self.bucket.find_file_by_id(&self.id).await?; + GridFsDownloadStream::new(file, self.bucket.chunks()).await + } +} + +/// Opens and returns a [`GridFsDownloadStream`] from which the application can read +/// the contents of the stored file specified by a filename. Construct with +/// [`GridFsBucket::open_download_stream_by_name`]. 
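A sketch of reading a stored file back through the download builders above. It assumes the returned `GridFsDownloadStream` implements `futures_util::io::AsyncRead` as the existing stream type does; the URI, bucket database, and filename are placeholders.

```rust
use futures_util::io::AsyncReadExt;
use mongodb::Client;

async fn read_latest_revision() -> Result<Vec<u8>, Box<dyn std::error::Error>> {
    // Placeholder URI and names.
    let client = Client::with_uri_str("mongodb://localhost:27017").await?;
    let bucket = client.database("app").gridfs_bucket(None);

    // -1 selects the most recently uploaded revision of "report.pdf".
    let mut stream = bucket
        .open_download_stream_by_name("report.pdf")
        .revision(-1)
        .await?;

    let mut bytes = Vec::new();
    stream.read_to_end(&mut bytes).await?;
    Ok(bytes)
}
```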
+#[must_use] +pub struct OpenDownloadStreamByName<'a> { + bucket: &'a GridFsBucket, + filename: String, + options: Option, +} + +impl<'a> OpenDownloadStreamByName<'a> { + option_setters! { options: GridFsDownloadByNameOptions; + revision: i32, + } +} + +#[action_impl(sync = crate::sync::gridfs::GridFsDownloadStream)] +impl<'a> Action for OpenDownloadStreamByName<'a> { + type Future = OpenDownloadStreamByNameFuture; + + async fn execute(self) -> Result { + let file = self + .bucket + .find_file_by_name(&self.filename, self.options) + .await?; + GridFsDownloadStream::new(file, self.bucket.chunks()).await + } +} diff --git a/src/action/gridfs/drop.rs b/src/action/gridfs/drop.rs new file mode 100644 index 000000000..153952b5d --- /dev/null +++ b/src/action/gridfs/drop.rs @@ -0,0 +1,39 @@ +use crate::{action::action_impl, error::Result, GridFsBucket}; + +impl GridFsBucket { + /// Removes all of the files and their associated chunks from this bucket. + /// + /// `await` will return [`Result<()>`]. + pub fn drop(&self) -> Drop { + Drop { bucket: self } + } +} + +#[cfg(feature = "sync")] +impl crate::sync::gridfs::GridFsBucket { + /// Removes all of the files and their associated chunks from this bucket. + /// + /// [`run`](Drop::run) will return [`Result<()>`]. + pub fn drop(&self) -> Drop { + self.async_bucket.drop() + } +} + +/// Removes all of the files and their associated chunks from a bucket. Construct with +/// [`GridFsBucket::drop`]. +#[must_use] +pub struct Drop<'a> { + bucket: &'a GridFsBucket, +} + +#[action_impl] +impl<'a> Action for Drop<'a> { + type Future = DropFuture; + + async fn execute(self) -> Result<()> { + self.bucket.files().drop().await?; + self.bucket.chunks().drop().await?; + + Ok(()) + } +} diff --git a/src/action/gridfs/find.rs b/src/action/gridfs/find.rs new file mode 100644 index 000000000..25fa9138f --- /dev/null +++ b/src/action/gridfs/find.rs @@ -0,0 +1,126 @@ +use std::time::Duration; + +use bson::Document; + +use crate::{ + action::{action_impl, deeplink, option_setters}, + coll::options::{FindOneOptions, FindOptions}, + error::Result, + gridfs::{FilesCollectionDocument, GridFsFindOneOptions, GridFsFindOptions}, + Cursor, + GridFsBucket, +}; + +impl GridFsBucket { + /// Finds and returns the [`FilesCollectionDocument`]s within this bucket that match the given + /// filter. + /// + /// `await` will return d[`Result>`]. + #[deeplink] + pub fn find(&self, filter: Document) -> Find { + Find { + bucket: self, + filter, + options: None, + } + } + + /// Finds and returns a single [`FilesCollectionDocument`] within this bucket that matches the + /// given filter. + /// + /// `await` will return d[`Result>`]. + #[deeplink] + pub fn find_one(&self, filter: Document) -> FindOne { + FindOne { + bucket: self, + filter, + options: None, + } + } +} + +#[cfg(feature = "sync")] +impl crate::sync::gridfs::GridFsBucket { + /// Finds and returns the [`FilesCollectionDocument`]s within this bucket that match the given + /// filter. + /// + /// [`run`](Find::run) will return d[`Result>`]. + #[deeplink] + pub fn find(&self, filter: Document) -> Find { + self.async_bucket.find(filter) + } + + /// Finds and returns a single [`FilesCollectionDocument`] within this bucket that matches the + /// given filter. + /// + /// [`run`](FindOne::run) will return d[`Result>`]. + #[deeplink] + pub fn find_one(&self, filter: Document) -> FindOne { + self.async_bucket.find_one(filter) + } +} + +/// Finds and returns the [`FilesCollectionDocument`]s within a bucket that match a given +/// filter. 
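A usage sketch for the `find` and `find_one` builders above. They take the same filter documents as collection queries and expose the GridFS-specific option setters; the cursor is drained with `TryStreamExt` from `futures_util`, as elsewhere in the driver. Names are placeholders.

```rust
use futures_util::TryStreamExt;
use mongodb::{bson::doc, Client};

async fn inspect_files() -> mongodb::error::Result<()> {
    // Placeholder URI and database name.
    let client = Client::with_uri_str("mongodb://localhost:27017").await?;
    let bucket = client.database("app").gridfs_bucket(None);

    // Newest five files larger than 1 MiB.
    let files = bucket
        .find(doc! { "length": { "$gt": 1_048_576 } })
        .sort(doc! { "uploadDate": -1 })
        .limit(5)
        .await?
        .try_collect::<Vec<_>>()
        .await?;
    for file in &files {
        println!("{:?} ({} bytes)", file.filename, file.length);
    }

    // Or fetch a single files-collection document directly.
    let newest = bucket
        .find_one(doc! {})
        .sort(doc! { "uploadDate": -1 })
        .await?;
    println!("newest file: {:?}", newest.map(|f| f.filename));

    Ok(())
}
```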
Construct with [`GridFsBucket::find`]. +#[must_use] +pub struct Find<'a> { + bucket: &'a GridFsBucket, + filter: Document, + options: Option, +} + +impl<'a> Find<'a> { + option_setters! { options: GridFsFindOptions; + allow_disk_use: bool, + batch_size: u32, + limit: i64, + max_time: Duration, + skip: u64, + sort: Document, + } +} + +#[action_impl(sync = crate::sync::Cursor)] +impl<'a> Action for Find<'a> { + type Future = FindFuture; + + async fn execute(self) -> Result> { + let find_options = self.options.map(FindOptions::from); + self.bucket + .files() + .find(self.filter) + .with_options(find_options) + .await + } +} + +/// Finds and returns a single [`FilesCollectionDocument`] within a bucket that matches a +/// given filter. Construct with [`GridFsBucket::find_one`]. +#[must_use] +pub struct FindOne<'a> { + bucket: &'a GridFsBucket, + filter: Document, + options: Option, +} + +impl<'a> FindOne<'a> { + option_setters! { options: GridFsFindOneOptions; + max_time: Duration, + skip: u64, + sort: Document, + } +} + +#[action_impl] +impl<'a> Action for FindOne<'a> { + type Future = FindOneFuture; + + async fn execute(self) -> Result> { + let find_options = self.options.map(FindOneOptions::from); + self.bucket + .files() + .find_one(self.filter) + .with_options(find_options) + .await + } +} diff --git a/src/action/gridfs/rename.rs b/src/action/gridfs/rename.rs new file mode 100644 index 000000000..aab448e07 --- /dev/null +++ b/src/action/gridfs/rename.rs @@ -0,0 +1,53 @@ +use bson::{doc, Bson}; + +use crate::{action::action_impl, error::Result, GridFsBucket}; + +impl GridFsBucket { + /// Renames the file with the given 'id' to the provided `new_filename`. This method returns an + /// error if the `id` does not match any files in the bucket. + /// + /// `await` will return [`Result<()>`]. + pub fn rename(&self, id: Bson, new_filename: impl Into) -> Rename { + Rename { + bucket: self, + id, + new_filename: new_filename.into(), + } + } +} + +#[cfg(feature = "sync")] +impl crate::sync::gridfs::GridFsBucket { + /// Renames the file with the given `id` to the provided `new_filename`. This method returns an + /// error if the `id` does not match any files in the bucket. + /// + /// [`run`](Rename::run) will return [`Result<()>`]. + pub fn rename(&self, id: Bson, new_filename: impl Into) -> Rename { + self.async_bucket.rename(id, new_filename) + } +} + +/// Renames a file. Construct with [`GridFsBucket::rename`]. +#[must_use] +pub struct Rename<'a> { + bucket: &'a GridFsBucket, + id: Bson, + new_filename: String, +} + +#[action_impl] +impl<'a> Action for Rename<'a> { + type Future = RenameFuture; + + async fn execute(self) -> Result<()> { + self.bucket + .files() + .update_one( + doc! { "_id": self.id }, + doc! { "$set": { "filename": self.new_filename } }, + ) + .await?; + + Ok(()) + } +} diff --git a/src/action/gridfs/upload.rs b/src/action/gridfs/upload.rs new file mode 100644 index 000000000..ae14d5ca8 --- /dev/null +++ b/src/action/gridfs/upload.rs @@ -0,0 +1,87 @@ +use bson::{oid::ObjectId, Bson, Document}; + +#[cfg(docsrs)] +use crate::gridfs::FilesCollectionDocument; +use crate::{ + action::{action_impl, deeplink, option_setters}, + error::Result, + gridfs::GridFsUploadOptions, + GridFsBucket, + GridFsUploadStream, +}; + +impl GridFsBucket { + /// Creates and returns a [`GridFsUploadStream`] that the application can write the contents of + /// the file to. + /// + /// `await` will return d[`Result`]. 
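A short sketch of the `rename` builder above. It issues a single `update_one` against the files collection; chunks are keyed by `files_id`, so they need no change. The id and new filename are placeholders.

```rust
use mongodb::{bson::Bson, Client};

async fn rename_file(id: Bson) -> mongodb::error::Result<()> {
    // Placeholder URI and names.
    let client = Client::with_uri_str("mongodb://localhost:27017").await?;
    let bucket = client.database("app").gridfs_bucket(None);

    // Only the `filename` field of the files-collection document changes.
    bucket.rename(id, "2024-report.pdf").await?;
    Ok(())
}
```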
+ #[deeplink] + pub fn open_upload_stream(&self, filename: impl AsRef) -> OpenUploadStream { + OpenUploadStream { + bucket: self, + filename: filename.as_ref().to_owned(), + id: None, + options: None, + } + } +} + +#[cfg(feature = "sync")] +impl crate::sync::gridfs::GridFsBucket { + /// Creates and returns a [`GridFsUploadStream`] that the application can write the contents of + /// the file to. + /// + /// [`run`](OpenUploadStream::run) will return d[`Result`]. + #[deeplink] + pub fn open_upload_stream(&self, filename: impl AsRef) -> OpenUploadStream { + self.async_bucket.open_upload_stream(filename) + } +} + +/// Creates and returns a [`GridFsUploadStream`] that the application can write the contents of +/// a file to. Construct with [`GridFsBucket::open_upload_stream`]. +#[must_use] +pub struct OpenUploadStream<'a> { + bucket: &'a GridFsBucket, + filename: String, + id: Option, + options: Option, +} + +impl<'a> OpenUploadStream<'a> { + /// Set the value to be used for the corresponding [`FilesCollectionDocument`]'s `id` + /// field. If not set, a unique [`ObjectId`] will be generated that can be accessed via the + /// stream's [`id`](GridFsUploadStream::id) method. + pub fn id(mut self, value: Bson) -> Self { + self.id = Some(value); + self + } + + option_setters! { options: GridFsUploadOptions; + chunk_size_bytes: u32, + metadata: Document, + } +} + +#[action_impl(sync = crate::sync::gridfs::GridFsUploadStream)] +impl<'a> Action for OpenUploadStream<'a> { + type Future = OpenUploadStreamFuture; + + async fn execute(self) -> Result { + let id = self.id.unwrap_or_else(|| ObjectId::new().into()); + let chunk_size_bytes = self + .options + .as_ref() + .and_then(|opts| opts.chunk_size_bytes) + .unwrap_or_else(|| self.bucket.chunk_size_bytes()); + let metadata = self.options.and_then(|opts| opts.metadata); + Ok(GridFsUploadStream::new( + self.bucket.clone(), + id, + self.filename, + chunk_size_bytes, + metadata, + self.bucket.client().register_async_drop(), + )) + } +} diff --git a/src/action/insert_many.rs b/src/action/insert_many.rs new file mode 100644 index 000000000..147c65c5f --- /dev/null +++ b/src/action/insert_many.rs @@ -0,0 +1,198 @@ +use std::{borrow::Borrow, collections::HashSet, ops::Deref}; + +use bson::{Bson, RawDocumentBuf}; +use serde::Serialize; + +use crate::{ + coll::options::InsertManyOptions, + error::{BulkWriteError, BulkWriteFailure, Error, ErrorKind, Result}, + operation::Insert as Op, + options::WriteConcern, + results::InsertManyResult, + serde_util, + ClientSession, + Collection, +}; + +use super::{action_impl, deeplink, option_setters, CollRef}; + +impl Collection { + /// Inserts the data in `docs` into the collection. + /// + /// Note that this method accepts both owned and borrowed values, so the input documents + /// do not need to be cloned in order to be passed in. + /// + /// This operation will retry once upon failure if the connection and encountered error support + /// retryability. See the documentation + /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on + /// retryable writes. + /// + /// `await` will return d[`Result`]. 
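A sketch of writing a file through the upload builder above. It assumes the returned `GridFsUploadStream` implements `futures_util::io::AsyncWrite` (as the existing stream type does) and that closing the stream flushes the final chunk and writes the files-collection document; the filename and metadata are placeholders.

```rust
use futures_util::io::AsyncWriteExt;
use mongodb::{bson::doc, Client};

async fn store_bytes(data: &[u8]) -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder URI and database name.
    let client = Client::with_uri_str("mongodb://localhost:27017").await?;
    let bucket = client.database("app").gridfs_bucket(None);

    let mut stream = bucket
        .open_upload_stream("report.pdf")
        .metadata(doc! { "contentType": "application/pdf" })
        .await?;

    stream.write_all(data).await?;
    // Closing finalizes the upload.
    stream.close().await?;

    // The generated id is available from the stream, per the builder docs above.
    println!("stored file with id {}", stream.id());
    Ok(())
}
```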
+ #[deeplink] + pub fn insert_many(&self, docs: impl IntoIterator>) -> InsertMany { + let human_readable = self.human_readable_serialization(); + InsertMany { + coll: CollRef::new(self), + docs: docs + .into_iter() + .map(|v| serde_util::to_raw_document_buf_with_options(v.borrow(), human_readable)) + .collect(), + options: None, + session: None, + } + } +} + +#[cfg(feature = "sync")] +impl crate::sync::Collection { + /// Inserts the data in `docs` into the collection. + /// + /// Note that this method accepts both owned and borrowed values, so the input documents + /// do not need to be cloned in order to be passed in. + /// + /// This operation will retry once upon failure if the connection and encountered error support + /// retryability. See the documentation + /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on + /// retryable writes. + /// + /// [`run`](InsertMany::run) will return d[`Result`]. + #[deeplink] + pub fn insert_many(&self, docs: impl IntoIterator>) -> InsertMany { + self.async_collection.insert_many(docs) + } +} + +/// Inserts documents into a collection. Construct with [`Collection::insert_many`]. +#[must_use] +pub struct InsertMany<'a> { + coll: CollRef<'a>, + docs: Result>, + options: Option, + session: Option<&'a mut ClientSession>, +} + +impl<'a> InsertMany<'a> { + option_setters! { options: InsertManyOptions; + bypass_document_validation: bool, + ordered: bool, + write_concern: WriteConcern, + comment: Bson, + } + + /// Use the provided session when running the operation. + pub fn session(mut self, value: impl Into<&'a mut ClientSession>) -> Self { + self.session = Some(value.into()); + self + } +} + +#[action_impl] +impl<'a> Action for InsertMany<'a> { + type Future = InsertManyFuture; + + async fn execute(mut self) -> Result { + resolve_write_concern_with_session!(self.coll, self.options, self.session.as_ref())?; + + let ds = self.docs?; + if ds.is_empty() { + return Err(ErrorKind::InvalidArgument { + message: "No documents provided to insert_many".to_string(), + } + .into()); + } + let ordered = self + .options + .as_ref() + .and_then(|o| o.ordered) + .unwrap_or(true); + #[cfg(feature = "in-use-encryption-unstable")] + let encrypted = self.coll.client().auto_encryption_opts().await.is_some(); + #[cfg(not(feature = "in-use-encryption-unstable"))] + let encrypted = false; + + let mut cumulative_failure: Option = None; + let mut error_labels: HashSet = Default::default(); + let mut cumulative_result: Option = None; + + let mut n_attempted = 0; + + while n_attempted < ds.len() { + let docs: Vec<_> = ds.iter().skip(n_attempted).map(Deref::deref).collect(); + let insert = Op::new(self.coll.namespace(), docs, self.options.clone(), encrypted); + + match self + .coll + .client() + .execute_operation(insert, self.session.as_deref_mut()) + .await + { + Ok(result) => { + let current_batch_size = result.inserted_ids.len(); + + let cumulative_result = + cumulative_result.get_or_insert_with(InsertManyResult::new); + for (index, id) in result.inserted_ids { + cumulative_result + .inserted_ids + .insert(index + n_attempted, id); + } + + n_attempted += current_batch_size; + } + Err(e) => { + let labels = e.labels().clone(); + match *e.kind { + ErrorKind::BulkWrite(bw) => { + // for ordered inserts this size will be incorrect, but knowing the + // batch size isn't needed for ordered + // failures since we return immediately from + // them anyways. 
+ let current_batch_size = bw.inserted_ids.len() + + bw.write_errors.as_ref().map(|we| we.len()).unwrap_or(0); + + let failure_ref = + cumulative_failure.get_or_insert_with(BulkWriteFailure::new); + if let Some(write_errors) = bw.write_errors { + for err in write_errors { + let index = n_attempted + err.index; + + failure_ref + .write_errors + .get_or_insert_with(Default::default) + .push(BulkWriteError { index, ..err }); + } + } + + if let Some(wc_error) = bw.write_concern_error { + failure_ref.write_concern_error = Some(wc_error); + } + + error_labels.extend(labels); + + if ordered { + // this will always be true since we invoked get_or_insert_with + // above. + if let Some(failure) = cumulative_failure { + return Err(Error::new( + ErrorKind::BulkWrite(failure), + Some(error_labels), + )); + } + } + n_attempted += current_batch_size; + } + _ => return Err(e), + } + } + } + } + + match cumulative_failure { + Some(failure) => Err(Error::new( + ErrorKind::BulkWrite(failure), + Some(error_labels), + )), + None => Ok(cumulative_result.unwrap_or_else(InsertManyResult::new)), + } + } +} diff --git a/src/action/insert_one.rs b/src/action/insert_one.rs new file mode 100644 index 000000000..effe9448b --- /dev/null +++ b/src/action/insert_one.rs @@ -0,0 +1,114 @@ +use std::{borrow::Borrow, ops::Deref}; + +use bson::{Bson, RawDocumentBuf}; +use serde::Serialize; + +use crate::{ + coll::options::{InsertManyOptions, InsertOneOptions}, + error::{convert_bulk_errors, Result}, + operation::Insert as Op, + options::WriteConcern, + results::InsertOneResult, + serde_util, + ClientSession, + Collection, +}; + +use super::{action_impl, deeplink, option_setters, CollRef}; + +impl Collection { + /// Inserts `doc` into the collection. + /// + /// Note that either an owned or borrowed value can be inserted here, so the input document + /// does not need to be cloned to be passed in. + /// + /// This operation will retry once upon failure if the connection and encountered error support + /// retryability. See the documentation + /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on + /// retryable writes. + /// + /// `await` will return d[`Result`]. + #[deeplink] + pub fn insert_one(&self, doc: impl Borrow) -> InsertOne { + InsertOne { + coll: CollRef::new(self), + doc: serde_util::to_raw_document_buf_with_options( + doc.borrow(), + self.human_readable_serialization(), + ), + options: None, + session: None, + } + } +} + +#[cfg(feature = "sync")] +impl crate::sync::Collection { + /// Inserts `doc` into the collection. + /// + /// Note that either an owned or borrowed value can be inserted here, so the input document + /// does not need to be cloned to be passed in. + /// + /// This operation will retry once upon failure if the connection and encountered error support + /// retryability. See the documentation + /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on + /// retryable writes. + /// + /// [`run`](InsertOne::run) will return d[`Result`]. + #[deeplink] + pub fn insert_one(&self, doc: impl Borrow) -> InsertOne { + self.async_collection.insert_one(doc) + } +} + +/// Inserts a document into a collection. Construct with ['Collection::insert_one`]. +#[must_use] +pub struct InsertOne<'a> { + coll: CollRef<'a>, + doc: Result, + options: Option, + session: Option<&'a mut ClientSession>, +} + +impl<'a> InsertOne<'a> { + option_setters! 
{ options: InsertOneOptions; + bypass_document_validation: bool, + write_concern: WriteConcern, + comment: Bson, + } + + /// Use the provided session when running the operation. + pub fn session(mut self, value: impl Into<&'a mut ClientSession>) -> Self { + self.session = Some(value.into()); + self + } +} + +#[action_impl] +impl<'a> Action for InsertOne<'a> { + type Future = InsertOneFuture; + + async fn execute(mut self) -> Result { + resolve_write_concern_with_session!(self.coll, self.options, self.session.as_ref())?; + + #[cfg(feature = "in-use-encryption-unstable")] + let encrypted = self.coll.client().auto_encryption_opts().await.is_some(); + #[cfg(not(feature = "in-use-encryption-unstable"))] + let encrypted = false; + + let doc = self.doc?; + + let insert = Op::new( + self.coll.namespace(), + vec![doc.deref()], + self.options.map(InsertManyOptions::from_insert_one_options), + encrypted, + ); + self.coll + .client() + .execute_operation(insert, self.session) + .await + .map(InsertOneResult::from_insert_many_result) + .map_err(convert_bulk_errors) + } +} diff --git a/src/action/list_collections.rs b/src/action/list_collections.rs index 15dcb2ed0..21a2ef594 100644 --- a/src/action/list_collections.rs +++ b/src/action/list_collections.rs @@ -16,6 +16,7 @@ use crate::{ use super::{ action_impl, + deeplink, option_setters, ExplicitSession, ImplicitSession, @@ -26,7 +27,8 @@ use super::{ impl Database { /// Gets information about each of the collections in the database. /// - /// `await` will return `Result<`[`Cursor`]`<`[`CollectionSpecification`]`>>`. + /// `await` will return d[`Result>`]. + #[deeplink] pub fn list_collections(&self) -> ListCollections { ListCollections { db: self, @@ -38,7 +40,8 @@ impl Database { /// Gets the names of the collections in the database. /// - /// `await` will return `Result>`. + /// `await` will return d[`Result>`]. + #[deeplink] pub fn list_collection_names(&self) -> ListCollections<'_, ListNames> { ListCollections { db: self, @@ -54,14 +57,16 @@ impl crate::sync::Database { /// Gets information about each of the collections in the database. /// /// [`run`](ListCollections::run) will return - /// `Result<`[`Cursor`]`<`[`CollectionSpecification`]`>>`. + /// d[`Result>`]. + #[deeplink] pub fn list_collections(&self) -> ListCollections { self.async_database.list_collections() } /// Gets the names of the collections in the database. /// - /// [`run`](ListCollections::run) will return `Result>`. + /// [`run`](ListCollections::run) will return d[`Result>`]. + #[deeplink] pub fn list_collection_names(&self) -> ListCollections<'_, ListNames> { self.async_database.list_collection_names() } @@ -87,7 +92,7 @@ impl<'a, M, S> ListCollections<'a, M, S> { } impl<'a, M> ListCollections<'a, M, ImplicitSession> { - /// Runs the query using the provided session. + /// Use the provided session when running the operation. pub fn session<'s>( self, value: impl Into<&'s mut ClientSession>, @@ -101,45 +106,31 @@ impl<'a, M> ListCollections<'a, M, ImplicitSession> { } } -action_impl! 
{ - impl<'a> Action for ListCollections<'a, ListSpecifications, ImplicitSession> { - type Future = ListCollectionsFuture; - - async fn execute(self) -> Result> { - let list_collections = op::ListCollections::new( - self.db.name().to_string(), - false, - self.options, - ); - self.db.client() - .execute_cursor_operation(list_collections) - .await - } - - fn sync_wrap(out) -> Result> { - out.map(crate::sync::Cursor::new) - } +#[action_impl(sync = crate::sync::Cursor)] +impl<'a> Action for ListCollections<'a, ListSpecifications, ImplicitSession> { + type Future = ListCollectionsFuture; + + async fn execute(self) -> Result> { + let list_collections = + op::ListCollections::new(self.db.name().to_string(), false, self.options); + self.db + .client() + .execute_cursor_operation(list_collections) + .await } } -action_impl! { - impl<'a> Action for ListCollections<'a, ListSpecifications, ExplicitSession<'a>> { - type Future = ListCollectionsSessionFuture; - - async fn execute(self) -> Result> { - let list_collections = op::ListCollections::new( - self.db.name().to_string(), - false, - self.options, - ); - self.db.client() - .execute_session_cursor_operation(list_collections, self.session.0) - .await - } - - fn sync_wrap(out) -> Result> { - out.map(crate::sync::SessionCursor::new) - } +#[action_impl(sync = crate::sync::SessionCursor)] +impl<'a> Action for ListCollections<'a, ListSpecifications, ExplicitSession<'a>> { + type Future = ListCollectionsSessionFuture; + + async fn execute(self) -> Result> { + let list_collections = + op::ListCollections::new(self.db.name().to_string(), false, self.options); + self.db + .client() + .execute_session_cursor_operation(list_collections, self.session.0) + .await } } @@ -161,41 +152,35 @@ async fn list_collection_names_common( .await } -action_impl! { - impl<'a> Action for ListCollections<'a, ListNames, ImplicitSession> { - type Future = ListCollectionNamesFuture; - - async fn execute(self) -> Result> { - let list_collections = op::ListCollections::new( - self.db.name().to_string(), - true, - self.options, - ); - let cursor: Cursor = self.db.client() - .execute_cursor_operation(list_collections) - .await?; - return list_collection_names_common(cursor).await; - } +#[action_impl] +impl<'a> Action for ListCollections<'a, ListNames, ImplicitSession> { + type Future = ListCollectionNamesFuture; + + async fn execute(self) -> Result> { + let list_collections = + op::ListCollections::new(self.db.name().to_string(), true, self.options); + let cursor: Cursor = self + .db + .client() + .execute_cursor_operation(list_collections) + .await?; + return list_collection_names_common(cursor).await; } } -action_impl! 
{ - impl<'a> Action for ListCollections<'a, ListNames, ExplicitSession<'a>> { - type Future = ListCollectionNamesSessionFuture; - - async fn execute(self) -> Result> { - let list_collections = op::ListCollections::new( - self.db.name().to_string(), - true, - self.options, - ); - let mut cursor: SessionCursor = self - .db.client() - .execute_session_cursor_operation(list_collections, &mut *self.session.0) - .await?; - - list_collection_names_common(cursor.stream(self.session.0)) - .await - } +#[action_impl] +impl<'a> Action for ListCollections<'a, ListNames, ExplicitSession<'a>> { + type Future = ListCollectionNamesSessionFuture; + + async fn execute(self) -> Result> { + let list_collections = + op::ListCollections::new(self.db.name().to_string(), true, self.options); + let mut cursor: SessionCursor = self + .db + .client() + .execute_session_cursor_operation(list_collections, &mut *self.session.0) + .await?; + + list_collection_names_common(cursor.stream(self.session.0)).await } } diff --git a/src/action/list_databases.rs b/src/action/list_databases.rs index f4c7c28d7..8744f8be6 100644 --- a/src/action/list_databases.rs +++ b/src/action/list_databases.rs @@ -13,12 +13,13 @@ use crate::{ ClientSession, }; -use super::{action_impl, option_setters, ListNames, ListSpecifications}; +use super::{action_impl, deeplink, option_setters, ListNames, ListSpecifications}; impl Client { /// Gets information about each database present in the cluster the Client is connected to. /// - /// `await` will return `Result>`. + /// `await` will return d[`Result>`]. + #[deeplink] pub fn list_databases(&self) -> ListDatabases { ListDatabases { client: self, @@ -30,7 +31,8 @@ impl Client { /// Gets the names of the databases present in the cluster the Client is connected to. /// - /// `await` will return `Result>`. + /// `await` will return d[`Result>`]. + #[deeplink] pub fn list_database_names(&self) -> ListDatabases<'_, ListNames> { ListDatabases { client: self, @@ -45,14 +47,16 @@ impl Client { impl SyncClient { /// Gets information about each database present in the cluster the Client is connected to. /// - /// [run](ListDatabases::run) will return `Result>`. + /// [run](ListDatabases::run) will return d[`Result>`]. + #[deeplink] pub fn list_databases(&self) -> ListDatabases { self.async_client.list_databases() } /// Gets the names of the databases present in the cluster the Client is connected to. /// - /// [run](ListDatabases::run) will return `Result>`. + /// [run](ListDatabases::run) will return d[`Result>`]. + #[deeplink] pub fn list_database_names(&self) -> ListDatabases<'_, ListNames> { self.async_client.list_database_names() } @@ -75,55 +79,53 @@ impl<'a, M> ListDatabases<'a, M> { comment: Bson, ); - /// Runs the query using the provided session. + /// Use the provided session when running the operation. pub fn session(mut self, value: impl Into<&'a mut ClientSession>) -> Self { self.session = Some(value.into()); self } } -action_impl! 
{ - impl<'a> Action for ListDatabases<'a, ListSpecifications> { - type Future = ListDatabasesFuture; +#[action_impl] +impl<'a> Action for ListDatabases<'a, ListSpecifications> { + type Future = ListDatabasesFuture; - async fn execute(self) -> Result> { - let op = op::ListDatabases::new(false, self.options); - self.client - .execute_operation(op, self.session) - .await - .and_then(|dbs| { - dbs.into_iter() - .map(|db_spec| { - bson::from_slice(db_spec.as_bytes()).map_err(crate::error::Error::from) - }) - .collect() + async fn execute(self) -> Result> { + let op = op::ListDatabases::new(false, self.options); + self.client + .execute_operation(op, self.session) + .await + .and_then(|dbs| { + dbs.into_iter() + .map(|db_spec| { + bson::from_slice(db_spec.as_bytes()).map_err(crate::error::Error::from) }) - } + .collect() + }) } } -action_impl! { - impl<'a> Action for ListDatabases<'a, ListNames> { - type Future = ListDatabaseNamesFuture; +#[action_impl] +impl<'a> Action for ListDatabases<'a, ListNames> { + type Future = ListDatabaseNamesFuture; - async fn execute(self) -> Result> { - let op = op::ListDatabases::new(true, self.options); - match self.client.execute_operation(op, self.session).await { - Ok(databases) => databases - .into_iter() - .map(|doc| { - let name = doc - .get_str("name") - .map_err(|_| ErrorKind::InvalidResponse { - message: "Expected \"name\" field in server response, but it was \ - not found" - .to_string(), - })?; - Ok(name.to_string()) - }) - .collect(), - Err(e) => Err(e), - } + async fn execute(self) -> Result> { + let op = op::ListDatabases::new(true, self.options); + match self.client.execute_operation(op, self.session).await { + Ok(databases) => databases + .into_iter() + .map(|doc| { + let name = doc + .get_str("name") + .map_err(|_| ErrorKind::InvalidResponse { + message: "Expected \"name\" field in server response, but it was not \ + found" + .to_string(), + })?; + Ok(name.to_string()) + }) + .collect(), + Err(e) => Err(e), } } } diff --git a/src/action/list_indexes.rs b/src/action/list_indexes.rs index ce9e41a1c..bcbe74ca5 100644 --- a/src/action/list_indexes.rs +++ b/src/action/list_indexes.rs @@ -16,6 +16,7 @@ use crate::{ use super::{ action_impl, + deeplink, option_setters, CollRef, ExplicitSession, @@ -24,11 +25,15 @@ use super::{ ListSpecifications, }; -impl Collection { +impl Collection +where + T: Send + Sync, +{ /// Lists all indexes on this collection. /// - /// `await` will return `Result>` (or `Result>` if - /// a `ClientSession` is provided). + /// `await` will return d[`Result>`] (or + /// d[`Result>`] if a `ClientSession` is provided). + #[deeplink] pub fn list_indexes(&self) -> ListIndexes { ListIndexes { coll: CollRef::new(self), @@ -40,7 +45,8 @@ impl Collection { /// Gets the names of all indexes on the collection. /// - /// `await` will return `Result>`. + /// `await` will return d[`Result>`]. + #[deeplink] pub fn list_index_names(&self) -> ListIndexes { ListIndexes { coll: CollRef::new(self), @@ -51,19 +57,24 @@ impl Collection { } } -#[cfg(any(feature = "sync", feature = "tokio-sync"))] -impl crate::sync::Collection { +#[cfg(feature = "sync")] +impl crate::sync::Collection +where + T: Send + Sync, +{ /// Lists all indexes on this collection. /// - /// [`run`](ListIndexes::run) will return `Result>` (or - /// `Result>` if a `ClientSession` is provided). + /// [`run`](ListIndexes::run) will return d[`Result>`] (or + /// d[`Result>`] if a `ClientSession` is provided). 
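The listing builders converted above all follow the same shape, and the name-only variants resolve to plain `Vec<String>`. A quick tour, with placeholder URI and names:

```rust
use mongodb::{bson::Document, Client};

async fn print_topology() -> mongodb::error::Result<()> {
    // Placeholder URI.
    let client = Client::with_uri_str("mongodb://localhost:27017").await?;

    for db_name in client.list_database_names().await? {
        println!("database: {db_name}");
        let db = client.database(&db_name);
        for coll_name in db.list_collection_names().await? {
            // Index listing may fail for views; this is only a sketch.
            let index_names = db
                .collection::<Document>(&coll_name)
                .list_index_names()
                .await?;
            println!("  {coll_name}: indexes {index_names:?}");
        }
    }
    Ok(())
}
```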
+ #[deeplink] pub fn list_indexes(&self) -> ListIndexes { self.async_collection.list_indexes() } /// Gets the names of all indexes on the collection. /// - /// [`run`](ListIndexes::run) will return `Result>`. + /// [`run`](ListIndexes::run) will return d[`Result>`]. + #[deeplink] pub fn list_index_names(&self) -> ListIndexes { self.async_collection.list_index_names() } @@ -88,7 +99,7 @@ impl<'a, Mode, Session> ListIndexes<'a, Mode, Session> { } impl<'a, Mode> ListIndexes<'a, Mode, ImplicitSession> { - /// Runs the operation using the provided session. + /// Use the provided session when running the operation. pub fn session( self, value: impl Into<&'a mut ClientSession>, @@ -102,74 +113,65 @@ impl<'a, Mode> ListIndexes<'a, Mode, ImplicitSession> { } } -action_impl! { - impl<'a> Action for ListIndexes<'a, ListSpecifications, ImplicitSession> { - type Future = ListIndexesFuture; +#[action_impl(sync = crate::sync::Cursor)] +impl<'a> Action for ListIndexes<'a, ListSpecifications, ImplicitSession> { + type Future = ListIndexesFuture; - async fn execute(self) -> Result> { - let op = Op::new(self.coll.namespace(), self.options); - self.coll.client().execute_cursor_operation(op).await - } - - fn sync_wrap(out) -> Result> { - out.map(crate::sync::Cursor::new) - } + async fn execute(self) -> Result> { + let op = Op::new(self.coll.namespace(), self.options); + self.coll.client().execute_cursor_operation(op).await } } -action_impl! { - impl<'a> Action for ListIndexes<'a, ListSpecifications, ExplicitSession<'a>> { - type Future = ListIndexesSessionFuture; +#[action_impl(sync = crate::sync::SessionCursor)] +impl<'a> Action for ListIndexes<'a, ListSpecifications, ExplicitSession<'a>> { + type Future = ListIndexesSessionFuture; - async fn execute(self) -> Result> { - let op = Op::new(self.coll.namespace(), self.options); - self.coll.client().execute_session_cursor_operation(op, self.session.0).await - } - - fn sync_wrap(out) -> Result> { - out.map(crate::sync::SessionCursor::new) - } + async fn execute(self) -> Result> { + let op = Op::new(self.coll.namespace(), self.options); + self.coll + .client() + .execute_session_cursor_operation(op, self.session.0) + .await } } -action_impl! { - impl<'a> Action for ListIndexes<'a, ListNames, ImplicitSession> { - type Future = ListIndexNamesFuture; - - async fn execute(self) -> Result> { - let inner = ListIndexes { - coll: self.coll, - options: self.options, - session: self.session, - _mode: PhantomData::, - }; - let cursor = inner.await?; - cursor - .try_filter_map(|index| futures_util::future::ok(index.get_name())) - .try_collect() - .await - } +#[action_impl] +impl<'a> Action for ListIndexes<'a, ListNames, ImplicitSession> { + type Future = ListIndexNamesFuture; + + async fn execute(self) -> Result> { + let inner = ListIndexes { + coll: self.coll, + options: self.options, + session: self.session, + _mode: PhantomData::, + }; + let cursor = inner.await?; + cursor + .try_filter_map(|index| futures_util::future::ok(index.get_name())) + .try_collect() + .await } } -action_impl! 
{ - impl<'a> Action for ListIndexes<'a, ListNames, ExplicitSession<'a>> { - type Future = ListIndexNamesSessionFuture; - - async fn execute(self) -> Result> { - let session = self.session.0; - let inner = ListIndexes { - coll: self.coll, - options: self.options, - session: ExplicitSession(&mut *session), - _mode: PhantomData::, - }; - let mut cursor = inner.await?; - let stream = cursor.stream(session); - stream - .try_filter_map(|index| futures_util::future::ok(index.get_name())) - .try_collect() - .await - } +#[action_impl] +impl<'a> Action for ListIndexes<'a, ListNames, ExplicitSession<'a>> { + type Future = ListIndexNamesSessionFuture; + + async fn execute(self) -> Result> { + let session = self.session.0; + let inner = ListIndexes { + coll: self.coll, + options: self.options, + session: ExplicitSession(&mut *session), + _mode: PhantomData::, + }; + let mut cursor = inner.await?; + let stream = cursor.stream(session); + stream + .try_filter_map(|index| futures_util::future::ok(index.get_name())) + .try_collect() + .await } } diff --git a/src/action/perf.rs b/src/action/perf.rs index d897fe80c..5d33e5727 100644 --- a/src/action/perf.rs +++ b/src/action/perf.rs @@ -37,7 +37,7 @@ impl crate::sync::Client { } } -/// Add connections to the connection pool up to `min_pool_size`. Create by calling +/// Add connections to the connection pool up to `min_pool_size`. Construct with /// [`Client::warm_connection_pool`]. #[must_use] pub struct WarmConnectionPool<'a> { diff --git a/src/action/replace_one.rs b/src/action/replace_one.rs new file mode 100644 index 000000000..fe7983f35 --- /dev/null +++ b/src/action/replace_one.rs @@ -0,0 +1,107 @@ +use std::borrow::Borrow; + +use bson::{Bson, Document, RawDocumentBuf}; +use serde::Serialize; + +use crate::{ + coll::options::{Hint, ReplaceOptions, UpdateOptions}, + collation::Collation, + error::Result, + operation::Update as Op, + options::WriteConcern, + results::UpdateResult, + serde_util, + ClientSession, + Collection, +}; + +use super::{action_impl, deeplink, option_setters, CollRef}; + +impl Collection { + /// Replaces up to one document matching `query` in the collection with `replacement`. + /// + /// This operation will retry once upon failure if the connection and encountered error support + /// retryability. See the documentation + /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on + /// retryable writes. + /// + /// `await` will return d[`Result`]. + #[deeplink] + pub fn replace_one(&self, query: Document, replacement: impl Borrow) -> ReplaceOne { + ReplaceOne { + coll: CollRef::new(self), + query, + replacement: serde_util::to_raw_document_buf_with_options( + replacement.borrow(), + self.human_readable_serialization(), + ), + options: None, + session: None, + } + } +} + +#[cfg(feature = "sync")] +impl crate::sync::Collection { + /// Replaces up to one document matching `query` in the collection with `replacement`. + /// + /// This operation will retry once upon failure if the connection and encountered error support + /// retryability. See the documentation + /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on + /// retryable writes. + /// + /// [`run`](ReplaceOne::run) will return d[`Result`]. + #[deeplink] + pub fn replace_one(&self, query: Document, replacement: impl Borrow) -> ReplaceOne { + self.async_collection.replace_one(query, replacement) + } +} + +/// Replace up to one document matching a query. Construct with [`Collection::replace_one`]. 
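A usage sketch for the `replace_one` builder introduced above, upserting a whole document in place. The collection name and `Config` type are placeholders.

```rust
use mongodb::{bson::doc, Client};
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct Config {
    name: String,
    retries: i32,
}

async fn save_config(config: &Config) -> mongodb::error::Result<()> {
    // Placeholder URI and names.
    let client = Client::with_uri_str("mongodb://localhost:27017").await?;
    let configs = client.database("app").collection::<Config>("configs");

    // Replace the matching document wholesale, inserting it if absent.
    let result = configs
        .replace_one(doc! { "name": config.name.clone() }, config)
        .upsert(true)
        .await?;
    println!(
        "matched {}, upserted id {:?}",
        result.matched_count, result.upserted_id
    );
    Ok(())
}
```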
+#[must_use] +pub struct ReplaceOne<'a> { + coll: CollRef<'a>, + query: Document, + replacement: Result, + options: Option, + session: Option<&'a mut ClientSession>, +} + +impl<'a> ReplaceOne<'a> { + option_setters! { options: ReplaceOptions; + bypass_document_validation: bool, + upsert: bool, + collation: Collation, + hint: Hint, + write_concern: WriteConcern, + let_vars: Document, + comment: Bson, + } + + /// Use the provided session when running the operation. + pub fn session(mut self, value: impl Into<&'a mut ClientSession>) -> Self { + self.session = Some(value.into()); + self + } +} + +#[action_impl] +impl<'a> Action for ReplaceOne<'a> { + type Future = ReplaceOneFuture; + + async fn execute(mut self) -> Result { + resolve_write_concern_with_session!(self.coll, self.options, self.session.as_ref())?; + + let update = Op::with_replace_raw( + self.coll.namespace(), + self.query, + self.replacement?, + false, + self.options.map(UpdateOptions::from_replace_options), + )?; + self.coll + .client() + .execute_operation(update, self.session) + .await + } +} diff --git a/src/action/run_command.rs b/src/action/run_command.rs index 1e7729a31..232bb4d5d 100644 --- a/src/action/run_command.rs +++ b/src/action/run_command.rs @@ -12,7 +12,7 @@ use crate::{ SessionCursor, }; -use super::{action_impl, option_setters, ExplicitSession, ImplicitSession}; +use super::{action_impl, deeplink, option_setters, ExplicitSession, ImplicitSession}; impl Database { /// Runs a database-level command. @@ -23,7 +23,8 @@ impl Database { /// Please note that run_command doesn't validate WriteConcerns passed into the body of the /// command document. /// - /// `await` will return `Result`. + /// `await` will return d[`Result`]. + #[deeplink] pub fn run_command(&self, command: Document) -> RunCommand { RunCommand { db: self, @@ -35,8 +36,9 @@ impl Database { /// Runs a database-level command and returns a cursor to the response. /// - /// `await` will return `Result<`[`Cursor`]`>` or a - /// `Result<`[`SessionCursor`]`>` if a [`ClientSession`] is provided. + /// `await` will return d[`Result>`] or a + /// d[`Result>`] if a [`ClientSession`] is provided. + #[deeplink] pub fn run_cursor_command(&self, command: Document) -> RunCursorCommand { RunCursorCommand { db: self, @@ -57,15 +59,17 @@ impl crate::sync::Database { /// Please note that run_command doesn't validate WriteConcerns passed into the body of the /// command document. /// - /// [`run`](RunCommand::run) will return `Result`. + /// [`run`](RunCommand::run) will return d[`Result`]. + #[deeplink] pub fn run_command(&self, command: Document) -> RunCommand { self.async_database.run_command(command) } /// Runs a database-level command and returns a cursor to the response. /// - /// [`run`](RunCursorCommand::run) will return `Result<`[`Cursor`]`>` or a - /// `Result<`[`SessionCursor`]`>` if a [`ClientSession`] is provided. + /// [`run`](RunCursorCommand::run) will return d[`Result>`] or a + /// d[`Result>`] if a [`ClientSession`] is provided. + #[deeplink] pub fn run_cursor_command(&self, command: Document) -> RunCursorCommand { self.async_database.run_cursor_command(command) } @@ -92,44 +96,46 @@ impl<'a> RunCommand<'a> { } } -action_impl! 
{ - impl<'a> Action for RunCommand<'a> { - type Future = RunCommandFuture; - - async fn execute(self) -> Result { - let mut selection_criteria = self.options.and_then(|o| o.selection_criteria); - if let Some(session) = &self.session { - match session.transaction.state { - TransactionState::Starting | TransactionState::InProgress => { - if self.command.contains_key("readConcern") { - return Err(ErrorKind::InvalidArgument { - message: "Cannot set read concern after starting a transaction".into(), - } - .into()); +#[action_impl] +impl<'a> Action for RunCommand<'a> { + type Future = RunCommandFuture; + + async fn execute(self) -> Result { + let mut selection_criteria = self.options.and_then(|o| o.selection_criteria); + if let Some(session) = &self.session { + match session.transaction.state { + TransactionState::Starting | TransactionState::InProgress => { + if self.command.contains_key("readConcern") { + return Err(ErrorKind::InvalidArgument { + message: "Cannot set read concern after starting a transaction".into(), } - selection_criteria = match selection_criteria { - Some(selection_criteria) => Some(selection_criteria), - None => { - if let Some(ref options) = session.transaction.options { - options.selection_criteria.clone() - } else { - None - } - } - }; + .into()); } - _ => {} + selection_criteria = match selection_criteria { + Some(selection_criteria) => Some(selection_criteria), + None => { + if let Some(ref options) = session.transaction.options { + options.selection_criteria.clone() + } else { + None + } + } + }; } + _ => {} } - - let operation = run_command::RunCommand::new( - self.db.name().into(), - self.command, - selection_criteria, - None, - )?; - self.db.client().execute_operation(operation, self.session).await } + + let operation = run_command::RunCommand::new( + self.db.name().into(), + self.command, + selection_criteria, + None, + )?; + self.db + .client() + .execute_operation(operation, self.session) + .await } } @@ -168,50 +174,51 @@ impl<'a> RunCursorCommand<'a, ImplicitSession> { } } -action_impl! { - impl<'a> Action for RunCursorCommand<'a, ImplicitSession> { - type Future = RunCursorCommandFuture; - - async fn execute(self) -> Result> { - let selection_criteria = self.options - .as_ref() - .and_then(|options| options.selection_criteria.clone()); - let rcc = run_command::RunCommand::new( - self.db.name().to_string(), - self.command, - selection_criteria, - None, - )?; - let rc_command = run_cursor_command::RunCursorCommand::new(rcc, self.options)?; - let client = self.db.client(); - client.execute_cursor_operation(rc_command).await - } - - fn sync_wrap(out) -> Result> { - out.map(crate::sync::Cursor::new) - } +#[action_impl(sync = crate::sync::Cursor)] +impl<'a> Action for RunCursorCommand<'a, ImplicitSession> { + type Future = RunCursorCommandFuture; + + async fn execute(self) -> Result> { + let selection_criteria = self + .options + .as_ref() + .and_then(|options| options.selection_criteria.clone()); + let rcc = run_command::RunCommand::new( + self.db.name().to_string(), + self.command, + selection_criteria, + None, + )?; + let rc_command = run_cursor_command::RunCursorCommand::new(rcc, self.options)?; + let client = self.db.client(); + client.execute_cursor_operation(rc_command).await } } -action_impl! 
{ - impl<'a> Action for RunCursorCommand<'a, ExplicitSession<'a>> { - type Future = RunCursorCommandSessionFuture; - - async fn execute(mut self) -> Result> { - resolve_selection_criteria_with_session!(self.db, self.options, Some(&mut *self.session.0))?; - let selection_criteria = self.options - .as_ref() - .and_then(|options| options.selection_criteria.clone()); - let rcc = run_command::RunCommand::new(self.db.name().to_string(), self.command, selection_criteria, None)?; - let rc_command = run_cursor_command::RunCursorCommand::new(rcc, self.options)?; - let client = self.db.client(); - client - .execute_session_cursor_operation(rc_command, self.session.0) - .await - } - - fn sync_wrap(out) -> Result> { - out.map(crate::sync::SessionCursor::new) - } +#[action_impl(sync = crate::sync::SessionCursor)] +impl<'a> Action for RunCursorCommand<'a, ExplicitSession<'a>> { + type Future = RunCursorCommandSessionFuture; + + async fn execute(mut self) -> Result> { + resolve_selection_criteria_with_session!( + self.db, + self.options, + Some(&mut *self.session.0) + )?; + let selection_criteria = self + .options + .as_ref() + .and_then(|options| options.selection_criteria.clone()); + let rcc = run_command::RunCommand::new( + self.db.name().to_string(), + self.command, + selection_criteria, + None, + )?; + let rc_command = run_cursor_command::RunCursorCommand::new(rcc, self.options)?; + let client = self.db.client(); + client + .execute_session_cursor_operation(rc_command, self.session.0) + .await } } diff --git a/src/action/search_index.rs b/src/action/search_index.rs new file mode 100644 index 000000000..6d7912b66 --- /dev/null +++ b/src/action/search_index.rs @@ -0,0 +1,283 @@ +use std::marker::PhantomData; + +use bson::{doc, Document}; + +use super::{action_impl, deeplink, option_setters, CollRef, Multiple, Single}; +use crate::{ + coll::options::AggregateOptions, + error::{Error, Result}, + operation, + search_index::options::{ + CreateSearchIndexOptions, + DropSearchIndexOptions, + ListSearchIndexOptions, + UpdateSearchIndexOptions, + }, + Collection, + Cursor, + SearchIndexModel, +}; + +impl Collection +where + T: Send + Sync, +{ + /// Creates multiple search indexes on the collection. + /// + /// `await` will return d[`Result>`]. + #[deeplink] + pub fn create_search_indexes( + &self, + models: impl IntoIterator, + ) -> CreateSearchIndex { + CreateSearchIndex { + coll: CollRef::new(self), + models: models.into_iter().collect(), + options: None, + _mode: PhantomData, + } + } + + /// Convenience method for creating a single search index. + /// + /// `await` will return d[`Result`]. + #[deeplink] + pub fn create_search_index(&self, model: SearchIndexModel) -> CreateSearchIndex { + CreateSearchIndex { + coll: CollRef::new(self), + models: vec![model], + options: None, + _mode: PhantomData, + } + } + + /// Updates the search index with the given name to use the provided definition. + /// + /// `await` will return [`Result<()>`]. + pub fn update_search_index( + &self, + name: impl Into, + definition: Document, + ) -> UpdateSearchIndex { + UpdateSearchIndex { + coll: CollRef::new(self), + name: name.into(), + definition, + options: None, + } + } + + /// Drops the search index with the given name. + /// + /// `await` will return [`Result<()>`]. + pub fn drop_search_index(&self, name: impl Into) -> DropSearchIndex { + DropSearchIndex { + coll: CollRef::new(self), + name: name.into(), + options: None, + } + } + + /// Gets index information for one or more search indexes in the collection. 
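A usage sketch for the search-index builders defined above. `update_search_index` and `drop_search_index` take plain names and `Document` definitions, so no extra types are needed; the index name, definition, and collection are placeholders.

```rust
use mongodb::{
    bson::{doc, Document},
    Client,
};

async fn refresh_search_index() -> mongodb::error::Result<()> {
    // Placeholder URI and names.
    let client = Client::with_uri_str("mongodb://localhost:27017").await?;
    let coll = client.database("app").collection::<Document>("articles");

    // Point the existing index at a new dynamic mapping.
    coll.update_search_index("default", doc! { "mappings": { "dynamic": true } })
        .await?;

    // Later, remove it entirely.
    coll.drop_search_index("default").await?;
    Ok(())
}
```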
+ /// + /// If name is not specified, information for all indexes on the specified collection will be + /// returned. + /// + /// `await` will return d[`Result>`]. + #[deeplink] + pub fn list_search_indexes(&self) -> ListSearchIndexes { + ListSearchIndexes { + coll: CollRef::new(self), + name: None, + agg_options: None, + options: None, + } + } +} + +#[cfg(feature = "sync")] +impl crate::sync::Collection +where + T: Send + Sync, +{ + /// Creates multiple search indexes on the collection. + /// + /// [`run`](CreateSearchIndex::run) will return d[`Result>`]. + #[deeplink] + pub fn create_search_indexes( + &self, + models: impl IntoIterator, + ) -> CreateSearchIndex { + self.async_collection.create_search_indexes(models) + } + + /// Convenience method for creating a single search index. + /// + /// [`run`](CreateSearchIndex::run) will return d[`Result`]. + #[deeplink] + pub fn create_search_index(&self, model: SearchIndexModel) -> CreateSearchIndex { + self.async_collection.create_search_index(model) + } + + /// Updates the search index with the given name to use the provided definition. + /// + /// [`run`](UpdateSearchIndex::run) will return [`Result<()>`]. + pub fn update_search_index( + &self, + name: impl Into, + definition: Document, + ) -> UpdateSearchIndex { + self.async_collection.update_search_index(name, definition) + } + + /// Drops the search index with the given name. + /// + /// [`run`](DropSearchIndex::run) will return [`Result<()>`]. + pub fn drop_search_index(&self, name: impl Into) -> DropSearchIndex { + self.async_collection.drop_search_index(name) + } + + /// Gets index information for one or more search indexes in the collection. + /// + /// If name is not specified, information for all indexes on the specified collection will be + /// returned. + /// + /// [`run`](ListSearchIndexes::run) will return d[`Result>`]. + #[deeplink] + pub fn list_search_indexes(&self) -> ListSearchIndexes { + self.async_collection.list_search_indexes() + } +} + +/// Create search indexes on a collection. Construct with [`Collection::create_search_index`] or +/// [`Collection::create_search_indexes`]. +#[must_use] +pub struct CreateSearchIndex<'a, Mode> { + coll: CollRef<'a>, + models: Vec, + options: Option, + _mode: PhantomData, +} + +impl<'a, Mode> CreateSearchIndex<'a, Mode> { + option_setters! { options: CreateSearchIndexOptions; + } +} + +#[action_impl] +impl<'a> Action for CreateSearchIndex<'a, Multiple> { + type Future = CreateSearchIndexesFuture; + + async fn execute(self) -> Result> { + let op = operation::CreateSearchIndexes::new(self.coll.namespace(), self.models); + self.coll.client().execute_operation(op, None).await + } +} + +#[action_impl] +impl<'a> Action for CreateSearchIndex<'a, Single> { + type Future = CreateSearchIndexFuture; + + async fn execute(self) -> Result { + let mut names = self + .coll + .create_search_indexes(self.models) + .with_options(self.options) + .await?; + match names.len() { + 1 => Ok(names.pop().unwrap()), + n => Err(Error::internal(format!("expected 1 index name, got {}", n))), + } + } +} + +/// Updates a specific search index to use a new definition. Construct with +/// [`Collection::update_search_index`]. +#[must_use] +pub struct UpdateSearchIndex<'a> { + coll: CollRef<'a>, + name: String, + definition: Document, + options: Option, +} + +impl<'a> UpdateSearchIndex<'a> { + option_setters! 
{ options: UpdateSearchIndexOptions; } +} + +#[action_impl] +impl<'a> Action for UpdateSearchIndex<'a> { + type Future = UpdateSearchIndexFuture; + + async fn execute(self) -> Result<()> { + let op = + operation::UpdateSearchIndex::new(self.coll.namespace(), self.name, self.definition); + self.coll.client().execute_operation(op, None).await + } +} + +/// Drops a specific search index. Construct with [`Collection::drop_search_index`]. +#[must_use] +pub struct DropSearchIndex<'a> { + coll: CollRef<'a>, + name: String, + options: Option, +} + +impl<'a> DropSearchIndex<'a> { + option_setters! { options: DropSearchIndexOptions; } +} + +#[action_impl] +impl<'a> Action for DropSearchIndex<'a> { + type Future = DropSearchIndexFuture; + + async fn execute(self) -> Result<()> { + let op = operation::DropSearchIndex::new(self.coll.namespace(), self.name); + self.coll.client().execute_operation(op, None).await + } +} + +/// Gets index information for one or more search indexes in a collection. +#[must_use] +pub struct ListSearchIndexes<'a> { + coll: CollRef<'a>, + name: Option, + agg_options: Option, + options: Option, +} + +impl<'a> ListSearchIndexes<'a> { + option_setters! { options: ListSearchIndexOptions; } + + /// Get information for the named index. + pub fn name(mut self, name: impl Into) -> Self { + self.name = Some(name.into()); + self + } + + /// Set aggregation options. + pub fn aggregate_options(mut self, value: AggregateOptions) -> Self { + self.agg_options = Some(value); + self + } +} + +#[action_impl(sync = crate::sync::Cursor)] +impl<'a> Action for ListSearchIndexes<'a> { + type Future = ListSearchIndexesFuture; + + async fn execute(self) -> Result> { + let mut inner = doc! {}; + if let Some(name) = self.name { + inner.insert("name", name); + } + self.coll + .clone_unconcerned() + .aggregate(vec![doc! { + "$listSearchIndexes": inner, + }]) + .with_options(self.agg_options) + .await + } +} diff --git a/src/action/session.rs b/src/action/session.rs index e310a2fd1..f243c5946 100644 --- a/src/action/session.rs +++ b/src/action/session.rs @@ -5,12 +5,13 @@ use crate::{ ClientSession, }; -use super::{action_impl, option_setters}; +use super::{action_impl, deeplink, option_setters}; impl Client { /// Starts a new [`ClientSession`]. /// - /// `await` will return `Result<`[`ClientSession`]`>`. + /// `await` will return d[`Result`]. + #[deeplink] pub fn start_session(&self) -> StartSession { StartSession { client: self, @@ -23,13 +24,14 @@ impl Client { impl crate::sync::Client { /// Starts a new [`ClientSession`]. /// - /// [run](StartSession::run) will return `Result<`[`ClientSession`]`>`. + /// [run](StartSession::run) will return d[`Result`]. + #[deeplink] pub fn start_session(&self) -> StartSession { self.async_client.start_session() } } -/// Starts a new [`ClientSession`]. Create by calling [`Client::start_session`]. +/// Starts a new [`ClientSession`]. Construct with [`Client::start_session`]. #[must_use] pub struct StartSession<'a> { client: &'a Client, @@ -44,19 +46,14 @@ impl<'a> StartSession<'a> { ); } -action_impl! 
{ - impl<'a> Action for StartSession<'a> { - type Future = StartSessionFuture; +#[action_impl(sync = crate::sync::ClientSession)] +impl<'a> Action for StartSession<'a> { + type Future = StartSessionFuture; - async fn execute(self) -> Result { - if let Some(options) = &self.options { - options.validate()?; - } - Ok(ClientSession::new(self.client.clone(), self.options, false).await) - } - - fn sync_wrap(out) -> Result { - out.map(Into::into) + async fn execute(self) -> Result { + if let Some(options) = &self.options { + options.validate()?; } + Ok(ClientSession::new(self.client.clone(), self.options, false).await) } } diff --git a/src/action/shutdown.rs b/src/action/shutdown.rs index 915e48b01..cb05404b5 100644 --- a/src/action/shutdown.rs +++ b/src/action/shutdown.rs @@ -15,9 +15,10 @@ impl Client { /// /// ```rust /// # use mongodb::{Client, GridFsBucket, error::Result}; - /// async fn upload_data(bucket: &GridFsBucket) { - /// let stream = bucket.open_upload_stream("test", None); + /// async fn upload_data(bucket: &GridFsBucket) -> Result<()> { + /// let stream = bucket.open_upload_stream("test").await?; /// // .. write to the stream .. + /// # Ok(()) /// } /// /// # async fn run() -> Result<()> { @@ -37,7 +38,7 @@ impl Client { /// # async fn run() -> Result<()> { /// let client = Client::with_uri_str("mongodb://example.com").await?; /// let bucket = client.database("test").gridfs_bucket(None); - /// let stream = bucket.open_upload_stream("test", None); + /// let stream = bucket.open_upload_stream("test").await?; /// // .. write to the stream .. /// drop(stream); /// client.shutdown().await; @@ -77,15 +78,16 @@ impl crate::sync::Client { /// /// ```rust /// # use mongodb::{sync::{Client, gridfs::GridFsBucket}, error::Result}; - /// fn upload_data(bucket: &GridFsBucket) { - /// let stream = bucket.open_upload_stream("test", None); + /// fn upload_data(bucket: &GridFsBucket) -> Result<()> { + /// let stream = bucket.open_upload_stream("test").run()?; /// // .. write to the stream .. + /// # Ok(()) /// } /// /// # fn run() -> Result<()> { /// let client = Client::with_uri_str("mongodb://example.com")?; /// let bucket = client.database("test").gridfs_bucket(None); - /// upload_data(&bucket); + /// upload_data(&bucket)?; /// client.shutdown(); /// // Background cleanup work from `upload_data` is guaranteed to have run. /// # Ok(()) @@ -99,7 +101,7 @@ impl crate::sync::Client { /// # fn run() -> Result<()> { /// let client = Client::with_uri_str("mongodb://example.com")?; /// let bucket = client.database("test").gridfs_bucket(None); - /// let stream = bucket.open_upload_stream("test", None); + /// let stream = bucket.open_upload_stream("test").run()?; /// // .. write to the stream .. /// drop(stream); /// client.shutdown(); diff --git a/src/action/transaction.rs b/src/action/transaction.rs new file mode 100644 index 000000000..de560eda2 --- /dev/null +++ b/src/action/transaction.rs @@ -0,0 +1,235 @@ +use std::time::Duration; + +use crate::{ + client::options::TransactionOptions, + options::{ReadConcern, WriteConcern}, + selection_criteria::SelectionCriteria, + ClientSession, +}; + +use super::option_setters; + +impl ClientSession { + /// Starts a new transaction on this session. If no options are set, the session's + /// `defaultTransactionOptions` will be used. This session must be passed into each operation + /// within the transaction; otherwise, the operation will be executed outside of the + /// transaction. 
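A minimal sketch of how the option setters declared further below for `StartTransaction` (`read_concern`, `write_concern`, `selection_criteria`, `max_commit_time`) chain onto the `start_transaction()` builder before awaiting it; the database, collection, and inserted document are illustrative placeholders, not part of this patch.

```rust
use std::time::Duration;

use mongodb::{
    bson::{doc, Document},
    error::Result,
    options::ReadConcern,
    Client,
};

// Sketch only: exercises the fluent transaction options added in this patch.
async fn insert_in_transaction(client: &Client) -> Result<()> {
    let coll = client.database("app").collection::<Document>("items");
    let mut session = client.start_session().await?;
    session
        .start_transaction()
        .read_concern(ReadConcern::majority())
        .max_commit_time(Duration::from_secs(5))
        .await?;
    // The session must be passed to every operation in the transaction.
    coll.insert_one(doc! { "x": 1 }).session(&mut session).await?;
    session.commit_transaction().await?;
    Ok(())
}
```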
+ /// + /// Errors returned from operations executed within a transaction may include a + /// [`crate::error::TRANSIENT_TRANSACTION_ERROR`] label. This label indicates that the entire + /// transaction can be retried with a reasonable expectation that it will succeed. + /// + /// Transactions on replica sets are supported on MongoDB 4.0+. Transactions on sharded + /// clusters are supported on MongoDB 4.2+. + /// + /// ```rust + /// # use mongodb::{bson::{doc, Document}, error::Result, Client, ClientSession}; + /// # + /// # async fn do_stuff() -> Result<()> { + /// # let client = Client::with_uri_str("mongodb://example.com").await?; + /// # let coll = client.database("foo").collection::("bar"); + /// # let mut session = client.start_session().await?; + /// session.start_transaction().await?; + /// let result = coll.insert_one(doc! { "x": 1 }).session(&mut session).await?; + /// session.commit_transaction().await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// `await` will return [`Result<()>`]. + pub fn start_transaction(&mut self) -> StartTransaction<&mut Self> { + StartTransaction { + session: self, + options: None, + } + } + + /// Commits the transaction that is currently active on this session. + /// + /// This method may return an error with a [`crate::error::UNKNOWN_TRANSACTION_COMMIT_RESULT`] + /// label. This label indicates that it is unknown whether the commit has satisfied the write + /// concern associated with the transaction. If an error with this label is returned, it is + /// safe to retry the commit until the write concern is satisfied or an error without the label + /// is returned. + /// + /// ```rust + /// # use mongodb::{bson::{doc, Document}, error::Result, Client, ClientSession}; + /// # + /// # async fn do_stuff() -> Result<()> { + /// # let client = Client::with_uri_str("mongodb://example.com").await?; + /// # let coll = client.database("foo").collection::("bar"); + /// # let mut session = client.start_session().await?; + /// session.start_transaction().await?; + /// let result = coll.insert_one(doc! { "x": 1 }).session(&mut session).await?; + /// session.commit_transaction().await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// This operation will retry once upon failure if the connection and encountered error support + /// retryability. See the documentation + /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on + /// retryable writes. + /// + /// `await` will return [`Result<()>`]. + pub fn commit_transaction(&mut self) -> CommitTransaction { + CommitTransaction { session: self } + } + + /// Aborts the transaction that is currently active on this session. Any open transaction will + /// be aborted automatically in the `Drop` implementation of `ClientSession`. + /// + /// ```rust + /// # use mongodb::{bson::{doc, Document}, error::Result, Client, ClientSession, Collection}; + /// # + /// # async fn do_stuff() -> Result<()> { + /// # let client = Client::with_uri_str("mongodb://example.com").await?; + /// # let coll = client.database("foo").collection::("bar"); + /// # let mut session = client.start_session().await?; + /// session.start_transaction().await?; + /// match execute_transaction(&coll, &mut session).await { + /// Ok(_) => session.commit_transaction().await?, + /// Err(_) => session.abort_transaction().await?, + /// } + /// # Ok(()) + /// # } + /// + /// async fn execute_transaction(coll: &Collection, session: &mut ClientSession) -> Result<()> { + /// coll.insert_one(doc! 
{ "x": 1 }).session(&mut *session).await?; + /// coll.delete_one(doc! { "y": 2 }).session(&mut *session).await?; + /// Ok(()) + /// } + /// ``` + /// + /// This operation will retry once upon failure if the connection and encountered error support + /// retryability. See the documentation + /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on + /// retryable writes. + /// + /// `await` will return [`Result<()>`]. + pub fn abort_transaction(&mut self) -> AbortTransaction { + AbortTransaction { session: self } + } +} + +#[cfg(feature = "sync")] +impl crate::sync::ClientSession { + /// Starts a new transaction on this session with the given `TransactionOptions`. If no options + /// are provided, the session's `defaultTransactionOptions` will be used. This session must + /// be passed into each operation within the transaction; otherwise, the operation will be + /// executed outside of the transaction. + /// + /// ```rust + /// # use mongodb::{bson::{doc, Document}, error::Result, sync::{Client, ClientSession}}; + /// # + /// # async fn do_stuff() -> Result<()> { + /// # let client = Client::with_uri_str("mongodb://example.com")?; + /// # let coll = client.database("foo").collection::("bar"); + /// # let mut session = client.start_session().run()?; + /// session.start_transaction().run()?; + /// let result = coll.insert_one(doc! { "x": 1 }).session(&mut session).run()?; + /// session.commit_transaction().run()?; + /// # Ok(()) + /// # } + /// ``` + /// + /// [`run`](StartTransaction::run) will return [`Result<()>`]. + pub fn start_transaction(&mut self) -> StartTransaction<&mut Self> { + StartTransaction { + session: self, + options: None, + } + } + + /// Commits the transaction that is currently active on this session. + /// + /// ```rust + /// # use mongodb::{bson::{doc, Document}, error::Result, sync::{Client, ClientSession}}; + /// # + /// # async fn do_stuff() -> Result<()> { + /// # let client = Client::with_uri_str("mongodb://example.com")?; + /// # let coll = client.database("foo").collection::("bar"); + /// # let mut session = client.start_session().run()?; + /// session.start_transaction().run()?; + /// let result = coll.insert_one(doc! { "x": 1 }).session(&mut session).run()?; + /// session.commit_transaction().run()?; + /// # Ok(()) + /// # } + /// ``` + /// + /// This operation will retry once upon failure if the connection and encountered error support + /// retryability. See the documentation + /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on + /// retryable writes. + /// + /// [`run`](CommitTransaction::run) will return [`Result<()>`]. + pub fn commit_transaction(&mut self) -> CommitTransaction { + self.async_client_session.commit_transaction() + } + + /// Aborts the transaction that is currently active on this session. Any open transaction will + /// be aborted automatically in the `Drop` implementation of `ClientSession`. 
+ /// + /// ```rust + /// # use mongodb::{bson::{doc, Document}, error::Result, sync::{Client, ClientSession, Collection}}; + /// # + /// # async fn do_stuff() -> Result<()> { + /// # let client = Client::with_uri_str("mongodb://example.com")?; + /// # let coll = client.database("foo").collection::("bar"); + /// # let mut session = client.start_session().run()?; + /// session.start_transaction().run()?; + /// match execute_transaction(coll, &mut session) { + /// Ok(_) => session.commit_transaction().run()?, + /// Err(_) => session.abort_transaction().run()?, + /// } + /// # Ok(()) + /// # } + /// + /// fn execute_transaction(coll: Collection, session: &mut ClientSession) -> Result<()> { + /// coll.insert_one(doc! { "x": 1 }).session(&mut *session).run()?; + /// coll.delete_one(doc! { "y": 2 }).session(&mut *session).run()?; + /// Ok(()) + /// } + /// ``` + /// + /// This operation will retry once upon failure if the connection and encountered error support + /// retryability. See the documentation + /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on + /// retryable writes. + /// + /// [`run`](AbortTransaction::run) will return [`Result<()>`]. + pub fn abort_transaction(&mut self) -> AbortTransaction { + self.async_client_session.abort_transaction() + } +} + +/// Start a new transaction. Construct with [`ClientSession::start_transaction`]. +#[must_use] +pub struct StartTransaction { + pub(crate) session: S, + pub(crate) options: Option, +} + +impl StartTransaction { + option_setters! { options: TransactionOptions; + read_concern: ReadConcern, + write_concern: WriteConcern, + selection_criteria: SelectionCriteria, + max_commit_time: Duration, + } +} + +/// Commits a currently-active transaction. Construct with [`ClientSession::commit_transaction`]. +#[must_use] +pub struct CommitTransaction<'a> { + pub(crate) session: &'a mut ClientSession, +} + +/// Abort the currently active transaction on a session. Construct with +/// [`ClientSession::abort_transaction`]. +#[must_use] +pub struct AbortTransaction<'a> { + pub(crate) session: &'a mut ClientSession, +} + +// Action impls at src/client/session/action.rs diff --git a/src/action/update.rs b/src/action/update.rs index 7fe3586d9..fc6913d62 100644 --- a/src/action/update.rs +++ b/src/action/update.rs @@ -11,9 +11,12 @@ use crate::{ Collection, }; -use super::{action_impl, option_setters, CollRef}; +use super::{action_impl, deeplink, option_setters, CollRef}; -impl Collection { +impl Collection +where + T: Send + Sync, +{ /// Updates all documents matching `query` in the collection. /// /// Both `Document` and `Vec` implement `Into`, so either can be @@ -21,7 +24,8 @@ impl Collection { /// in MongoDB 4.2+. See the official MongoDB /// [documentation](https://www.mongodb.com/docs/manual/reference/command/update/#behavior) for more information on specifying updates. /// - /// `await` will return `Result`. + /// `await` will return d[`Result`]. + #[deeplink] pub fn update_many(&self, query: Document, update: impl Into) -> Update { Update { coll: CollRef::new(self), @@ -45,7 +49,8 @@ impl Collection { /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on /// retryable writes. /// - /// `await` will return `Result`. + /// `await` will return d[`Result`]. 
+ #[deeplink] pub fn update_one(&self, query: Document, update: impl Into) -> Update { Update { coll: CollRef::new(self), @@ -58,8 +63,11 @@ impl Collection { } } -#[cfg(any(feature = "sync", feature = "tokio-sync"))] -impl crate::sync::Collection { +#[cfg(feature = "sync")] +impl crate::sync::Collection +where + T: Send + Sync, +{ /// Updates all documents matching `query` in the collection. /// /// Both `Document` and `Vec` implement `Into`, so either can be @@ -67,7 +75,8 @@ impl crate::sync::Collection { /// in MongoDB 4.2+. See the official MongoDB /// [documentation](https://www.mongodb.com/docs/manual/reference/command/update/#behavior) for more information on specifying updates. /// - /// [`run`](Update::run) will return `Result`. + /// [`run`](Update::run) will return d[`Result`]. + #[deeplink] pub fn update_many(&self, query: Document, update: impl Into) -> Update { self.async_collection.update_many(query, update) } @@ -84,7 +93,8 @@ impl crate::sync::Collection { /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on /// retryable writes. /// - /// [`run`](Update::run) will return `Result`. + /// [`run`](Update::run) will return d[`Result`]. + #[deeplink] pub fn update_one(&self, query: Document, update: impl Into) -> Update { self.async_collection.update_one(query, update) } @@ -114,32 +124,30 @@ impl<'a> Update<'a> { comment: Bson, ); - /// Runs the operation using the provided session. + /// Use the provided session when running the operation. pub fn session(mut self, value: impl Into<&'a mut ClientSession>) -> Self { self.session = Some(value.into()); self } } -action_impl! { - impl<'a> Action for Update<'a> { - type Future = UpdateFuture; +#[action_impl] +impl<'a> Action for Update<'a> { + type Future = UpdateFuture; - async fn execute(mut self) -> Result { - if let UpdateModifications::Document(d) = &self.update { - crate::bson_util::update_document_check(d)?; - } - resolve_write_concern_with_session!(self.coll, self.options, self.session.as_ref())?; - - let op = Op::with_update( - self.coll.namespace(), - self.query, - self.update, - self.multi, - self.options, - self.coll.human_readable_serialization(), - ); - self.coll.client().execute_operation(op, self.session).await + async fn execute(mut self) -> Result { + if let UpdateModifications::Document(d) = &self.update { + crate::bson_util::update_document_check(d)?; } + resolve_write_concern_with_session!(self.coll, self.options, self.session.as_ref())?; + + let op = Op::with_update( + self.coll.namespace(), + self.query, + self.update, + self.multi, + self.options, + ); + self.coll.client().execute_operation(op, self.session).await } } diff --git a/src/action/watch.rs b/src/action/watch.rs index 4f5dbaa42..1ecc8038b 100644 --- a/src/action/watch.rs +++ b/src/action/watch.rs @@ -2,7 +2,7 @@ use std::time::Duration; use bson::{Bson, Document, Timestamp}; -use super::{action_impl, option_setters, ExplicitSession, ImplicitSession}; +use super::{action_impl, deeplink, option_setters, ExplicitSession, ImplicitSession}; use crate::{ change_stream::{ event::{ChangeStreamEvent, ResumeToken}, @@ -41,9 +41,10 @@ impl Client { /// If the pipeline alters the structure of the returned events, the parsed type will need to be /// changed via [`ChangeStream::with_type`]. 
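Since the note above says a pipeline that reshapes events requires re-parsing them via [`ChangeStream::with_type`], here is a hedged sketch of that pattern; the `pipeline` setter on the watch builder and the `KeyOnly` event type are assumptions made for illustration only.

```rust
use futures_util::StreamExt;
use mongodb::{
    bson::{doc, Document},
    error::Result,
    Client,
};
use serde::Deserialize;

// Assumed event shape after `$project`; unknown fields are ignored by serde.
#[derive(Debug, Deserialize)]
struct KeyOnly {
    #[serde(rename = "documentKey")]
    document_key: Option<Document>,
}

async fn watch_document_keys(client: &Client) -> Result<()> {
    let mut stream = client
        .watch()
        .pipeline(vec![doc! { "$project": { "documentKey": 1 } }])
        .await?
        // The projection changes the event structure, so reparse as `KeyOnly`.
        .with_type::<KeyOnly>();
    while let Some(event) = stream.next().await.transpose()? {
        println!("changed key: {:?}", event.document_key);
    }
    Ok(())
}
```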
/// - /// `await` will return `Result<`[`ChangeStream`]`<`[`ChangeStreamEvent`]`>>` or - /// `Result<`[`SessionChangeStream`]`<`[`ChangeStreamEvent`]`>>` if a + /// `await` will return d[`Result>>`] or + /// d[`Result>>`] if a /// [`ClientSession`] has been provided. + #[deeplink] pub fn watch(&self) -> Watch { Watch::new_cluster(self) } @@ -68,9 +69,10 @@ impl Database { /// If the pipeline alters the structure of the returned events, the parsed type will need to be /// changed via [`ChangeStream::with_type`]. /// - /// `await` will return `Result<`[`ChangeStream`]`<`[`ChangeStreamEvent`]`>>` or - /// `Result<`[`SessionChangeStream`]`<`[`ChangeStreamEvent`]`>>` if a + /// `await` will return d[`Result>>`] or + /// d[`Result>>`] if a /// [`ClientSession`] has been provided. + #[deeplink] pub fn watch(&self) -> Watch { Watch::new( self.client(), @@ -79,7 +81,10 @@ impl Database { } } -impl Collection { +impl Collection +where + T: Send + Sync, +{ /// Starts a new [`ChangeStream`](change_stream/struct.ChangeStream.html) that receives events /// for all changes in this collection. A /// [`ChangeStream`](change_stream/struct.ChangeStream.html) cannot be started on system @@ -91,9 +96,10 @@ impl Collection { /// Change streams require either a "majority" read concern or no read concern. Anything else /// will cause a server error. /// - /// `await` will return `Result<`[`ChangeStream`]`<`[`ChangeStreamEvent`]`>>` or - /// `Result<`[`SessionChangeStream`]`<`[`ChangeStreamEvent`]`>>` if a + /// `await` will return d[`Result>>`] or + /// d[`Result>>`] if a /// [`ClientSession`] has been provided. + #[deeplink] pub fn watch(&self) -> Watch { Watch::new(self.client(), self.namespace().into()) } @@ -133,7 +139,10 @@ impl crate::sync::Database { } #[cfg(feature = "sync")] -impl crate::sync::Collection { +impl crate::sync::Collection +where + T: Send + Sync, +{ /// Starts a new [`ChangeStream`](change_stream/struct.ChangeStream.html) that receives events /// for all changes in this collection. A /// [`ChangeStream`](change_stream/struct.ChangeStream.html) cannot be started on system @@ -243,65 +252,51 @@ impl<'a> Watch<'a, ImplicitSession> { } } -action_impl! { - impl<'a> Action for Watch<'a, ImplicitSession> { - type Future = WatchFuture; +#[action_impl(sync = crate::sync::ChangeStream>)] +impl<'a> Action for Watch<'a, ImplicitSession> { + type Future = WatchFuture; - async fn execute(mut self) -> Result>> { - resolve_options!( - self.client, - self.options, - [read_concern, selection_criteria] - ); - if self.cluster { - self.options - .get_or_insert_with(Default::default) - .all_changes_for_cluster = Some(true); - } - self.client - .execute_watch(self.pipeline, self.options, self.target, None) - .await - } - - fn sync_wrap(out) -> Result>> { - out.map(crate::sync::ChangeStream::new) + async fn execute(mut self) -> Result>> { + resolve_options!( + self.client, + self.options, + [read_concern, selection_criteria] + ); + if self.cluster { + self.options + .get_or_insert_with(Default::default) + .all_changes_for_cluster = Some(true); } + self.client + .execute_watch(self.pipeline, self.options, self.target, None) + .await } } -action_impl! 
{ - impl<'a> Action for Watch<'a, ExplicitSession<'a>> { - type Future = WatchSessionFuture; - - async fn execute(mut self) -> Result>> { - resolve_read_concern_with_session!( - self.client, - self.options, - Some(&mut *self.session.0) - )?; - resolve_selection_criteria_with_session!( - self.client, - self.options, - Some(&mut *self.session.0) - )?; - if self.cluster { - self.options - .get_or_insert_with(Default::default) - .all_changes_for_cluster = Some(true); - } - self.client - .execute_watch_with_session( - self.pipeline, - self.options, - self.target, - None, - self.session.0, - ) - .await - } +#[action_impl(sync = crate::sync::SessionChangeStream>)] +impl<'a> Action for Watch<'a, ExplicitSession<'a>> { + type Future = WatchSessionFuture; - fn sync_wrap(out) -> Result>> { - out.map(crate::sync::SessionChangeStream::new) + async fn execute(mut self) -> Result>> { + resolve_read_concern_with_session!(self.client, self.options, Some(&mut *self.session.0))?; + resolve_selection_criteria_with_session!( + self.client, + self.options, + Some(&mut *self.session.0) + )?; + if self.cluster { + self.options + .get_or_insert_with(Default::default) + .all_changes_for_cluster = Some(true); } + self.client + .execute_watch_with_session( + self.pipeline, + self.options, + self.target, + None, + self.session.0, + ) + .await } } diff --git a/src/bson_util.rs b/src/bson_util.rs index 7de867c3a..436a1582f 100644 --- a/src/bson_util.rs +++ b/src/bson_util.rs @@ -14,12 +14,14 @@ use crate::{ RawBsonRef, RawDocumentBuf, }, + checked::Checked, error::{ErrorKind, Result}, runtime::SyncLittleEndianRead, }; /// Coerce numeric types into an `i64` if it would be lossless to do so. If this Bson is not numeric /// or the conversion would be lossy (e.g. 1.5 -> 1), this returns `None`. +#[allow(clippy::cast_possible_truncation)] pub(crate) fn get_int(val: &Bson) -> Option { match *val { Bson::Int32(i) => Some(i64::from(i)), @@ -42,6 +44,7 @@ pub(crate) fn get_int_raw(val: RawBsonRef<'_>) -> Option { /// Coerce numeric types into an `u64` if it would be lossless to do so. If this Bson is not numeric /// or the conversion would be lossy (e.g. 1.5 -> 1), this returns `None`. +#[allow(clippy::cast_possible_truncation)] pub(crate) fn get_u64(val: &Bson) -> Option { match *val { Bson::Int32(i) => u64::try_from(i).ok(), @@ -88,13 +91,13 @@ pub(crate) fn update_document_check(update: &Document) -> Result<()> { } /// The size in bytes of the provided document's entry in a BSON array at the given index. -pub(crate) fn array_entry_size_bytes(index: usize, doc_len: usize) -> usize { +pub(crate) fn array_entry_size_bytes(index: usize, doc_len: usize) -> Result { // * type (1 byte) // * number of decimal digits in key // * null terminator for the key (1 byte) // * size of value - 1 + num_decimal_digits(index) + 1 + doc_len + (Checked::new(1) + num_decimal_digits(index) + 1 + doc_len).get() } pub(crate) fn vec_to_raw_array_buf(docs: Vec) -> RawArrayBuf { diff --git a/src/change_stream.rs b/src/change_stream.rs index 428158ba6..86c2bb0b7 100644 --- a/src/change_stream.rs +++ b/src/change_stream.rs @@ -57,7 +57,7 @@ use crate::{ /// let mut change_stream = coll.watch().await?; /// let coll_ref = coll.clone(); /// task::spawn(async move { -/// coll_ref.insert_one(doc! { "x": 1 }, None).await; +/// coll_ref.insert_one(doc! { "x": 1 }).await; /// }); /// while let Some(event) = change_stream.next().await.transpose()? 
{ /// println!("operation performed: {:?}, document: {:?}", event.operation_type, event.full_document); diff --git a/src/change_stream/session.rs b/src/change_stream/session.rs index e292b44af..0af9e2fa4 100644 --- a/src/change_stream/session.rs +++ b/src/change_stream/session.rs @@ -85,7 +85,7 @@ where /// let mut cs = coll.watch().session(&mut session).await?; /// while let Some(event) = cs.next(&mut session).await? { /// let id = bson::to_bson(&event.id)?; - /// other_coll.insert_one_with_session(doc! { "id": id }, None, &mut session).await?; + /// other_coll.insert_one(doc! { "id": id }).session(&mut session).await?; /// } /// # Ok::<(), mongodb::error::Error>(()) /// # }; diff --git a/src/checked.rs b/src/checked.rs new file mode 100644 index 000000000..ece4d2a59 --- /dev/null +++ b/src/checked.rs @@ -0,0 +1,502 @@ +// Modified from https://github.com/zeta12ti/Checked/blob/master/src/num.rs +// Original license: +// MIT License +// +// Copyright (c) 2017 zeta12ti +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +use std::{cmp::Ordering, convert::TryFrom, fmt, ops::*}; + +/// The Checked type. See the [module level documentation for more.](index.html) +#[derive(PartialEq, Eq, Clone, Copy, Hash)] +pub struct Checked(pub Option); + +impl Checked { + /// Creates a new Checked instance from some sort of integer. + #[inline] + pub fn new(x: T) -> Checked { + Checked(Some(x)) + } + + pub fn try_from(value: F) -> crate::error::Result + where + T: TryFrom, + T::Error: std::fmt::Display, + { + value + .try_into() + .map(|v| Self(Some(v))) + .map_err(|e| crate::error::Error::invalid_argument(format! 
{"{}", e})) + } + + pub fn get(self) -> crate::error::Result { + self.0 + .ok_or_else(|| crate::error::Error::invalid_argument("checked arithmetic failure")) + } + + pub fn try_into(self) -> crate::error::Result + where + T: TryInto, + T::Error: std::fmt::Display, + { + self.get().and_then(|v| { + v.try_into() + .map_err(|e| crate::error::Error::invalid_argument(format!("{}", e))) + }) + } +} + +// The derived Default only works if T has Default +// Even though this is what it would be anyway +// May change this to T's default (if it has one) +impl Default for Checked { + #[inline] + fn default() -> Checked { + Checked(None) + } +} + +impl fmt::Debug for Checked { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match **self { + Some(ref x) => x.fmt(f), + None => "overflow".fmt(f), + } + } +} + +impl fmt::Display for Checked { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match **self { + Some(ref x) => x.fmt(f), + None => "overflow".fmt(f), + } + } +} + +// I'd like to do +// `impl From where T: From for Checked`` +// in the obvious way, but that "conflicts" with the default `impl From for T`. +// This would subsume both the below Froms since Option has the right From impl. +impl From for Checked { + #[inline] + fn from(x: T) -> Checked { + Checked(Some(x)) + } +} + +impl From> for Checked { + #[inline] + fn from(x: Option) -> Checked { + Checked(x) + } +} + +impl Deref for Checked { + type Target = Option; + + #[inline] + fn deref(&self) -> &Option { + &self.0 + } +} + +impl DerefMut for Checked { + #[inline] + fn deref_mut(&mut self) -> &mut Option { + &mut self.0 + } +} + +impl PartialOrd for Checked { + fn partial_cmp(&self, other: &Checked) -> Option { + // I'm not really sure why we can't match **self etc. here. + // Even with refs everywhere it complains + // Note what happens in this implementation: + // we take the reference self, and call deref (the method) on it + // By Deref coercion, self gets derefed to a Checked + // Now Checked's deref gets called, returning a &Option + // That's what gets matched + match (self.deref(), other.deref()) { + (Some(x), Some(y)) => PartialOrd::partial_cmp(x, y), + _ => None, + } + } +} + +// implements the unary operator `op &T` +// based on `op T` where `T` is expected to be `Copy`able +macro_rules! forward_ref_unop { + (impl $imp:ident, $method:ident for $t:ty {}) => { + impl<'a> $imp for &'a $t { + type Output = <$t as $imp>::Output; + + #[inline] + fn $method(self) -> <$t as $imp>::Output { + $imp::$method(*self) + } + } + }; +} + +// implements binary operators "&T op U", "T op &U", "&T op &U" +// based on "T op U" where T and U are expected to be `Copy`able +macro_rules! forward_ref_binop { + (impl $imp:ident, $method:ident for $t:ty, $u:ty {}) => { + impl<'a> $imp<$u> for &'a $t { + type Output = <$t as $imp<$u>>::Output; + + #[inline] + fn $method(self, other: $u) -> <$t as $imp<$u>>::Output { + $imp::$method(*self, other) + } + } + + impl<'a> $imp<&'a $u> for $t { + type Output = <$t as $imp<$u>>::Output; + + #[inline] + fn $method(self, other: &'a $u) -> <$t as $imp<$u>>::Output { + $imp::$method(self, *other) + } + } + + impl<'a, 'b> $imp<&'a $u> for &'b $t { + type Output = <$t as $imp<$u>>::Output; + + #[inline] + fn $method(self, other: &'a $u) -> <$t as $imp<$u>>::Output { + $imp::$method(*self, *other) + } + } + }; +} + +macro_rules! 
impl_sh { + ($t:ident, $f:ident) => { + impl Shl> for Checked<$t> { + type Output = Checked<$t>; + + fn shl(self, other: Checked<$f>) -> Checked<$t> { + match (*self, *other) { + (Some(x), Some(y)) => Checked(x.checked_shl(y)), + _ => Checked(None), + } + } + } + + impl Shl<$f> for Checked<$t> { + type Output = Checked<$t>; + + fn shl(self, other: $f) -> Checked<$t> { + match *self { + Some(x) => Checked(x.checked_shl(other)), + None => Checked(None), + } + } + } + + forward_ref_binop! { impl Shl, shl for Checked<$t>, Checked<$f> {} } + forward_ref_binop! { impl Shl, shl for Checked<$t>, $f {} } + + impl ShlAssign<$f> for Checked<$t> { + #[inline] + fn shl_assign(&mut self, other: $f) { + *self = *self << other; + } + } + + impl ShlAssign> for Checked<$t> { + #[inline] + fn shl_assign(&mut self, other: Checked<$f>) { + *self = *self << other; + } + } + + impl Shr> for Checked<$t> { + type Output = Checked<$t>; + + fn shr(self, other: Checked<$f>) -> Checked<$t> { + match (*self, *other) { + (Some(x), Some(y)) => Checked(x.checked_shr(y)), + _ => Checked(None), + } + } + } + + impl Shr<$f> for Checked<$t> { + type Output = Checked<$t>; + + fn shr(self, other: $f) -> Checked<$t> { + match *self { + Some(x) => Checked(x.checked_shr(other)), + None => Checked(None), + } + } + } + + forward_ref_binop! { impl Shr, shr for Checked<$t>, Checked<$f> {} } + forward_ref_binop! { impl Shr, shr for Checked<$t>, $f {} } + + impl ShrAssign<$f> for Checked<$t> { + #[inline] + fn shr_assign(&mut self, other: $f) { + *self = *self >> other; + } + } + + impl ShrAssign> for Checked<$t> { + #[inline] + fn shr_assign(&mut self, other: Checked<$f>) { + *self = *self >> other; + } + } + }; +} + +macro_rules! impl_sh_reverse { + ($t:ident, $f:ident) => { + impl Shl> for $f { + type Output = Checked<$f>; + + fn shl(self, other: Checked<$t>) -> Checked<$f> { + match *other { + Some(x) => Checked(self.checked_shl(x)), + None => Checked(None), + } + } + } + + forward_ref_binop! { impl Shl, shl for $f, Checked<$t> {} } + + impl Shr> for $f { + type Output = Checked<$f>; + + fn shr(self, other: Checked<$t>) -> Checked<$f> { + match *other { + Some(x) => Checked(self.checked_shr(x)), + None => Checked(None), + } + } + } + + forward_ref_binop! { impl Shr, shr for $f, Checked<$t> {} } + }; +} + +macro_rules! impl_sh_all { + ($($t:ident)*) => ($( + // When checked_shX is added for other shift sizes, uncomment some of these. + // impl_sh! { $t, u8 } + // impl_sh! { $t, u16 } + impl_sh! { $t, u32 } + //impl_sh! { $t, u64 } + //impl_sh! { $t, usize } + + //impl_sh! { $t, i8 } + //impl_sh! { $t, i16 } + //impl_sh! { $t, i32 } + //impl_sh! { $t, i64 } + //impl_sh! { $t, isize } + + // impl_sh_reverse! { u8, $t } + // impl_sh_reverse! { u16, $t } + impl_sh_reverse! { u32, $t } + //impl_sh_reverse! { u64, $t } + //impl_sh_reverse! { usize, $t } + + //impl_sh_reverse! { i8, $t } + //impl_sh_reverse! { i16, $t } + //impl_sh_reverse! { i32, $t } + //impl_sh_reverse! { i64, $t } + //impl_sh_reverse! { isize, $t } + )*) +} + +impl_sh_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize } + +// implements unary operators for checked types +macro_rules! impl_unop { + (impl $imp:ident, $method:ident, $checked_method:ident for $t:ty {}) => { + impl $imp for Checked<$t> { + type Output = Checked<$t>; + + fn $method(self) -> Checked<$t> { + match *self { + Some(x) => Checked(x.$checked_method()), + None => Checked(None), + } + } + } + + forward_ref_unop! 
{ impl $imp, $method for Checked<$t> {} } + }; +} + +// implements unary operators for checked types (with no checked method) +macro_rules! impl_unop_unchecked { + (impl $imp:ident, $method:ident for $t:ty {$op:tt}) => { + impl $imp for Checked<$t> { + type Output = Checked<$t>; + + fn $method(self) -> Checked<$t> { + match *self { + Some(x) => Checked(Some($op x)), + None => Checked(None) + } + } + } + + forward_ref_unop! { impl $imp, $method for Checked<$t> {} } + } +} + +// implements binary operators for checked types +macro_rules! impl_binop { + (impl $imp:ident, $method:ident, $checked_method:ident for $t:ty {}) => { + impl $imp for Checked<$t> { + type Output = Checked<$t>; + + fn $method(self, other: Checked<$t>) -> Checked<$t> { + match (*self, *other) { + (Some(x), Some(y)) => Checked(x.$checked_method(y)), + _ => Checked(None), + } + } + } + + impl $imp<$t> for Checked<$t> { + type Output = Checked<$t>; + + fn $method(self, other: $t) -> Checked<$t> { + match *self { + Some(x) => Checked(x.$checked_method(other)), + _ => Checked(None), + } + } + } + + impl $imp> for $t { + type Output = Checked<$t>; + + fn $method(self, other: Checked<$t>) -> Checked<$t> { + match *other { + Some(x) => Checked(self.$checked_method(x)), + None => Checked(None), + } + } + } + + forward_ref_binop! { impl $imp, $method for Checked<$t>, Checked<$t> {} } + forward_ref_binop! { impl $imp, $method for Checked<$t>, $t {} } + forward_ref_binop! { impl $imp, $method for $t, Checked<$t> {} } + }; +} + +// implements binary operators for checked types (no checked method) +macro_rules! impl_binop_unchecked { + (impl $imp:ident, $method:ident for $t:ty {$op:tt}) => { + impl $imp for Checked<$t> { + type Output = Checked<$t>; + + fn $method(self, other: Checked<$t>) -> Checked<$t> { + match (*self, *other) { + (Some(x), Some(y)) => Checked(Some(x $op y)), + _ => Checked(None), + } + } + } + + impl $imp<$t> for Checked<$t> { + type Output = Checked<$t>; + + fn $method(self, other: $t) -> Checked<$t> { + match *self { + Some(x) => Checked(Some(x $op other)), + _ => Checked(None), + } + } + } + + impl $imp> for $t { + type Output = Checked<$t>; + + fn $method(self, other: Checked<$t>) -> Checked<$t> { + match *other { + Some(x) => Checked(Some(self $op x)), + None => Checked(None), + } + } + } + + forward_ref_binop! { impl $imp, $method for Checked<$t>, Checked<$t> {} } + forward_ref_binop! { impl $imp, $method for Checked<$t>, $t {} } + forward_ref_binop! { impl $imp, $method for $t, Checked<$t> {} } + } +} + +// implements assignment operators for checked types +macro_rules! impl_binop_assign { + (impl $imp:ident, $method:ident for $t:ty {$op:tt}) => { + impl $imp for Checked<$t> { + #[inline] + fn $method(&mut self, other: Checked<$t>) { + *self = *self $op other; + } + } + + impl $imp<$t> for Checked<$t> { + #[inline] + fn $method(&mut self, other: $t) { + *self = *self $op other; + } + } + }; +} + +macro_rules! checked_impl { + ($($t:ty)*) => { + $( + impl_binop! { impl Add, add, checked_add for $t {} } + impl_binop_assign! { impl AddAssign, add_assign for $t {+} } + impl_binop! { impl Sub, sub, checked_sub for $t {} } + impl_binop_assign! { impl SubAssign, sub_assign for $t {-} } + impl_binop! { impl Mul, mul, checked_mul for $t {} } + impl_binop_assign! { impl MulAssign, mul_assign for $t {*} } + impl_binop! { impl Div, div, checked_div for $t {} } + impl_binop_assign! { impl DivAssign, div_assign for $t {/} } + impl_binop! { impl Rem, rem, checked_rem for $t {} } + impl_binop_assign! 
{ impl RemAssign, rem_assign for $t {%} } + impl_unop_unchecked! { impl Not, not for $t {!} } + impl_binop_unchecked! { impl BitXor, bitxor for $t {^} } + impl_binop_assign! { impl BitXorAssign, bitxor_assign for $t {^} } + impl_binop_unchecked! { impl BitOr, bitor for $t {|} } + impl_binop_assign! { impl BitOrAssign, bitor_assign for $t {|} } + impl_binop_unchecked! { impl BitAnd, bitand for $t {&} } + impl_binop_assign! { impl BitAndAssign, bitand_assign for $t {&} } + impl_unop! { impl Neg, neg, checked_neg for $t {} } + + )* + }; +} + +checked_impl! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize } diff --git a/src/client/action/perf.rs b/src/client/action/perf.rs index a026614b3..725080c1f 100644 --- a/src/client/action/perf.rs +++ b/src/client/action/perf.rs @@ -1,21 +1,20 @@ use crate::action::action_impl; -action_impl! { - impl<'a> Action for crate::action::WarmConnectionPool<'a> { - type Future = WarmConnectionPoolFuture; +#[action_impl] +impl<'a> Action for crate::action::WarmConnectionPool<'a> { + type Future = WarmConnectionPoolFuture; - async fn execute(self) -> () { - if !self - .client - .inner - .options - .min_pool_size - .map_or(false, |s| s > 0) - { - // No-op when min_pool_size is zero. - return; - } - self.client.inner.topology.warm_pool().await; + async fn execute(self) -> () { + if !self + .client + .inner + .options + .min_pool_size + .map_or(false, |s| s > 0) + { + // No-op when min_pool_size is zero. + return; } + self.client.inner.topology.warm_pool().await; } } diff --git a/src/client/action/shutdown.rs b/src/client/action/shutdown.rs index 904b4ecc3..a672b26c8 100644 --- a/src/client/action/shutdown.rs +++ b/src/client/action/shutdown.rs @@ -4,32 +4,31 @@ use futures_util::future::join_all; use crate::action::action_impl; -action_impl! { - impl Action for crate::action::Shutdown { - type Future = ShutdownFuture; +#[action_impl] +impl Action for crate::action::Shutdown { + type Future = ShutdownFuture; - async fn execute(self) -> () { - if !self.immediate { - // Subtle bug: if this is inlined into the `join_all(..)` call, Rust will extend the - // lifetime of the temporary unnamed `MutexLock` until the end of the *statement*, - // causing the lock to be held for the duration of the join, which deadlocks. - let pending = self - .client - .inner - .shutdown - .pending_drops - .lock() - .unwrap() - .extract(); - join_all(pending).await; - } - self.client.inner.topology.shutdown().await; - // This has to happen last to allow pending cleanup to execute commands. - self.client + async fn execute(self) -> () { + if !self.immediate { + // Subtle bug: if this is inlined into the `join_all(..)` call, Rust will extend the + // lifetime of the temporary unnamed `MutexLock` until the end of the *statement*, + // causing the lock to be held for the duration of the join, which deadlocks. + let pending = self + .client .inner .shutdown - .executed - .store(true, Ordering::SeqCst); + .pending_drops + .lock() + .unwrap() + .extract(); + join_all(pending).await; } + self.client.inner.topology.shutdown().await; + // This has to happen last to allow pending cleanup to execute commands. + self.client + .inner + .shutdown + .executed + .store(true, Ordering::SeqCst); } } diff --git a/src/client/auth.rs b/src/client/auth.rs index 2476fced3..c20fc1d6c 100644 --- a/src/client/auth.rs +++ b/src/client/auth.rs @@ -194,6 +194,8 @@ impl AuthMechanism { authentication", )); } + // TODO RUST-1660: Handle specific provider validation, perhaps also do Azure as + // part of this ticket. 
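Stepping back to the `Checked` wrapper introduced in src/checked.rs above: this crate-internal sketch shows how the overloaded operators defer to the `checked_*` methods and how `get()` converts an overflowed result into a driver error. `index_key_digits` is a hypothetical stand-in for `num_decimal_digits`, and the function merely mirrors the shape of `array_entry_size_bytes`.

```rust
use crate::checked::Checked;
use crate::error::Result;

// Hypothetical helper standing in for `num_decimal_digits`.
fn index_key_digits(index: usize) -> usize {
    index.to_string().len()
}

// Every `+` maps to `checked_add`, so an overflow poisons the whole
// expression ...
fn entry_size(index: usize, doc_len: usize) -> Result<usize> {
    let size = Checked::new(1) + index_key_digits(index) + 1 + doc_len;
    // ... and `get()` surfaces that overflow as `Error::invalid_argument`.
    size.get()
}
```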
Specific providers will add predefined oidc_callback here if credential .source .as_ref() @@ -208,6 +210,15 @@ impl AuthMechanism { "password must not be set for MONGODB-OIDC authentication", )); } + if let Some(allowed_hosts) = credential + .mechanism_properties + .as_ref() + .and_then(|p| p.get("ALLOWED_HOSTS")) + { + allowed_hosts + .as_array() + .ok_or_else(|| Error::invalid_argument("ALLOWED_HOSTS must be an array"))?; + } Ok(()) } _ => Ok(()), @@ -267,7 +278,9 @@ impl AuthMechanism { x509::build_speculative_client_first(credential), )))), Self::Plain => Ok(None), - Self::MongoDbOidc => Ok(None), + Self::MongoDbOidc => Ok(Some(ClientFirst::Oidc(Box::new( + oidc::build_speculative_client_first(credential), + )))), #[cfg(feature = "aws-auth")] AuthMechanism::MongoDbAws => Ok(None), AuthMechanism::MongoDbCr => Err(ErrorKind::Authentication { @@ -320,7 +333,45 @@ impl AuthMechanism { } .into()), AuthMechanism::MongoDbOidc => { - oidc::authenticate_stream(stream, credential, server_api).await + oidc::authenticate_stream(stream, credential, server_api, None).await + } + _ => Err(ErrorKind::Authentication { + message: format!("Authentication mechanism {:?} not yet implemented.", self), + } + .into()), + } + } + + pub(crate) async fn reauthenticate_stream( + &self, + stream: &mut Connection, + credential: &Credential, + server_api: Option<&ServerApi>, + ) -> Result<()> { + self.validate_credential(credential)?; + + match self { + AuthMechanism::ScramSha1 + | AuthMechanism::ScramSha256 + | AuthMechanism::MongoDbX509 + | AuthMechanism::Plain + | AuthMechanism::MongoDbCr => Err(ErrorKind::Authentication { + message: format!( + "Reauthentication for authentication mechanism {:?} is not supported.", + self + ), + } + .into()), + #[cfg(feature = "aws-auth")] + AuthMechanism::MongoDbAws => Err(ErrorKind::Authentication { + message: format!( + "Reauthentication for authentication mechanism {:?} is not supported.", + self + ), + } + .into()), + AuthMechanism::MongoDbOidc => { + oidc::reauthenticate_stream(stream, credential, server_api).await } _ => Err(ErrorKind::Authentication { message: format!("Authentication mechanism {:?} not yet implemented.", self), @@ -342,7 +393,6 @@ impl FromStr for AuthMechanism { GSSAPI_STR => Ok(AuthMechanism::Gssapi), PLAIN_STR => Ok(AuthMechanism::Plain), MONGODB_OIDC_STR => Ok(AuthMechanism::MongoDbOidc), - #[cfg(feature = "aws-auth")] MONGODB_AWS_STR => Ok(AuthMechanism::MongoDbAws), #[cfg(not(feature = "aws-auth"))] @@ -389,11 +439,15 @@ pub struct Credential { /// Additional properties for the given mechanism. pub mechanism_properties: Option, - /// The token callbacks for OIDC authentication. - /// TODO RUST-1497: make this `pub` + /// The token callback for OIDC authentication. + // TODO RUST-1497: make this `pub` + // Credential::builder().oidc_callback(oidc::Callback::human(...)).build() + // the name of the field here does not well encompass what this field actually is since + // it contains all the OIDC state information, not just the callback, but it conforms + // to how a user would interact with it. #[serde(skip)] #[derivative(Debug = "ignore", PartialEq = "ignore")] - pub(crate) oidc_callbacks: Option, + pub(crate) oidc_callback: Option, } impl Credential { @@ -415,7 +469,7 @@ impl Credential { } } - /// Attempts to authenticate a stream according this credential, returning an error + /// Attempts to authenticate a stream according to this credential, returning an error /// result on failure. 
A mechanism may be negotiated if one is not provided as part of the /// credential. pub(crate) async fn authenticate_stream( @@ -444,6 +498,9 @@ impl Credential { FirstRound::X509(server_first) => { x509::authenticate_stream(conn, self, server_api, server_first).await } + FirstRound::Oidc(server_first) => { + oidc::authenticate_stream(conn, self, server_api, server_first).await + } }; } @@ -502,6 +559,7 @@ impl Debug for Credential { pub(crate) enum ClientFirst { Scram(ScramVersion, scram::ClientFirst), X509(Box), + Oidc(Box), } impl ClientFirst { @@ -509,6 +567,7 @@ impl ClientFirst { match self { Self::Scram(version, client_first) => client_first.to_command(version).body, Self::X509(command) => command.body.clone(), + Self::Oidc(command) => command.body.clone(), } } @@ -522,6 +581,7 @@ impl ClientFirst { }, ), Self::X509(..) => FirstRound::X509(server_first), + Self::Oidc(..) => FirstRound::Oidc(server_first), } } } @@ -532,6 +592,7 @@ impl ClientFirst { pub(crate) enum FirstRound { Scram(ScramVersion, scram::FirstRound), X509(Document), + Oidc(Document), } pub(crate) fn generate_nonce_bytes() -> [u8; 32] { diff --git a/src/client/auth/oidc.rs b/src/client/auth/oidc.rs index 7f264c57a..62565d1d0 100644 --- a/src/client/auth/oidc.rs +++ b/src/client/auth/oidc.rs @@ -1,10 +1,8 @@ +use serde::Deserialize; use std::{ - sync::Arc, + sync::{Arc, RwLock}, time::{Duration, Instant}, }; - -use bson::rawdoc; -use serde::Deserialize; use typed_builder::TypedBuilder; use crate::{ @@ -13,54 +11,137 @@ use crate::{ sasl::{SaslResponse, SaslStart}, AuthMechanism, }, - options::ServerApi, + options::{ServerAddress, ServerApi}, }, - cmap::Connection, + cmap::{Command, Connection}, error::{Error, Result}, BoxFuture, }; +use bson::{doc, rawdoc, Document}; use super::{sasl::SaslContinue, Credential, MONGODB_OIDC_STR}; +const HUMAN_CALLBACK_TIMEOUT: Duration = Duration::from_secs(5 * 60); +const MACHINE_CALLBACK_TIMEOUT: Duration = Duration::from_secs(60); +const MACHINE_INVALIDATE_SLEEP_TIMEOUT: Duration = Duration::from_millis(100); +const API_VERSION: u32 = 1; +const DEFAULT_ALLOWED_HOSTS: &[&str] = &[ + "*.mongodb.net", + "*.mongodb-qa.net", + "*.mongodb-dev.net", + "*.mongodbgov.net", + "localhost", + "127.0.0.1", + "::1", +]; + /// The user-supplied callbacks for OIDC authentication. #[derive(Clone)] -pub struct Callbacks { - inner: Arc, +pub struct State { + callback: Callback, + cache: Arc>, +} + +#[derive(Clone)] +#[non_exhaustive] +pub struct Callback { + inner: Arc, + kind: CallbackKind, +} + +#[non_exhaustive] +#[derive(Clone, Copy)] +enum CallbackKind { + Human, + Machine, } -impl Callbacks { - /// Create a new instance with a token request callback. - pub fn new(on_request: F) -> Self +// TODO RUST-1497: These will no longer be dead_code +#[allow(dead_code)] +impl Callback { + fn new(callback: F, kind: CallbackKind) -> Callback where - F: Fn(IdpServerInfo, RequestParameters) -> BoxFuture<'static, Result> + F: Fn(CallbackContext) -> BoxFuture<'static, Result> + Send + Sync + 'static, { - Self { - inner: Arc::new(CallbacksInner { - on_request: Box::new(on_request), + Callback { + inner: Arc::new(CallbackInner { + f: Box::new(callback), }), + kind, + } + } + + /// Create a new human token request callback. + pub fn human(callback: F) -> State + where + F: Fn(CallbackContext) -> BoxFuture<'static, Result> + + Send + + Sync + + 'static, + { + Self::create_state(callback, CallbackKind::Human) + } + + /// Create a new machine token request callback. 
+ pub fn machine(callback: F) -> State + where + F: Fn(CallbackContext) -> BoxFuture<'static, Result> + + Send + + Sync + + 'static, + { + Self::create_state(callback, CallbackKind::Machine) + } + + fn create_state(callback: F, kind: CallbackKind) -> State + where + F: Fn(CallbackContext) -> BoxFuture<'static, Result> + + Send + + Sync + + 'static, + { + State { + callback: Self::new(callback, kind), + cache: Arc::new(RwLock::new(Cache::new())), } } } -impl std::fmt::Debug for Callbacks { +impl std::fmt::Debug for Callback { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Callbacks").finish() + f.debug_struct("Callback").finish() } } -struct CallbacksInner { - on_request: Box< - dyn Fn(IdpServerInfo, RequestParameters) -> BoxFuture<'static, Result> - + Send - + Sync, - >, - // on_refresh: Option IdpServerResponse + Send + Sync>>, +pub struct CallbackInner { + f: Box BoxFuture<'static, Result> + Send + Sync>, } -#[derive(Debug, Deserialize)] +#[derive(Debug, Clone)] +pub struct Cache { + idp_server_info: Option, + refresh_token: Option, + access_token: Option, + token_gen_id: u32, + last_call_time: Instant, +} + +impl Cache { + fn new() -> Self { + Self { + idp_server_info: None, + refresh_token: None, + access_token: None, + token_gen_id: 0, + last_call_time: Instant::now(), + } + } +} + +#[derive(Clone, Debug, Deserialize)] #[serde(rename_all = "camelCase")] #[non_exhaustive] pub struct IdpServerInfo { @@ -69,6 +150,15 @@ pub struct IdpServerInfo { pub request_scopes: Option>, } +#[derive(Debug)] +#[non_exhaustive] +pub struct CallbackContext { + pub timeout_seconds: Option, + pub version: u32, + pub refresh_token: Option, + pub idp_info: Option, +} + #[derive(TypedBuilder)] #[builder(field_defaults(setter(into)))] #[non_exhaustive] @@ -78,26 +168,151 @@ pub struct IdpServerResponse { pub refresh_token: Option, } -#[derive(Debug)] -#[non_exhaustive] -pub struct RequestParameters { - pub deadline: Instant, +pub(crate) fn build_speculative_client_first(credential: &Credential) -> Command { + self::build_client_first(credential, None) +} + +/// Constructs the first client message in the OIDC handshake for speculative authentication +pub(crate) fn build_client_first( + credential: &Credential, + server_api: Option<&ServerApi>, +) -> Command { + let mut auth_command_doc = doc! 
{ + "authenticate": 1, + "mechanism": MONGODB_OIDC_STR, + }; + + if credential.oidc_callback.is_none() { + auth_command_doc.insert("jwt", ""); + } else if let Some(access_token) = get_access_token(credential) { + auth_command_doc.insert("jwt", access_token); + } + + let mut command = Command::new("authenticate", "$external", auth_command_doc); + if let Some(server_api) = server_api { + command.set_server_api(server_api); + } + + command +} + +fn get_access_token(credential: &Credential) -> Option { + credential + .oidc_callback + .as_ref() + .unwrap() + .cache + .read() + .unwrap() + .access_token + .clone() +} + +fn get_refresh_token_and_idp_info( + credential: &Credential, +) -> (Option, Option) { + let cache = credential + .oidc_callback + .as_ref() + // this unwrap is safe because this function is only called from within authenticate_human + .unwrap() + .cache + .read() + .unwrap(); + let refresh_token = cache.refresh_token.clone(); + let idp_info = cache.idp_server_info.clone(); + (refresh_token, idp_info) +} + +pub(crate) async fn reauthenticate_stream( + conn: &mut Connection, + credential: &Credential, + server_api: Option<&ServerApi>, +) -> Result<()> { + invalidate_caches(conn, credential); + authenticate_stream(conn, credential, server_api, None).await } pub(crate) async fn authenticate_stream( conn: &mut Connection, credential: &Credential, server_api: Option<&ServerApi>, + server_first: impl Into>, ) -> Result<()> { - let source = credential.source.as_deref().unwrap_or("$external"); - let callbacks = credential - .oidc_callbacks + if server_first.into().is_some() { + // speculative authentication succeeded, no need to authenticate again + return Ok(()); + } + + let Callback { inner, kind } = credential + .oidc_callback .as_ref() .ok_or_else(|| auth_error("no callbacks supplied"))? + .callback .clone(); + match kind { + CallbackKind::Machine => authenticate_machine(conn, credential, server_api, inner).await, + CallbackKind::Human => authenticate_human(conn, credential, server_api, inner).await, + } +} + +fn update_caches( + conn: &Connection, + credential: &Credential, + response: &IdpServerResponse, + idp_server_info: Option, +) { + let mut token_gen_id = conn.oidc_token_gen_id.write().unwrap(); + let mut cred_cache = credential + .oidc_callback + .as_ref() + // unwrap() is safe here because authenticate_human is only called if oidc_callback is Some + .unwrap() + .cache + .write() + .unwrap(); + if idp_server_info.is_some() { + cred_cache.idp_server_info = idp_server_info; + } + cred_cache.access_token = Some(response.access_token.clone()); + cred_cache.refresh_token = response.refresh_token.clone(); + cred_cache.last_call_time = Instant::now(); + cred_cache.token_gen_id += 1; + *token_gen_id = cred_cache.token_gen_id; +} + +fn invalidate_caches(conn: &Connection, credential: &Credential) { + let mut token_gen_id = conn.oidc_token_gen_id.write().unwrap(); + let mut cred_cache = credential + .oidc_callback + .as_ref() + // unwrap() is safe here because authenticate_human/machine is only called if oidc_callback is Some + .unwrap() + .cache + .write() + .unwrap(); + // It should be impossible for token_gen_id to be > cache.token_gen_id, but we check just in + // case + if *token_gen_id >= cred_cache.token_gen_id { + cred_cache.access_token = None; + *token_gen_id = 0; + } +} + +// send_sasl_start_command creates and sends a sasl_start command handling either +// one step or two step sasl based on whether or not the access token is Some. 
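A standalone sketch of the ALLOWED_HOSTS wildcard rule that `validate_address_with_allowed_hosts` applies further down: a pattern matches on exact equality or, when it begins with `*.`, whenever the hostname ends with the remainder of the pattern. The function and host names here are illustrative only.

```rust
// Standalone illustration of the allowed-hosts matching rule.
fn host_allowed(patterns: &[&str], hostname: &str) -> bool {
    patterns.iter().any(|&pattern| {
        pattern == hostname
            || (pattern.starts_with("*.") && hostname.ends_with(&pattern[1..]))
    })
}

fn main() {
    assert!(host_allowed(&["*.mongodb.net", "localhost"], "cluster0.mongodb.net"));
    assert!(host_allowed(&["*.mongodb.net", "localhost"], "localhost"));
    assert!(!host_allowed(&["*.mongodb.net"], "example.com"));
}
```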
+async fn send_sasl_start_command( + source: &str, + conn: &mut Connection, + credential: &Credential, + server_api: Option<&ServerApi>, + access_token: Option, +) -> Result { let mut start_doc = rawdoc! {}; - if let Some(username) = credential.username.as_deref() { + if let Some(access_token) = access_token { + start_doc.append("jwt", access_token); + } else if let Some(username) = credential.username.as_deref() { start_doc.append("n", username); } let sasl_start = SaslStart::new( @@ -107,20 +322,38 @@ pub(crate) async fn authenticate_stream( server_api.cloned(), ) .into_command(); - let response = send_sasl_command(conn, sasl_start).await?; + send_sasl_command(conn, sasl_start).await +} + +async fn do_two_step_auth( + source: &str, + conn: &mut Connection, + credential: &Credential, + server_api: Option<&ServerApi>, + callback: Arc, + timeout: Duration, +) -> Result<()> { + let response = send_sasl_start_command(source, conn, credential, server_api, None).await?; if response.done { return Err(invalid_auth_response()); } + + let server_info: IdpServerInfo = + bson::from_slice(&response.payload).map_err(|_| invalid_auth_response())?; let idp_response = { - let server_info: IdpServerInfo = - bson::from_slice(&response.payload).map_err(|_| invalid_auth_response())?; - const CALLBACK_TIMEOUT: Duration = Duration::from_secs(5 * 60); - let cb_params = RequestParameters { - deadline: Instant::now() + CALLBACK_TIMEOUT, + let cb_context = CallbackContext { + timeout_seconds: Some(Instant::now() + timeout), + version: API_VERSION, + refresh_token: None, + idp_info: Some(server_info.clone()), }; - (callbacks.inner.on_request)(server_info, cb_params).await? + (callback.f)(cb_context).await? }; + // Update the credential and connection caches with the access token and the credential cache + // with the refresh token and token_gen_id + update_caches(conn, credential, &idp_response, Some(server_info)); + let sasl_continue = SaslContinue::new( source.to_string(), response.conversation_id, @@ -136,6 +369,151 @@ pub(crate) async fn authenticate_stream( Ok(()) } +fn get_allowed_hosts(mechanism_properties: Option<&Document>) -> Result> { + if mechanism_properties.is_none() { + return Ok(Vec::from(DEFAULT_ALLOWED_HOSTS)); + } + if let Some(allowed_hosts) = + mechanism_properties.and_then(|p| p.get_array("ALLOWED_HOSTS").ok()) + { + return allowed_hosts + .iter() + .map(|host| { + host.as_str() + .ok_or_else(|| auth_error("ALLOWED_HOSTS must contain only strings")) + }) + .collect::>>(); + } + Ok(Vec::from(DEFAULT_ALLOWED_HOSTS)) +} + +fn validate_address_with_allowed_hosts( + mechanism_properties: Option<&Document>, + address: &ServerAddress, +) -> Result<()> { + let hostname = if let ServerAddress::Tcp { host, .. } = address { + host.as_str() + } else { + return Err(auth_error("OIDC human flow only supports TCP addresses")); + }; + for pattern in get_allowed_hosts(mechanism_properties)? 
{ + if pattern == hostname { + return Ok(()); + } + if pattern.starts_with("*.") && hostname.ends_with(&pattern[1..]) { + return Ok(()); + } + } + Err(auth_error( + "The Connection address is not in the allowed list of hosts", + )) +} + +async fn authenticate_human( + conn: &mut Connection, + credential: &Credential, + server_api: Option<&ServerApi>, + callback: Arc, +) -> Result<()> { + validate_address_with_allowed_hosts(credential.mechanism_properties.as_ref(), &conn.address)?; + + let source = credential.source.as_deref().unwrap_or("$external"); + + // If the access token is in the cache, we can use it to send the sasl start command and avoid + // the callback and sasl_continue + if let Some(access_token) = get_access_token(credential) { + let response = send_sasl_start_command( + source, + conn, + credential, + server_api, + Some(access_token.clone()), + ) + .await?; + if response.done { + return Ok(()); + } + invalidate_caches(conn, credential); + } + + // If the cache has a refresh token, we can avoid asking for the server info. + if let (refresh_token @ Some(_), idp_info) = get_refresh_token_and_idp_info(credential) { + let idp_response = { + let cb_context = CallbackContext { + timeout_seconds: Some(Instant::now() + HUMAN_CALLBACK_TIMEOUT), + version: API_VERSION, + refresh_token, + idp_info, + }; + (callback.f)(cb_context).await? + }; + // Update the credential and connection caches with the access token and the credential + // cache with the refresh token and token_gen_id + update_caches(conn, credential, &idp_response, None); + + let access_token = idp_response.access_token; + let response = send_sasl_start_command( + source, + conn, + credential, + server_api, + Some(access_token.clone()), + ) + .await?; + if response.done { + return Ok(()); + } + invalidate_caches(conn, credential); + } + + do_two_step_auth( + source, + conn, + credential, + server_api, + callback, + HUMAN_CALLBACK_TIMEOUT, + ) + .await +} + +async fn authenticate_machine( + conn: &mut Connection, + credential: &Credential, + server_api: Option<&ServerApi>, + callback: Arc, +) -> Result<()> { + let source = credential.source.as_deref().unwrap_or("$external"); + + // If the access token is in the cache, we can use it to send the sasl start command and avoid + // the callback and sasl_continue + if let Some(access_token) = get_access_token(credential) { + let response = send_sasl_start_command( + source, + conn, + credential, + server_api, + Some(access_token.clone()), + ) + .await?; + if response.done { + return Ok(()); + } + invalidate_caches(conn, credential); + tokio::time::sleep(MACHINE_INVALIDATE_SLEEP_TIMEOUT).await; + } + + do_two_step_auth( + source, + conn, + credential, + server_api, + callback, + MACHINE_CALLBACK_TIMEOUT, + ) + .await +} + fn auth_error(s: impl AsRef) -> Error { Error::authentication_error(MONGODB_OIDC_STR, s.as_ref()) } diff --git a/src/client/csfle/client_encryption.rs b/src/client/csfle/client_encryption.rs index 5b6c0c045..aee2f4438 100644 --- a/src/client/csfle/client_encryption.rs +++ b/src/client/csfle/client_encryption.rs @@ -99,13 +99,13 @@ impl ClientEncryption { /// Finds a single key document with the given UUID (BSON binary subtype 0x04). /// Returns the result of the internal find() operation on the key vault collection. pub async fn get_key(&self, id: &Binary) -> Result> { - self.key_vault.find_one(doc! { "_id": id }, None).await + self.key_vault.find_one(doc! { "_id": id }).await } /// Finds all documents in the key vault collection. 
/// Returns the result of the internal find() operation on the key vault collection. pub async fn get_keys(&self) -> Result> { - self.key_vault.find(doc! {}, None).await + self.key_vault.find(doc! {}).await } /// Adds a keyAltName to the keyAltNames array of the key document in the key vault collection @@ -120,7 +120,6 @@ impl ClientEncryption { .find_one_and_update( doc! { "_id": id }, doc! { "$addToSet": { "keyAltNames": key_alt_name } }, - None, ) .await } @@ -150,7 +149,7 @@ impl ClientEncryption { } }; self.key_vault - .find_one_and_update(doc! { "_id": id }, vec![update], None) + .find_one_and_update(doc! { "_id": id }, vec![update]) .await } @@ -160,7 +159,7 @@ impl ClientEncryption { key_alt_name: impl AsRef, ) -> Result> { self.key_vault - .find_one(doc! { "keyAltNames": key_alt_name.as_ref() }, None) + .find_one(doc! { "keyAltNames": key_alt_name.as_ref() }) .await } diff --git a/src/client/csfle/client_encryption/create_data_key.rs b/src/client/csfle/client_encryption/create_data_key.rs index b4307bee2..04d6fdfeb 100644 --- a/src/client/csfle/client_encryption/create_data_key.rs +++ b/src/client/csfle/client_encryption/create_data_key.rs @@ -11,25 +11,26 @@ use crate::{ use super::{ClientEncryption, MasterKey}; -action_impl! { - impl<'a> Action for CreateDataKey<'a> { - type Future = CreateDataKeyFuture; +#[action_impl] +impl<'a> Action for CreateDataKey<'a> { + type Future = CreateDataKeyFuture; - async fn execute(self) -> Result { - #[allow(unused_mut)] - let mut provider = self.master_key.provider(); - #[cfg(test)] - if let Some(tp) = self.test_kms_provider { - provider = tp; - } - let ctx = self.client_enc.create_data_key_ctx(provider, self.master_key, self.options)?; - let data_key = self.client_enc.exec.run_ctx(ctx, None).await?; - self.client_enc.key_vault.insert_one(&data_key, None).await?; - let bin_ref = data_key - .get_binary("_id") - .map_err(|e| Error::internal(format!("invalid data key id: {}", e)))?; - Ok(bin_ref.to_binary()) + async fn execute(self) -> Result { + #[allow(unused_mut)] + let mut provider = self.master_key.provider(); + #[cfg(test)] + if let Some(tp) = self.test_kms_provider { + provider = tp; } + let ctx = self + .client_enc + .create_data_key_ctx(provider, self.master_key, self.options)?; + let data_key = self.client_enc.exec.run_ctx(ctx, None).await?; + self.client_enc.key_vault.insert_one(&data_key).await?; + let bin_ref = data_key + .get_binary("_id") + .map_err(|e| Error::internal(format!("invalid data key id: {}", e)))?; + Ok(bin_ref.to_binary()) } } diff --git a/src/client/csfle/client_encryption/encrypt.rs b/src/client/csfle/client_encryption/encrypt.rs index 1941d892e..2c4be2927 100644 --- a/src/client/csfle/client_encryption/encrypt.rs +++ b/src/client/csfle/client_encryption/encrypt.rs @@ -11,43 +11,41 @@ use crate::{ use super::ClientEncryption; -action_impl! { - impl<'a> Action for Encrypt<'a, Value> { - type Future = EncryptFuture; +#[action_impl] +impl<'a> Action for Encrypt<'a, Value> { + type Future = EncryptFuture; - async fn execute(self) -> Result { - let ctx = self - .client_enc - .get_ctx_builder(self.key, self.algorithm, self.options.unwrap_or_default())? 
- .build_explicit_encrypt(self.mode.value)?; - let result = self.client_enc.exec.run_ctx(ctx, None).await?; - let bin_ref = result - .get_binary("v") - .map_err(|e| Error::internal(format!("invalid encryption result: {}", e)))?; - Ok(bin_ref.to_binary()) - } + async fn execute(self) -> Result { + let ctx = self + .client_enc + .get_ctx_builder(self.key, self.algorithm, self.options.unwrap_or_default())? + .build_explicit_encrypt(self.mode.value)?; + let result = self.client_enc.exec.run_ctx(ctx, None).await?; + let bin_ref = result + .get_binary("v") + .map_err(|e| Error::internal(format!("invalid encryption result: {}", e)))?; + Ok(bin_ref.to_binary()) } } -action_impl! { - impl<'a> Action for Encrypt<'a, Expression> { - type Future = EncryptExpressionFuture; +#[action_impl] +impl<'a> Action for Encrypt<'a, Expression> { + type Future = EncryptExpressionFuture; - async fn execute(self) -> Result { - let ctx = self - .client_enc - .get_ctx_builder(self.key, self.algorithm, self.options.unwrap_or_default())? - .build_explicit_encrypt_expression(self.mode.value)?; - let result = self.client_enc.exec.run_ctx(ctx, None).await?; - let doc_ref = result - .get_document("v") - .map_err(|e| Error::internal(format!("invalid encryption result: {}", e)))?; - let doc = doc_ref - .to_owned() - .to_document() - .map_err(|e| Error::internal(format!("invalid encryption result: {}", e)))?; - Ok(doc) - } + async fn execute(self) -> Result { + let ctx = self + .client_enc + .get_ctx_builder(self.key, self.algorithm, self.options.unwrap_or_default())? + .build_explicit_encrypt_expression(self.mode.value)?; + let result = self.client_enc.exec.run_ctx(ctx, None).await?; + let doc_ref = result + .get_document("v") + .map_err(|e| Error::internal(format!("invalid encryption result: {}", e)))?; + let doc = doc_ref + .to_owned() + .to_document() + .map_err(|e| Error::internal(format!("invalid encryption result: {}", e)))?; + Ok(doc) } } diff --git a/src/client/csfle/state_machine.rs b/src/client/csfle/state_machine.rs index 13f75cbd9..bc7aa5f05 100644 --- a/src/client/csfle/state_machine.rs +++ b/src/client/csfle/state_machine.rs @@ -15,7 +15,6 @@ use tokio::{ use crate::{ client::{options::ServerAddress, WeakClient}, - coll::options::FindOptions, error::{Error, Result}, operation::{run_command::RunCommand, RawOutput}, options::ReadConcern, @@ -164,12 +163,8 @@ impl CryptExecutor { .database(&kv_ns.db) .collection::(&kv_ns.coll); let mut cursor = kv_coll - .find( - filter, - FindOptions::builder() - .read_concern(ReadConcern::majority()) - .build(), - ) + .find(filter) + .read_concern(ReadConcern::majority()) .await?; while cursor.advance().await? { ctx.mongo_feed(cursor.current())?; diff --git a/src/client/executor.rs b/src/client/executor.rs index 693c4546b..553151512 100644 --- a/src/client/executor.rs +++ b/src/client/executor.rs @@ -287,7 +287,8 @@ impl Client { } /// Selects a server and executes the given operation on it, optionally using a provided - /// session. Retries the operation upon failure if retryability is supported. + /// session. Retries the operation upon failure if retryability is supported or after + /// reauthenticating if reauthentication is required. async fn execute_operation_with_retry( &self, op: &mut T, @@ -404,6 +405,30 @@ impl Client { implicit_session, }, Err(mut err) => { + // If the error is a reauthentication required error, we reauthenticate and + // retry the operation. 
+ if err.is_reauthentication_required() {
+ let credential = self.inner.options.credential.as_ref().ok_or(
+ ErrorKind::Authentication {
+ message: "No Credential when reauthentication required error \
+ occurred"
+ .to_string(),
+ },
+ )?;
+ let server_api = self.inner.options.server_api.as_ref();
+
+ credential
+ .mechanism
+ .as_ref()
+ .ok_or(ErrorKind::Authentication {
+ message: "No AuthMechanism when reauthentication required error \
+ occurred"
+ .to_string(),
+ })?
+ .reauthenticate_stream(&mut conn, credential, server_api)
+ .await?;
+ continue;
+ }
err.wire_version = conn.stream_description()?.max_wire_version;
// Retryable writes are only supported by storage engines with document-level
diff --git a/src/client/options.rs b/src/client/options.rs
index 44b75cb02..7258cd4c6 100644
--- a/src/client/options.rs
+++ b/src/client/options.rs
@@ -24,12 +24,17 @@ use serde_with::skip_serializing_none;
use strsim::jaro_winkler;
use typed_builder::TypedBuilder;
+#[cfg(any(
+ feature = "zstd-compression",
+ feature = "zlib-compression",
+ feature = "snappy-compression"
+))]
+use crate::options::Compressor;
#[cfg(test)]
use crate::srv::LookupHosts;
use crate::{
bson::{doc, Bson, Document},
client::auth::{AuthMechanism, Credential},
- compression::Compressor,
concern::{Acknowledgment, ReadConcern, WriteConcern},
error::{Error, ErrorKind, Result},
event::EventHandler,
@@ -249,25 +254,6 @@ impl ServerAddress {
})
}
- #[cfg(test)]
- pub(crate) fn into_document(self) -> Document {
- match self {
- Self::Tcp { host, port } => {
- doc! {
- "host": host,
- "port": port.map(|i| Bson::Int32(i.into())).unwrap_or(Bson::Null)
- }
- }
- #[cfg(unix)]
- Self::Unix { path } => {
- doc! {
- "host": path.to_string_lossy().as_ref(),
- "port": Bson::Null,
- }
- }
- }
- }
- pub(crate) fn host(&self) -> Cow<'_, str> {
match self {
Self::Tcp { host, .. } => Cow::Borrowed(host.as_str()),
@@ -391,10 +377,15 @@ pub struct ClientOptions {
#[builder(default)]
pub app_name: Option,
- /// The compressors that the Client is willing to use in the order they are specified
- /// in the configuration. The Client sends this list of compressors to the server.
- /// The server responds with the intersection of its supported list of compressors.
- /// The order of compressors indicates preference of compressors.
+ /// The allowed compressors to use to compress messages sent to and decompress messages
+ /// received from the server. This list should be specified in priority order, as the
+ /// compressor used for messages will be the first compressor in this list that is also
+ /// supported by the server selected for operations.
+ #[cfg(any(
+ feature = "zstd-compression",
+ feature = "zlib-compression",
+ feature = "snappy-compression"
+ ))]
#[builder(default)]
#[serde(skip)]
pub compressors: Option>,
@@ -742,7 +733,11 @@ impl Serialize for ClientOptions {
writeconcern: &self.write_concern,
loadbalanced: &self.load_balanced,
zlibcompressionlevel: &None,
- srvmaxhosts: self.srv_max_hosts.map(|v| v as i32),
+ srvmaxhosts: self
+ .srv_max_hosts
+ .map(|v| v.try_into())
+ .transpose()
+ .map_err(serde::ser::Error::custom)?,
};
client_options.serialize(serializer)
@@ -834,6 +829,11 @@ pub struct ConnectionString {
/// By default, connections will not be closed due to being idle.
pub max_idle_time: Option,
+ #[cfg(any(
+ feature = "zstd-compression",
+ feature = "zlib-compression",
+ feature = "snappy-compression"
+ ))]
/// The compressors that the Client is willing to use in the order they are specified
/// in the configuration.
The Client sends this list of compressors to the server. /// The server responds with the intersection of its supported list of compressors. @@ -1342,6 +1342,11 @@ impl ClientOptions { max_idle_time: conn_str.max_idle_time, max_connecting: conn_str.max_connecting, server_selection_timeout: conn_str.server_selection_timeout, + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] compressors: conn_str.compressors, connect_timeout: conn_str.connect_timeout, retry_reads: conn_str.retry_reads, @@ -1413,6 +1418,11 @@ impl ClientOptions { } } + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] if let Some(ref compressors) = self.compressors { for compressor in compressors { compressor.validate()?; @@ -1481,12 +1491,19 @@ impl ClientOptions { if self.hosts.is_empty() { self.hosts = other.hosts; } + + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] + merge_options!(other, self, [compressors]); + merge_options!( other, self, [ app_name, - compressors, cmap_event_handler, command_event_handler, connect_timeout, @@ -1937,14 +1954,22 @@ impl ConnectionString { } } - // If zlib and zlib_compression_level are specified then write zlib_compression_level into - // zlib enum - if let (Some(compressors), Some(zlib_compression_level)) = - (self.compressors.as_mut(), parts.zlib_compression) - { - for compressor in compressors { - compressor.write_zlib_level(zlib_compression_level) + #[cfg(feature = "zlib-compression")] + if let Some(zlib_compression_level) = parts.zlib_compression { + if let Some(compressors) = self.compressors.as_mut() { + for compressor in compressors { + compressor.write_zlib_level(zlib_compression_level)?; + } + } + } + #[cfg(not(feature = "zlib-compression"))] + if parts.zlib_compression.is_some() { + return Err(ErrorKind::InvalidArgument { + message: "zlibCompressionLevel may not be specified without the zlib-compression \ + feature flag enabled" + .into(), } + .into()); } Ok(parts) @@ -2047,10 +2072,26 @@ impl ConnectionString { Some(index) => { let (k, v) = exclusive_split_at(kvp, index); let key = k.ok_or_else(err_func)?; - if key == "ALLOWED_HOSTS" { - return Err(Error::invalid_argument( - "ALLOWED_HOSTS must only be specified through client options", - )); + match key { + "ALLOWED_HOSTS" => { + return Err(Error::invalid_argument( + "ALLOWED_HOSTS must only be specified through client \ + options", + )); + } + "OIDC_CALLBACK" => { + return Err(Error::invalid_argument( + "OIDC_CALLBACK must only be specified through client \ + options", + )); + } + "OIDC_HUMAN_CALLBACK" => { + return Err(Error::invalid_argument( + "OIDC_HUMAN_CALLBACK must only be specified through \ + client options", + )); + } + _ => {} } let value = v.ok_or_else(err_func)?; doc.insert(key, value); @@ -2060,16 +2101,20 @@ impl ConnectionString { } parts.auth_mechanism_properties = Some(doc); } + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] "compressors" => { - let compressors = value - .split(',') - .filter_map(|x| Compressor::parse_str(x).ok()) - .collect::>(); - self.compressors = if compressors.is_empty() { - None - } else { - Some(compressors) + let mut compressors: Option> = None; + for compressor in value.split(',') { + let compressor = Compressor::from_str(compressor)?; + compressors + .get_or_insert_with(Default::default) + .push(compressor); } + 
self.compressors = compressors; } k @ "connecttimeoutms" => { self.connect_timeout = Some(Duration::from_millis(get_duration!(value, k))); @@ -2775,18 +2820,20 @@ mod tests { ], selection_criteria: Some( ReadPreference::SecondaryPreferred { - options: ReadPreferenceOptions::builder() - .tag_sets(vec![ - tag_set! { - "dc" => "ny", - "rack" => "1" - }, - tag_set! { - "dc" => "ny" - }, - tag_set! {}, - ]) - .build() + options: Some( + ReadPreferenceOptions::builder() + .tag_sets(vec![ + tag_set! { + "dc" => "ny", + "rack" => "1" + }, + tag_set! { + "dc" => "ny" + }, + tag_set! {}, + ]) + .build() + ) } .into() ), diff --git a/src/client/options/test.rs b/src/client/options/test.rs index d664eabcc..409642ddb 100644 --- a/src/client/options/test.rs +++ b/src/client/options/test.rs @@ -1,17 +1,60 @@ use std::time::Duration; use bson::UuidRepresentation; +use once_cell::sync::Lazy; use pretty_assertions::assert_eq; use serde::Deserialize; use crate::{ bson::{Bson, Document}, + bson_util::get_int, client::options::{ClientOptions, ConnectionString, ServerAddress}, - error::ErrorKind, - options::Compressor, - test::run_spec_test, + error::{Error, ErrorKind, Result}, + test::spec::deserialize_spec_tests, Client, }; + +static SKIPPED_TESTS: Lazy> = Lazy::new(|| { + let mut skipped_tests = vec![ + // TODO RUST-1309: unskip this test + "tlsInsecure is parsed correctly", + // The driver does not support maxPoolSize=0 + "maxPoolSize=0 does not error", + // TODO RUST-226: unskip this test + "Valid tlsCertificateKeyFilePassword is parsed correctly", + // TODO RUST-911: unskip this test + "SRV URI with custom srvServiceName", + // TODO RUST-229: unskip the following tests + "Single IP literal host without port", + "Single IP literal host with port", + "Multiple hosts (mixed formats)", + "User info for single IP literal host without database", + "User info for single IP literal host with database", + "User info for multiple hosts with database", + ]; + + // TODO RUST-1896: unskip this test when openssl-tls is enabled + // if cfg!(not(feature = "openssl-tls")) + skipped_tests.push("tlsAllowInvalidHostnames is parsed correctly"); + // } + + if cfg!(not(feature = "zlib-compression")) { + skipped_tests.push("Valid compression options are parsed correctly"); + skipped_tests.push("Non-numeric zlibCompressionLevel causes a warning"); + skipped_tests.push("Too low zlibCompressionLevel causes a warning"); + skipped_tests.push("Too high zlibCompressionLevel causes a warning"); + } + + if cfg!(not(all( + feature = "zlib-compression", + feature = "snappy-compression" + ))) { + skipped_tests.push("Multiple compressors are parsed correctly"); + } + + skipped_tests +}); + #[derive(Debug, Deserialize)] struct TestFile { pub tests: Vec, @@ -24,11 +67,43 @@ struct TestCase { uri: String, valid: bool, warning: Option, - hosts: Option>, + hosts: Option>, auth: Option, options: Option, } +// The connection string tests' representation of a server address. We use this indirection to avoid +// deserialization failures when the tests specify an IPv6 address. 
+// +// TODO RUST-229: remove this struct and deserialize directly into ServerAddress +#[derive(Debug, Deserialize)] +struct TestServerAddress { + #[serde(rename = "type")] + host_type: String, + host: String, + port: Option, +} + +impl TryFrom<&TestServerAddress> for ServerAddress { + type Error = Error; + + fn try_from(test_server_address: &TestServerAddress) -> Result { + if test_server_address.host_type.as_str() == "ip_literal" { + return Err(ErrorKind::Internal { + message: "test using ip_literal host type should be skipped".to_string(), + } + .into()); + } + + let mut address = Self::parse(&test_server_address.host)?; + if let ServerAddress::Tcp { ref mut port, .. } = address { + *port = test_server_address.port; + } + + Ok(address) + } +} + #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase", deny_unknown_fields)] struct TestAuth { @@ -46,168 +121,120 @@ impl TestAuth { } } -async fn run_test(test_file: TestFile) { - for mut test_case in test_file.tests { - if - // TODO: RUST-229: Implement IPv6 Support - test_case.description.contains("ipv6") - || test_case.description.contains("IP literal") - // TODO: RUST-226: Investigate whether tlsCertificateKeyFilePassword is supported in rustls - || test_case - .description - .contains("tlsCertificateKeyFilePassword") - // Not Implementing - || test_case.description.contains("tlsAllowInvalidHostnames") - || test_case.description.contains("single-threaded") - || test_case.description.contains("serverSelectionTryOnce") - || test_case.description.contains("relative path") - // Compression is implemented but will only pass the tests if all - // the appropriate feature flags are set. That is because - // valid compressors are only parsed correctly if the corresponding feature flag is set. - // (otherwise they are treated as invalid, and hence ignored) - || (test_case.description.contains("compress") && - !cfg!( - all(features = "zlib-compression", - features = "zstd-compression", - features = "snappy-compression" - ) - ) - ) - // The Rust driver disallows `maxPoolSize=0`. - || test_case.description.contains("maxPoolSize=0 does not error") - // TODO RUST-933 implement custom srvServiceName support - || test_case.description.contains("custom srvServiceName") - { - continue; - } +async fn run_tests(path: &[&str], skipped_files: &[&str]) { + let test_files = deserialize_spec_tests::(path, Some(skipped_files)) + .into_iter() + .map(|(test_file, _)| test_file); - #[cfg(not(unix))] - if test_case.description.contains("Unix") { - continue; - } + for test_file in test_files { + for test_case in test_file.tests { + if SKIPPED_TESTS.contains(&test_case.description.as_str()) { + continue; + } - let warning = test_case.warning.take().unwrap_or(false); - - if test_case.valid && !warning { - let mut is_unsupported_host_type = false; - // hosts - if let Some(mut json_hosts) = test_case.hosts.take() { - // skip over unsupported host types - #[cfg(not(unix))] - { - is_unsupported_host_type = json_hosts.iter_mut().any(|h_json| { - matches!( - h_json.remove("type").as_ref().and_then(Bson::as_str), - Some("ip_literal") | Some("unix") - ) - }); + let client_options_result = ClientOptions::parse(&test_case.uri).await; + + // The driver does not log warnings for unsupported or incorrect connection string + // values, so expect an error when warning is set to true. 
+ if test_case.valid && test_case.warning != Some(true) { + let client_options = client_options_result.expect(&test_case.description); + + if let Some(ref expected_hosts) = test_case.hosts { + let expected_hosts = expected_hosts + .iter() + .map(TryFrom::try_from) + .collect::>>() + .expect(&test_case.description); + + assert_eq!( + client_options.hosts, expected_hosts, + "{}", + test_case.description + ); } - #[cfg(unix)] - { - is_unsupported_host_type = json_hosts.iter_mut().any(|h_json| { - matches!( - h_json.remove("type").as_ref().and_then(Bson::as_str), - Some("ip_literal") - ) - }); - } + let mut actual_options = + bson::to_document(&client_options).expect(&test_case.description); - if !is_unsupported_host_type { - let options = ClientOptions::parse(&test_case.uri).await.unwrap(); - let hosts: Vec<_> = options - .hosts - .into_iter() - .map(ServerAddress::into_document) - .collect(); + if let Some(mode) = actual_options.remove("mode") { + actual_options.insert("readPreference", mode); + } - assert_eq!(hosts, json_hosts); + if let Some(tags) = actual_options.remove("tagSets") { + actual_options.insert("readPreferenceTags", tags); } - } - if !is_unsupported_host_type { - // options - let options = ClientOptions::parse(&test_case.uri) - .await - .expect(&test_case.description); - let mut options_doc = bson::to_document(&options).unwrap_or_else(|_| { - panic!( - "{}: Failed to serialize ClientOptions", - &test_case.description - ) - }); - if let Some(json_options) = test_case.options { - let mut json_options: Document = json_options - .into_iter() - .filter_map(|(k, v)| { - if let Bson::Null = v { - None - } else { - Some((k.to_lowercase(), v)) - } - }) - .collect(); - // tlsallowinvalidcertificates and tlsinsecure must be inverse of each other - if !json_options.contains_key("tlsallowinvalidcertificates") { - if let Some(val) = json_options.remove("tlsinsecure") { - json_options - .insert("tlsallowinvalidcertificates", !val.as_bool().unwrap()); - } + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] + if let Some(ref compressors) = client_options.compressors { + use crate::options::Compressor; + + actual_options.insert( + "compressors", + compressors + .iter() + .map(Compressor::name) + .collect::>(), + ); + + #[cfg(feature = "zlib-compression")] + if let Some(zlib_compression_level) = compressors + .iter() + .filter_map(|compressor| match compressor { + Compressor::Zlib { level } => *level, + _ => None, + }) + .next() + { + actual_options.insert("zlibCompressionLevel", zlib_compression_level); } + } - // The default types parsed from the test file don't match those serialized - // from the `ClientOptions` struct. - if let Ok(min) = json_options.get_i32("minpoolsize") { - json_options.insert("minpoolsize", Bson::Int64(min.into())); - } - if let Ok(max) = json_options.get_i32("maxpoolsize") { - json_options.insert("maxpoolsize", Bson::Int64(max.into())); - } - if let Ok(max_connecting) = json_options.get_i32("maxconnecting") { - json_options.insert("maxconnecting", Bson::Int64(max_connecting.into())); - } + if let Some(ref expected_options) = test_case.options { + for (expected_key, expected_value) in expected_options { + if expected_value == &Bson::Null { + continue; + } - options_doc = options_doc - .into_iter() - .filter(|(ref key, _)| json_options.contains_key(key)) - .collect(); - - // This is required because compressor is not serialize, but the spec tests - // still expect to see serialized compressors. 
- // This hardcodes the compressors into the options. - if let Some(compressors) = options.compressors { - options_doc.insert( - "compressors", - compressors - .iter() - .map(Compressor::name) - .collect::>(), - ); - #[cfg(feature = "zlib-compression")] - for compressor in compressors { - if let Compressor::Zlib { level: Some(level) } = compressor { - options_doc.insert("zlibcompressionlevel", level); - } + let (_, actual_value) = actual_options + .iter() + .find(|(actual_key, _)| { + actual_key.to_ascii_lowercase() == expected_key.to_ascii_lowercase() + }) + .unwrap_or_else(|| { + panic!( + "{}: parsed options missing {} key", + test_case.description, expected_key + ) + }); + + if let Some(expected_number) = get_int(expected_value) { + let actual_number = get_int(actual_value).unwrap_or_else(|| { + panic!( + "{}: {} should be a numeric value but got {}", + &test_case.description, expected_key, actual_value + ) + }); + assert_eq!(actual_number, expected_number, "{}", test_case.description); + } else { + assert_eq!(actual_value, expected_value, "{}", test_case.description); } } - assert_eq!(options_doc, json_options, "{}", test_case.description) } if let Some(test_auth) = test_case.auth { - let options = ClientOptions::parse(&test_case.uri).await.unwrap(); - assert!(test_auth.matches_client_options(&options)); + assert!(test_auth.matches_client_options(&client_options)); } - } - } else { - let expected_type = if warning { "warning" } else { "error" }; - - match ClientOptions::parse(&test_case.uri) - .await - .map_err(|e| *e.kind) - { - Ok(_) => panic!("expected {}", expected_type), - Err(ErrorKind::InvalidArgument { .. }) => {} - Err(e) => panic!("expected InvalidArgument, but got {:?}", e), + } else { + let error = client_options_result.expect_err(&test_case.description); + assert!( + matches!(*error.kind, ErrorKind::InvalidArgument { .. 
}), + "{}", + &test_case.description + ); } } } @@ -215,12 +242,21 @@ async fn run_test(test_file: TestFile) { #[tokio::test] async fn run_uri_options_spec_tests() { - run_spec_test(&["uri-options"], run_test).await; + let skipped_files = vec!["single-threaded-options.json"]; + run_tests(&["uri-options"], &skipped_files).await; } #[tokio::test] async fn run_connection_string_spec_tests() { - run_spec_test(&["connection-string"], run_test).await; + let mut skipped_files = Vec::new(); + if cfg!(not(unix)) { + skipped_files.push("valid-unix_socket-absolute.json"); + skipped_files.push("valid-unix_socket-relative.json"); + // All the tests in this file use unix domain sockets + skipped_files.push("valid-db-with-dotted-name.json"); + } + + run_tests(&["connection-string"], &skipped_files).await; } async fn parse_uri(option: &str, suggestion: Option<&str>) { @@ -266,7 +302,9 @@ async fn uuid_representations() { ); } -async fn parse_uri_with_uuid_representation(uuid_repr: &str) -> Result { +async fn parse_uri_with_uuid_representation( + uuid_repr: &str, +) -> std::result::Result { match ConnectionString::parse(format!( "mongodb://localhost:27017/?uuidRepresentation={}", uuid_repr diff --git a/src/client/session.rs b/src/client/session.rs index c96c834b8..1a9da856c 100644 --- a/src/client/session.rs +++ b/src/client/session.rs @@ -1,3 +1,4 @@ +mod action; mod cluster_time; mod pool; #[cfg(test)] @@ -15,12 +16,9 @@ use uuid::Uuid; use crate::{ bson::{doc, spec::BinarySubtype, Binary, Bson, Document, Timestamp}, cmap::conn::PinnedConnectionHandle, - error::{ErrorKind, Result}, - operation::{AbortTransaction, CommitTransaction, Operation}, options::{SessionOptions, TransactionOptions}, - sdam::{ServerInfo, TransactionSupportStatus}, + sdam::ServerInfo, selection_criteria::SelectionCriteria, - BoxFuture, Client, }; pub use cluster_time::ClusterTime; @@ -65,11 +63,11 @@ pub(crate) static SESSIONS_UNSUPPORTED_COMMANDS: Lazy> = L /// # let client = Client::with_uri_str("mongodb://example.com").await?; /// # let coll: Collection = client.database("foo").collection("bar"); /// let mut session = client.start_session().await?; -/// let options = TransactionOptions::builder() +/// session +/// .start_transaction() /// .read_concern(ReadConcern::majority()) -/// .write_concern(WriteConcern::builder().w(Acknowledgment::Majority).build()) -/// .build(); -/// session.start_transaction(options).await?; +/// .write_concern(WriteConcern::majority()) +/// .await?; /// // A "TransientTransactionError" label indicates that the entire transaction can be retried /// // with a reasonable expectation that it will succeed. /// while let Err(error) = execute_transaction(&coll, &mut session).await { @@ -81,7 +79,7 @@ pub(crate) static SESSIONS_UNSUPPORTED_COMMANDS: Lazy> = L /// # } /// /// async fn execute_transaction(coll: &Collection, session: &mut ClientSession) -> Result<()> { -/// coll.insert_one_with_session(doc! { "x": 1 }, None, session).await?; +/// coll.insert_one(doc! { "x": 1 }).session(&mut *session).await?; /// coll.delete_one(doc! { "y": 2 }).session(&mut *session).await?; /// // An "UnknownTransactionCommitResult" label indicates that it is unknown whether the /// // commit has satisfied the write concern associated with the transaction. If an error @@ -335,354 +333,6 @@ impl ClientSession { self.server_session.dirty } - /// Starts a new transaction on this session with the given `TransactionOptions`. If no options - /// are provided, the session's `defaultTransactionOptions` will be used. 
This session must - /// be passed into each operation within the transaction; otherwise, the operation will be - /// executed outside of the transaction. - /// - /// Errors returned from operations executed within a transaction may include a - /// [`crate::error::TRANSIENT_TRANSACTION_ERROR`] label. This label indicates that the entire - /// transaction can be retried with a reasonable expectation that it will succeed. - /// - /// Transactions are supported on MongoDB 4.0+. The Rust driver currently only supports - /// transactions on replica sets. - /// - /// ```rust - /// # use mongodb::{bson::{doc, Document}, error::Result, Client, ClientSession}; - /// # - /// # async fn do_stuff() -> Result<()> { - /// # let client = Client::with_uri_str("mongodb://example.com").await?; - /// # let coll = client.database("foo").collection::("bar"); - /// # let mut session = client.start_session().await?; - /// session.start_transaction(None).await?; - /// let result = coll.insert_one_with_session(doc! { "x": 1 }, None, &mut session).await?; - /// session.commit_transaction().await?; - /// # Ok(()) - /// # } - /// ``` - pub async fn start_transaction( - &mut self, - options: impl Into>, - ) -> Result<()> { - if self - .options - .as_ref() - .and_then(|o| o.snapshot) - .unwrap_or(false) - { - return Err(ErrorKind::Transaction { - message: "Transactions are not supported in snapshot sessions".into(), - } - .into()); - } - match self.transaction.state { - TransactionState::Starting | TransactionState::InProgress => { - return Err(ErrorKind::Transaction { - message: "transaction already in progress".into(), - } - .into()); - } - TransactionState::Committed { .. } => { - self.unpin(); // Unpin session if previous transaction is committed. - } - _ => {} - } - match self.client.transaction_support_status().await? { - TransactionSupportStatus::Supported => { - let mut options = match options.into() { - Some(mut options) => { - if let Some(defaults) = self.default_transaction_options() { - merge_options!( - defaults, - options, - [ - read_concern, - write_concern, - selection_criteria, - max_commit_time - ] - ); - } - Some(options) - } - None => self.default_transaction_options().cloned(), - }; - resolve_options!( - self.client, - options, - [read_concern, write_concern, selection_criteria] - ); - - if let Some(ref options) = options { - if !options - .write_concern - .as_ref() - .map(|wc| wc.is_acknowledged()) - .unwrap_or(true) - { - return Err(ErrorKind::Transaction { - message: "transactions do not support unacknowledged write concerns" - .into(), - } - .into()); - } - } - - self.increment_txn_number(); - self.transaction.start(options); - Ok(()) - } - _ => Err(ErrorKind::Transaction { - message: "Transactions are not supported by this deployment".into(), - } - .into()), - } - } - - /// Commits the transaction that is currently active on this session. - /// - /// - /// This method may return an error with a [`crate::error::UNKNOWN_TRANSACTION_COMMIT_RESULT`] - /// label. This label indicates that it is unknown whether the commit has satisfied the write - /// concern associated with the transaction. If an error with this label is returned, it is - /// safe to retry the commit until the write concern is satisfied or an error without the label - /// is returned. 
- /// - /// ```rust - /// # use mongodb::{bson::{doc, Document}, error::Result, Client, ClientSession}; - /// # - /// # async fn do_stuff() -> Result<()> { - /// # let client = Client::with_uri_str("mongodb://example.com").await?; - /// # let coll = client.database("foo").collection::("bar"); - /// # let mut session = client.start_session().await?; - /// session.start_transaction(None).await?; - /// let result = coll.insert_one_with_session(doc! { "x": 1 }, None, &mut session).await?; - /// session.commit_transaction().await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub async fn commit_transaction(&mut self) -> Result<()> { - match &mut self.transaction.state { - TransactionState::None => Err(ErrorKind::Transaction { - message: "no transaction started".into(), - } - .into()), - TransactionState::Aborted => Err(ErrorKind::Transaction { - message: "Cannot call commitTransaction after calling abortTransaction".into(), - } - .into()), - TransactionState::Starting => { - self.transaction.commit(false); - Ok(()) - } - TransactionState::InProgress => { - let commit_transaction = CommitTransaction::new(self.transaction.options.clone()); - self.transaction.commit(true); - self.client - .clone() - .execute_operation(commit_transaction, self) - .await - } - TransactionState::Committed { - data_committed: true, - } => { - let mut commit_transaction = - CommitTransaction::new(self.transaction.options.clone()); - commit_transaction.update_for_retry(); - self.client - .clone() - .execute_operation(commit_transaction, self) - .await - } - TransactionState::Committed { - data_committed: false, - } => Ok(()), - } - } - - /// Aborts the transaction that is currently active on this session. Any open transaction will - /// be aborted automatically in the `Drop` implementation of `ClientSession`. - /// - /// ```rust - /// # use mongodb::{bson::{doc, Document}, error::Result, Client, ClientSession, Collection}; - /// # - /// # async fn do_stuff() -> Result<()> { - /// # let client = Client::with_uri_str("mongodb://example.com").await?; - /// # let coll = client.database("foo").collection::("bar"); - /// # let mut session = client.start_session().await?; - /// session.start_transaction(None).await?; - /// match execute_transaction(&coll, &mut session).await { - /// Ok(_) => session.commit_transaction().await?, - /// Err(_) => session.abort_transaction().await?, - /// } - /// # Ok(()) - /// # } - /// - /// async fn execute_transaction(coll: &Collection, session: &mut ClientSession) -> Result<()> { - /// coll.insert_one_with_session(doc! { "x": 1 }, None, session).await?; - /// coll.delete_one(doc! { "y": 2 }).session(session).await?; - /// Ok(()) - /// } - /// ``` - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub async fn abort_transaction(&mut self) -> Result<()> { - match self.transaction.state { - TransactionState::None => Err(ErrorKind::Transaction { - message: "no transaction started".into(), - } - .into()), - TransactionState::Committed { .. 
} => Err(ErrorKind::Transaction { - message: "Cannot call abortTransaction after calling commitTransaction".into(), - } - .into()), - TransactionState::Aborted => Err(ErrorKind::Transaction { - message: "cannot call abortTransaction twice".into(), - } - .into()), - TransactionState::Starting => { - self.transaction.abort(); - Ok(()) - } - TransactionState::InProgress => { - let write_concern = self - .transaction - .options - .as_ref() - .and_then(|options| options.write_concern.as_ref()) - .cloned(); - let abort_transaction = - AbortTransaction::new(write_concern, self.transaction.pinned.take()); - self.transaction.abort(); - // Errors returned from running an abortTransaction command should be ignored. - let _result = self - .client - .clone() - .execute_operation(abort_transaction, &mut *self) - .await; - Ok(()) - } - } - } - - /// Starts a transaction, runs the given callback, and commits or aborts the transaction. - /// Transient transaction errors will cause the callback or the commit to be retried; - /// other errors will cause the transaction to be aborted and the error returned to the - /// caller. If the callback needs to provide its own error information, the - /// [`Error::custom`](crate::error::Error::custom) method can accept an arbitrary payload that - /// can be retrieved via [`Error::get_custom`](crate::error::Error::get_custom). - /// - /// If a command inside the callback fails, it may cause the transaction on the server to be - /// aborted. This situation is normally handled transparently by the driver. However, if the - /// application does not return that error from the callback, the driver will not be able to - /// determine whether the transaction was aborted or not. The driver will then retry the - /// callback indefinitely. To avoid this situation, the application MUST NOT silently handle - /// errors within the callback. If the application needs to handle errors within the - /// callback, it MUST return them after doing so. - /// - /// Because the callback can be repeatedly executed and because it returns a future, the rust - /// closure borrowing rules for captured values can be overly restrictive. As a - /// convenience, `with_transaction` accepts a context argument that will be passed to the - /// callback along with the session: - /// - /// ```no_run - /// # use mongodb::{bson::{doc, Document}, error::Result, Client}; - /// # use futures::FutureExt; - /// # async fn wrapper() -> Result<()> { - /// # let client = Client::with_uri_str("mongodb://example.com").await?; - /// # let mut session = client.start_session().await?; - /// let coll = client.database("mydb").collection::("mycoll"); - /// let my_data = "my data".to_string(); - /// // This works: - /// session.with_transaction( - /// (&coll, &my_data), - /// |session, (coll, my_data)| async move { - /// coll.insert_one_with_session(doc! { "data": *my_data }, None, session).await - /// }.boxed(), - /// None, - /// ).await?; - /// /* This will not compile with a "variable moved due to use in generator" error: - /// session.with_transaction( - /// (), - /// |session, _| async move { - /// coll.insert_one_with_session(doc! 
{ "data": my_data }, None, session).await - /// }.boxed(), - /// None, - /// ).await?; - /// */ - /// # Ok(()) - /// # } - /// ``` - pub async fn with_transaction( - &mut self, - mut context: C, - mut callback: F, - options: impl Into>, - ) -> Result - where - F: for<'a> FnMut(&'a mut ClientSession, &'a mut C) -> BoxFuture<'a, Result>, - { - let options = options.into(); - let timeout = Duration::from_secs(120); - #[cfg(test)] - let timeout = self.convenient_transaction_timeout.unwrap_or(timeout); - let start = Instant::now(); - - use crate::error::{TRANSIENT_TRANSACTION_ERROR, UNKNOWN_TRANSACTION_COMMIT_RESULT}; - - 'transaction: loop { - self.start_transaction(options.clone()).await?; - let ret = match callback(self, &mut context).await { - Ok(v) => v, - Err(e) => { - if matches!( - self.transaction.state, - TransactionState::Starting | TransactionState::InProgress - ) { - self.abort_transaction().await?; - } - if e.contains_label(TRANSIENT_TRANSACTION_ERROR) && start.elapsed() < timeout { - continue 'transaction; - } - return Err(e); - } - }; - if matches!( - self.transaction.state, - TransactionState::None - | TransactionState::Aborted - | TransactionState::Committed { .. } - ) { - return Ok(ret); - } - 'commit: loop { - match self.commit_transaction().await { - Ok(()) => return Ok(ret), - Err(e) => { - if e.is_max_time_ms_expired_error() || start.elapsed() >= timeout { - return Err(e); - } - if e.contains_label(UNKNOWN_TRANSACTION_COMMIT_RESULT) { - continue 'commit; - } - if e.contains_label(TRANSIENT_TRANSACTION_ERROR) { - continue 'transaction; - } - return Err(e); - } - } - } - } - } - fn default_transaction_options(&self) -> Option<&TransactionOptions> { self.options .as_ref() diff --git a/src/client/session/action.rs b/src/client/session/action.rs new file mode 100644 index 000000000..4e0040604 --- /dev/null +++ b/src/client/session/action.rs @@ -0,0 +1,388 @@ +use std::time::{Duration, Instant}; + +use crate::{ + action::{action_impl, AbortTransaction, CommitTransaction, StartTransaction}, + client::options::TransactionOptions, + error::{ErrorKind, Result}, + operation::{self, Operation}, + sdam::TransactionSupportStatus, + BoxFuture, + ClientSession, +}; + +use super::TransactionState; + +impl ClientSession { + async fn start_transaction_impl(&mut self, options: Option) -> Result<()> { + if self + .options + .as_ref() + .and_then(|o| o.snapshot) + .unwrap_or(false) + { + return Err(ErrorKind::Transaction { + message: "Transactions are not supported in snapshot sessions".into(), + } + .into()); + } + match self.transaction.state { + TransactionState::Starting | TransactionState::InProgress => { + return Err(ErrorKind::Transaction { + message: "transaction already in progress".into(), + } + .into()); + } + TransactionState::Committed { .. } => { + self.unpin(); // Unpin session if previous transaction is committed. + } + _ => {} + } + match self.client.transaction_support_status().await? 
{ + TransactionSupportStatus::Supported => { + let mut options = match options { + Some(mut options) => { + if let Some(defaults) = self.default_transaction_options() { + merge_options!( + defaults, + options, + [ + read_concern, + write_concern, + selection_criteria, + max_commit_time + ] + ); + } + Some(options) + } + None => self.default_transaction_options().cloned(), + }; + resolve_options!( + self.client, + options, + [read_concern, write_concern, selection_criteria] + ); + + if let Some(ref options) = options { + if !options + .write_concern + .as_ref() + .map(|wc| wc.is_acknowledged()) + .unwrap_or(true) + { + return Err(ErrorKind::Transaction { + message: "transactions do not support unacknowledged write concerns" + .into(), + } + .into()); + } + } + + self.increment_txn_number(); + self.transaction.start(options); + Ok(()) + } + _ => Err(ErrorKind::Transaction { + message: "Transactions are not supported by this deployment".into(), + } + .into()), + } + } +} + +#[action_impl] +impl<'a> Action for StartTransaction<&'a mut ClientSession> { + type Future = StartTransactionFuture; + + async fn execute(self) -> Result<()> { + self.session.start_transaction_impl(self.options).await + } +} + +impl<'a> StartTransaction<&'a mut ClientSession> { + /// Starts a transaction, runs the given callback, and commits or aborts the transaction. + /// Transient transaction errors will cause the callback or the commit to be retried; + /// other errors will cause the transaction to be aborted and the error returned to the + /// caller. If the callback needs to provide its own error information, the + /// [`Error::custom`](crate::error::Error::custom) method can accept an arbitrary payload that + /// can be retrieved via [`Error::get_custom`](crate::error::Error::get_custom). + /// + /// If a command inside the callback fails, it may cause the transaction on the server to be + /// aborted. This situation is normally handled transparently by the driver. However, if the + /// application does not return that error from the callback, the driver will not be able to + /// determine whether the transaction was aborted or not. The driver will then retry the + /// callback indefinitely. To avoid this situation, the application MUST NOT silently handle + /// errors within the callback. If the application needs to handle errors within the + /// callback, it MUST return them after doing so. + /// + /// Because the callback can be repeatedly executed and because it returns a future, the rust + /// closure borrowing rules for captured values can be overly restrictive. As a + /// convenience, `with_transaction` accepts a context argument that will be passed to the + /// callback along with the session: + /// + /// ```no_run + /// # use mongodb::{bson::{doc, Document}, error::Result, Client}; + /// # use futures::FutureExt; + /// # async fn wrapper() -> Result<()> { + /// # let client = Client::with_uri_str("mongodb://example.com").await?; + /// # let mut session = client.start_session().await?; + /// let coll = client.database("mydb").collection::("mycoll"); + /// let my_data = "my data".to_string(); + /// // This works: + /// session.start_transaction().and_run( + /// (&coll, &my_data), + /// |session, (coll, my_data)| async move { + /// coll.insert_one(doc! 
{ "data": *my_data }).session(session).await + /// }.boxed() + /// ).await?; + /// /* This will not compile with a "variable moved due to use in generator" error: + /// session.start_transaction().and_run( + /// (), + /// |session, _| async move { + /// coll.insert_one(doc! { "data": my_data }).session(session).await + /// }.boxed() + /// ).await?; + /// */ + /// # Ok(()) + /// # } + /// ``` + pub async fn and_run(self, mut context: C, mut callback: F) -> Result + where + F: for<'b> FnMut(&'b mut ClientSession, &'b mut C) -> BoxFuture<'b, Result>, + { + let timeout = Duration::from_secs(120); + #[cfg(test)] + let timeout = self + .session + .convenient_transaction_timeout + .unwrap_or(timeout); + let start = Instant::now(); + + use crate::error::{TRANSIENT_TRANSACTION_ERROR, UNKNOWN_TRANSACTION_COMMIT_RESULT}; + + 'transaction: loop { + self.session + .start_transaction() + .with_options(self.options.clone()) + .await?; + let ret = match callback(self.session, &mut context).await { + Ok(v) => v, + Err(e) => { + if matches!( + self.session.transaction.state, + TransactionState::Starting | TransactionState::InProgress + ) { + self.session.abort_transaction().await?; + } + if e.contains_label(TRANSIENT_TRANSACTION_ERROR) && start.elapsed() < timeout { + continue 'transaction; + } + return Err(e); + } + }; + if matches!( + self.session.transaction.state, + TransactionState::None + | TransactionState::Aborted + | TransactionState::Committed { .. } + ) { + return Ok(ret); + } + 'commit: loop { + match self.session.commit_transaction().await { + Ok(()) => return Ok(ret), + Err(e) => { + if e.is_max_time_ms_expired_error() || start.elapsed() >= timeout { + return Err(e); + } + if e.contains_label(UNKNOWN_TRANSACTION_COMMIT_RESULT) { + continue 'commit; + } + if e.contains_label(TRANSIENT_TRANSACTION_ERROR) { + continue 'transaction; + } + return Err(e); + } + } + } + } + } +} + +#[cfg(feature = "sync")] +impl<'a> StartTransaction<&'a mut crate::sync::ClientSession> { + /// Synchronously execute this action. + pub fn run(self) -> Result<()> { + crate::sync::TOKIO_RUNTIME.block_on( + self.session + .async_client_session + .start_transaction_impl(self.options), + ) + } + + /// Starts a transaction, runs the given callback, and commits or aborts the transaction. + /// Transient transaction errors will cause the callback or the commit to be retried; + /// other errors will cause the transaction to be aborted and the error returned to the + /// caller. If the callback needs to provide its own error information, the + /// [`Error::custom`](crate::error::Error::custom) method can accept an arbitrary payload that + /// can be retrieved via [`Error::get_custom`](crate::error::Error::get_custom). + /// + /// If a command inside the callback fails, it may cause the transaction on the server to be + /// aborted. This situation is normally handled transparently by the driver. However, if the + /// application does not return that error from the callback, the driver will not be able to + /// determine whether the transaction was aborted or not. The driver will then retry the + /// callback indefinitely. To avoid this situation, the application MUST NOT silently handle + /// errors within the callback. If the application needs to handle errors within the + /// callback, it MUST return them after doing so. 
+ pub fn and_run(self, mut callback: F) -> Result + where + F: for<'b> FnMut(&'b mut crate::sync::ClientSession) -> Result, + { + let timeout = std::time::Duration::from_secs(120); + let start = std::time::Instant::now(); + + use crate::error::{TRANSIENT_TRANSACTION_ERROR, UNKNOWN_TRANSACTION_COMMIT_RESULT}; + + 'transaction: loop { + self.session + .start_transaction() + .with_options(self.options.clone()) + .run()?; + let ret = match callback(self.session) { + Ok(v) => v, + Err(e) => { + if matches!( + self.session.async_client_session.transaction.state, + TransactionState::Starting | TransactionState::InProgress + ) { + self.session.abort_transaction().run()?; + } + if e.contains_label(TRANSIENT_TRANSACTION_ERROR) && start.elapsed() < timeout { + continue 'transaction; + } + return Err(e); + } + }; + if matches!( + self.session.async_client_session.transaction.state, + TransactionState::None + | TransactionState::Aborted + | TransactionState::Committed { .. } + ) { + return Ok(ret); + } + 'commit: loop { + match self.session.commit_transaction().run() { + Ok(()) => return Ok(ret), + Err(e) => { + if e.is_max_time_ms_expired_error() || start.elapsed() >= timeout { + return Err(e); + } + if e.contains_label(UNKNOWN_TRANSACTION_COMMIT_RESULT) { + continue 'commit; + } + if e.contains_label(TRANSIENT_TRANSACTION_ERROR) { + continue 'transaction; + } + return Err(e); + } + } + } + } + } +} + +#[action_impl] +impl<'a> Action for CommitTransaction<'a> { + type Future = CommitTransactionFuture; + + async fn execute(self) -> Result<()> { + match &mut self.session.transaction.state { + TransactionState::None => Err(ErrorKind::Transaction { + message: "no transaction started".into(), + } + .into()), + TransactionState::Aborted => Err(ErrorKind::Transaction { + message: "Cannot call commitTransaction after calling abortTransaction".into(), + } + .into()), + TransactionState::Starting => { + self.session.transaction.commit(false); + Ok(()) + } + TransactionState::InProgress => { + let commit_transaction = + operation::CommitTransaction::new(self.session.transaction.options.clone()); + self.session.transaction.commit(true); + self.session + .client + .clone() + .execute_operation(commit_transaction, self.session) + .await + } + TransactionState::Committed { + data_committed: true, + } => { + let mut commit_transaction = + operation::CommitTransaction::new(self.session.transaction.options.clone()); + commit_transaction.update_for_retry(); + self.session + .client + .clone() + .execute_operation(commit_transaction, self.session) + .await + } + TransactionState::Committed { + data_committed: false, + } => Ok(()), + } + } +} + +#[action_impl] +impl<'a> Action for AbortTransaction<'a> { + type Future = AbortTransactionFuture; + + async fn execute(self) -> Result<()> { + match self.session.transaction.state { + TransactionState::None => Err(ErrorKind::Transaction { + message: "no transaction started".into(), + } + .into()), + TransactionState::Committed { .. 
} => Err(ErrorKind::Transaction { + message: "Cannot call abortTransaction after calling commitTransaction".into(), + } + .into()), + TransactionState::Aborted => Err(ErrorKind::Transaction { + message: "cannot call abortTransaction twice".into(), + } + .into()), + TransactionState::Starting => { + self.session.transaction.abort(); + Ok(()) + } + TransactionState::InProgress => { + let write_concern = self + .session + .transaction + .options + .as_ref() + .and_then(|options| options.write_concern.as_ref()) + .cloned(); + let abort_transaction = operation::AbortTransaction::new( + write_concern, + self.session.transaction.pinned.take(), + ); + self.session.transaction.abort(); + // Errors returned from running an abortTransaction command should be ignored. + let _result = self + .session + .client + .clone() + .execute_operation(abort_transaction, &mut *self.session) + .await; + Ok(()) + } + } + } +} diff --git a/src/client/session/test.rs b/src/client/session/test.rs index 62fa47ad1..719692e3f 100644 --- a/src/client/session/test.rs +++ b/src/client/session/test.rs @@ -5,15 +5,23 @@ use std::{future::Future, sync::Arc, time::Duration}; use bson::Document; use futures::stream::StreamExt; +#[allow(deprecated)] +use crate::test::EventClient; use crate::{ bson::{doc, Bson}, - coll::options::{CountOptions, InsertManyOptions}, + coll::options::CountOptions, error::Result, event::sdam::SdamEvent, - options::{Acknowledgment, FindOptions, ReadConcern, ReadPreference, WriteConcern}, + options::{FindOptions, ReadConcern, ReadPreference, WriteConcern}, sdam::ServerInfo, selection_criteria::SelectionCriteria, - test::{get_client_options, log_uncaptured, Event, EventClient, EventHandler, TestClient}, + test::{ + get_client_options, + log_uncaptured, + util::event_buffer::EventBuffer, + Event, + TestClient, + }, Client, Collection, }; @@ -58,16 +66,12 @@ macro_rules! for_each_op { // collection operations $test_func( "insert", - collection_op!($test_name, coll, coll.insert_one(doc! { "x": 1 }, None)), + collection_op!($test_name, coll, coll.insert_one(doc! { "x": 1 })), ) .await; $test_func( "insert", - collection_op!( - $test_name, - coll, - coll.insert_many(vec![doc! { "x": 1 }], None) - ), + collection_op!($test_name, coll, coll.insert_many(vec![doc! { "x": 1 }])), ) .await; $test_func( @@ -75,7 +79,7 @@ macro_rules! for_each_op { collection_op!( $test_name, coll, - coll.replace_one(doc! { "x": 1 }, doc! { "x": 2 }, None) + coll.replace_one(doc! { "x": 1 }, doc! { "x": 2 }) ), ) .await; @@ -109,11 +113,7 @@ macro_rules! for_each_op { .await; $test_func( "findAndModify", - collection_op!( - $test_name, - coll, - coll.find_one_and_delete(doc! { "x": 1 }, None) - ), + collection_op!($test_name, coll, coll.find_one_and_delete(doc! { "x": 1 })), ) .await; $test_func( @@ -121,7 +121,7 @@ macro_rules! for_each_op { collection_op!( $test_name, coll, - coll.find_one_and_update(doc! {}, doc! { "$inc": { "x": 1 } }, None) + coll.find_one_and_update(doc! {}, doc! { "$inc": { "x": 1 } }) ), ) .await; @@ -130,7 +130,7 @@ macro_rules! for_each_op { collection_op!( $test_name, coll, - coll.find_one_and_replace(doc! {}, doc! {"x": 1}, None) + coll.find_one_and_replace(doc! {}, doc! {"x": 1}) ), ) .await; @@ -145,12 +145,12 @@ macro_rules! for_each_op { .await; $test_func( "find", - collection_op!($test_name, coll, coll.find(doc! { "x": 1 }, None)), + collection_op!($test_name, coll, coll.find(doc! { "x": 1 })), ) .await; $test_func( "find", - collection_op!($test_name, coll, coll.find_one(doc! 
{ "x": 1 }, None)), + collection_op!($test_name, coll, coll.find_one(doc! { "x": 1 })), ) .await; $test_func( @@ -239,13 +239,14 @@ async fn cluster_time_in_commands() { async fn cluster_time_test( command_name: &str, client: &Client, - event_handler: &EventHandler, + event_buffer: &EventBuffer, operation: F, ) where F: Fn(Client) -> G, G: Future>, { - let mut subscriber = event_handler.subscribe(); + #[allow(deprecated)] + let mut subscriber = event_buffer.subscribe(); operation(client.clone()) .await @@ -292,11 +293,11 @@ async fn cluster_time_in_commands() { ); } - let handler = Arc::new(EventHandler::new()); + let buffer = EventBuffer::new(); let mut options = get_client_options().await.clone(); options.heartbeat_freq = Some(Duration::from_secs(1000)); - options.command_event_handler = Some(handler.clone().into()); - options.sdam_event_handler = Some(handler.clone().into()); + options.command_event_handler = Some(buffer.handler()); + options.sdam_event_handler = Some(buffer.handler()); // Ensure we only connect to one server so the monitor checks from other servers // don't affect the TopologyDescription's clusterTime value between commands. @@ -312,7 +313,8 @@ async fn cluster_time_in_commands() { } } - let mut subscriber = handler.subscribe(); + #[allow(deprecated)] + let mut subscriber = buffer.subscribe(); let client = Client::with_options(options).unwrap(); @@ -336,7 +338,7 @@ async fn cluster_time_in_commands() { .await .unwrap(); - cluster_time_test("ping", &client, handler.as_ref(), |client| async move { + cluster_time_test("ping", &client, &buffer, |client| async move { client .database(function_name!()) .run_command(doc! { "ping": 1 }) @@ -344,34 +346,29 @@ async fn cluster_time_in_commands() { }) .await; - cluster_time_test( - "aggregate", - &client, - handler.as_ref(), - |client| async move { - client - .database(function_name!()) - .collection::(function_name!()) - .aggregate(vec![doc! { "$match": { "x": 1 } }]) - .await - }, - ) + cluster_time_test("aggregate", &client, &buffer, |client| async move { + client + .database(function_name!()) + .collection::(function_name!()) + .aggregate(vec![doc! { "$match": { "x": 1 } }]) + .await + }) .await; - cluster_time_test("find", &client, handler.as_ref(), |client| async move { + cluster_time_test("find", &client, &buffer, |client| async move { client .database(function_name!()) .collection::(function_name!()) - .find(doc! {}, None) + .find(doc! {}) .await }) .await; - cluster_time_test("insert", &client, handler.as_ref(), |client| async move { + cluster_time_test("insert", &client, &buffer, |client| async move { client .database(function_name!()) .collection::(function_name!()) - .insert_one(doc! {}, None) + .insert_one(doc! 
{}) .await }) .await; @@ -386,6 +383,7 @@ async fn session_usage() { return; } + #[allow(deprecated)] async fn session_usage_test(command_name: &str, operation: F) where F: Fn(EventClient) -> G, @@ -393,7 +391,9 @@ async fn session_usage() { { let client = EventClient::new().await; operation(client.clone()).await; - let (command_started, _) = client.get_successful_command_execution(command_name); + let mut events = client.events.clone(); + #[allow(deprecated)] + let (command_started, _) = events.get_successful_command_execution(command_name); assert!( command_started.command.get("lsid").is_some(), "implicit session not passed to {}", @@ -408,6 +408,7 @@ async fn session_usage() { #[tokio::test] #[function_name::named] async fn implicit_session_returned_after_immediate_exhaust() { + #[allow(deprecated)] let client = EventClient::new().await; if client.is_standalone() { return; @@ -416,7 +417,7 @@ async fn implicit_session_returned_after_immediate_exhaust() { let coll = client .init_db_and_coll(function_name!(), function_name!()) .await; - coll.insert_many(vec![doc! {}, doc! {}], None) + coll.insert_many(vec![doc! {}, doc! {}]) .await .expect("insert should succeed"); @@ -424,10 +425,14 @@ async fn implicit_session_returned_after_immediate_exhaust() { tokio::time::sleep(Duration::from_millis(250)).await; client.clear_session_pool().await; - let mut cursor = coll.find(doc! {}, None).await.expect("find should succeed"); + let mut cursor = coll.find(doc! {}).await.expect("find should succeed"); assert!(matches!(cursor.next().await, Some(Ok(_)))); - let (find_started, _) = client.get_successful_command_execution("find"); + #[allow(deprecated)] + let (find_started, _) = { + let mut events = client.events.clone(); + events.get_successful_command_execution("find") + }; let session_id = find_started .command .get("lsid") @@ -448,6 +453,7 @@ async fn implicit_session_returned_after_immediate_exhaust() { #[tokio::test] #[function_name::named] async fn implicit_session_returned_after_exhaust_by_get_more() { + #[allow(deprecated)] let client = EventClient::new().await; if client.is_standalone() { return; @@ -457,7 +463,7 @@ async fn implicit_session_returned_after_exhaust_by_get_more() { .init_db_and_coll(function_name!(), function_name!()) .await; for _ in 0..5 { - coll.insert_one(doc! {}, None) + coll.insert_one(doc! {}) .await .expect("insert should succeed"); } @@ -466,9 +472,9 @@ async fn implicit_session_returned_after_exhaust_by_get_more() { tokio::time::sleep(Duration::from_millis(250)).await; client.clear_session_pool().await; - let options = FindOptions::builder().batch_size(3).build(); let mut cursor = coll - .find(doc! {}, options) + .find(doc! 
{}) + .batch_size(3) .await .expect("find should succeed"); @@ -476,7 +482,12 @@ async fn implicit_session_returned_after_exhaust_by_get_more() { assert!(matches!(cursor.next().await, Some(Ok(_)))); } - let (find_started, _) = client.get_successful_command_execution("find"); + #[allow(deprecated)] + let (find_started, _) = { + let mut events = client.events.clone(); + events.get_successful_command_execution("find") + }; + let session_id = find_started .command .get("lsid") @@ -497,6 +508,7 @@ async fn implicit_session_returned_after_exhaust_by_get_more() { #[tokio::test] #[function_name::named] async fn find_and_getmore_share_session() { + #[allow(deprecated)] let client = EventClient::new().await; if client.is_standalone() { log_uncaptured( @@ -509,10 +521,10 @@ async fn find_and_getmore_share_session() { .init_db_and_coll(function_name!(), function_name!()) .await; - let options = InsertManyOptions::builder() - .write_concern(WriteConcern::builder().w(Acknowledgment::Majority).build()) - .build(); - coll.insert_many(vec![doc! {}; 3], options).await.unwrap(); + coll.insert_many(vec![doc! {}; 3]) + .write_concern(WriteConcern::majority()) + .await + .unwrap(); let read_preferences: Vec = vec![ ReadPreference::Primary, @@ -530,6 +542,7 @@ async fn find_and_getmore_share_session() { }, ]; + #[allow(deprecated)] async fn run_test( client: &EventClient, coll: &Collection, @@ -545,7 +558,8 @@ async fn find_and_getmore_share_session() { let mut cursor; loop { cursor = coll - .find(doc! {}, options.clone()) + .find(doc! {}) + .with_options(options.clone()) .await .expect("find should succeed"); if cursor.has_next() { @@ -571,14 +585,17 @@ async fn find_and_getmore_share_session() { }); } - let (find_started, _) = client.get_successful_command_execution("find"); + let mut events = client.events.clone(); + #[allow(deprecated)] + let (find_started, _) = events.get_successful_command_execution("find"); let session_id = find_started .command .get("lsid") .expect("find should use implicit session"); assert!(session_id != &Bson::Null); - let (command_started, _) = client.get_successful_command_execution("getMore"); + #[allow(deprecated)] + let (command_started, _) = events.get_successful_command_execution("getMore"); let getmore_session_id = command_started .command .get("lsid") diff --git a/src/client/session/test/causal_consistency.rs b/src/client/session/test/causal_consistency.rs index 2b51f12f6..0fbc3c753 100644 --- a/src/client/session/test/causal_consistency.rs +++ b/src/client/session/test/causal_consistency.rs @@ -1,12 +1,14 @@ use bson::{doc, Document}; use futures::{future::BoxFuture, FutureExt}; +#[allow(deprecated)] +use crate::test::EventClient; use crate::{ coll::options::CollectionOptions, error::Result, event::command::CommandEvent, options::ReadConcern, - test::{log_uncaptured, EventClient}, + test::log_uncaptured, ClientSession, Collection, }; @@ -45,21 +47,20 @@ fn all_session_ops() -> impl Iterator { let mut ops = vec![]; ops.push(op!("insert", false, |coll, session| { - coll.insert_one_with_session(doc! { "x": 1 }, None, session) + coll.insert_one(doc! { "x": 1 }).session(session) })); ops.push(op!("insert", false, |coll, session| { - coll.insert_many_with_session(vec![doc! { "x": 1 }], None, session) + coll.insert_many(vec![doc! { "x": 1 }]).session(session) })); ops.push(op!("find", true, |coll, session| coll - .find_one_with_session(doc! { "x": 1 }, None, session))); + .find_one(doc! 
{ "x": 1 }) + .session(session))); - ops.push(op!("find", true, |coll, session| coll.find_with_session( - doc! { "x": 1 }, - None, - session - ))); + ops.push(op!("find", true, |coll, session| coll + .find(doc! { "x": 1 }) + .session(session))); ops.push(op!("update", false, |coll, s| coll .update_one(doc! { "x": 1 }, doc! { "$inc": { "x": 1 } },) @@ -70,12 +71,8 @@ fn all_session_ops() -> impl Iterator { .session(s))); ops.push(op!("update", false, |coll, s| coll - .replace_one_with_session( - doc! { "x": 1 }, - doc! { "x": 2 }, - None, - s, - ))); + .replace_one(doc! { "x": 1 }, doc! { "x": 2 },) + .session(s))); ops.push(op!("delete", false, |coll, s| coll .delete_one(doc! { "x": 1 }) @@ -86,23 +83,16 @@ fn all_session_ops() -> impl Iterator { .session(s))); ops.push(op!("findAndModify", false, |coll, s| coll - .find_one_and_update_with_session( - doc! { "x": 1 }, - doc! { "$inc": { "x": 1 } }, - None, - s, - ))); + .find_one_and_update(doc! { "x": 1 }, doc! { "$inc": { "x": 1 } },) + .session(s))); ops.push(op!("findAndModify", false, |coll, s| coll - .find_one_and_replace_with_session( - doc! { "x": 1 }, - doc! { "x": 1 }, - None, - s, - ))); + .find_one_and_replace(doc! { "x": 1 }, doc! { "x": 1 },) + .session(s))); ops.push(op!("findAndModify", false, |coll, s| coll - .find_one_and_delete_with_session(doc! { "x": 1 }, None, s,))); + .find_one_and_delete(doc! { "x": 1 }) + .session(s))); ops.push(op!("aggregate", true, |coll, s| coll .count_documents(doc! { "x": 1 }) @@ -129,6 +119,7 @@ fn all_session_ops() -> impl Iterator { /// Test 1 from the causal consistency specification. #[tokio::test] async fn new_session_operation_time_null() { + #[allow(deprecated)] let client = EventClient::new().await; if client.is_standalone() { @@ -145,6 +136,7 @@ async fn new_session_operation_time_null() { /// Test 2 from the causal consistency specification. #[tokio::test] async fn first_read_no_after_cluser_time() { + #[allow(deprecated)] let client = EventClient::new().await; if client.is_standalone() { @@ -170,7 +162,11 @@ async fn first_read_no_after_cluser_time() { ) .await .unwrap_or_else(|e| panic!("{} failed: {}", name, e)); - let (started, _) = client.get_successful_command_execution(name); + #[allow(deprecated)] + let (started, _) = { + let mut events = client.events.clone(); + events.get_successful_command_execution(name) + }; // assert that no read concern was set. started.command.get_document("readConcern").unwrap_err(); @@ -180,6 +176,7 @@ async fn first_read_no_after_cluser_time() { /// Test 3 from the causal consistency specification. #[tokio::test] async fn first_op_update_op_time() { + #[allow(deprecated)] let client = EventClient::new().await; if client.is_standalone() { @@ -204,11 +201,15 @@ async fn first_op_update_op_time() { .await .unwrap(); - let event = client - .get_command_events(&[name]) - .into_iter() - .find(|e| matches!(e, CommandEvent::Succeeded(_) | CommandEvent::Failed(_))) - .unwrap_or_else(|| panic!("no event found for {}", name)); + #[allow(deprecated)] + let event = { + let mut events = client.events.clone(); + events + .get_command_events(&[name]) + .into_iter() + .find(|e| matches!(e, CommandEvent::Succeeded(_) | CommandEvent::Failed(_))) + .unwrap_or_else(|| panic!("no event found for {}", name)) + }; match event { CommandEvent::Succeeded(s) => { @@ -226,6 +227,7 @@ async fn first_op_update_op_time() { /// Test 4 from the causal consistency specification. 
#[tokio::test] async fn read_includes_after_cluster_time() { + #[allow(deprecated)] let client = EventClient::new().await; if client.is_standalone() { @@ -242,13 +244,13 @@ async fn read_includes_after_cluster_time() { for op in all_session_ops().filter(|o| o.is_read) { let command_name = op.name; let mut session = client.start_session().await.unwrap(); - coll.find_one_with_session(None, None, &mut session) - .await - .unwrap(); + coll.find_one(doc! {}).session(&mut session).await.unwrap(); let op_time = session.operation_time().unwrap(); op.execute(coll.clone(), &mut session).await.unwrap(); + #[allow(deprecated)] let command_started = client + .events .get_command_started_events(&[command_name]) .pop() .unwrap(); @@ -268,6 +270,7 @@ async fn read_includes_after_cluster_time() { /// Test 5 from the causal consistency specification. #[tokio::test] async fn find_after_write_includes_after_cluster_time() { + #[allow(deprecated)] let client = EventClient::new().await; if client.is_standalone() { @@ -290,11 +293,14 @@ async fn find_after_write_includes_after_cluster_time() { .unwrap(); op.execute(coll.clone(), &mut session).await.unwrap(); let op_time = session.operation_time().unwrap(); - coll.find_one_with_session(None, None, &mut session) - .await - .unwrap(); + coll.find_one(doc! {}).session(&mut session).await.unwrap(); - let command_started = client.get_command_started_events(&["find"]).pop().unwrap(); + #[allow(deprecated)] + let command_started = client + .events + .get_command_started_events(&["find"]) + .pop() + .unwrap(); assert_eq!( command_started .command @@ -310,6 +316,7 @@ async fn find_after_write_includes_after_cluster_time() { /// Test 6 from the causal consistency specification. #[tokio::test] async fn not_causally_consistent_omits_after_cluster_time() { + #[allow(deprecated)] let client = EventClient::new().await; if client.is_standalone() { @@ -334,7 +341,9 @@ async fn not_causally_consistent_omits_after_cluster_time() { .unwrap(); op.execute(coll.clone(), &mut session).await.unwrap(); + #[allow(deprecated)] let command_started = client + .events .get_command_started_events(&[command_name]) .pop() .unwrap(); @@ -348,6 +357,7 @@ async fn not_causally_consistent_omits_after_cluster_time() { /// Test 7 from the causal consistency specification. #[tokio::test] async fn omit_after_cluster_time_standalone() { + #[allow(deprecated)] let client = EventClient::new().await; if !client.is_standalone() { @@ -369,7 +379,9 @@ async fn omit_after_cluster_time_standalone() { .unwrap(); op.execute(coll.clone(), &mut session).await.unwrap(); + #[allow(deprecated)] let command_started = client + .events .get_command_started_events(&[command_name]) .pop() .unwrap(); @@ -383,6 +395,7 @@ async fn omit_after_cluster_time_standalone() { /// Test 8 from the causal consistency specification. #[tokio::test] async fn omit_default_read_concern_level() { + #[allow(deprecated)] let client = EventClient::new().await; if client.is_standalone() { @@ -404,13 +417,13 @@ async fn omit_default_read_concern_level() { .causal_consistency(true) .await .unwrap(); - coll.find_one_with_session(None, None, &mut session) - .await - .unwrap(); + coll.find_one(doc! 
{}).session(&mut session).await.unwrap(); let op_time = session.operation_time().unwrap(); op.execute(coll.clone(), &mut session).await.unwrap(); + #[allow(deprecated)] let command_started = client + .events .get_command_started_events(&[command_name]) .pop() .unwrap(); @@ -424,6 +437,7 @@ async fn omit_default_read_concern_level() { /// Test 9 from the causal consistency specification. #[tokio::test] async fn test_causal_consistency_read_concern_merge() { + #[allow(deprecated)] let client = EventClient::new().await; if client.is_standalone() { log_uncaptured( @@ -451,13 +465,13 @@ async fn test_causal_consistency_read_concern_merge() { for op in all_session_ops().filter(|o| o.is_read) { let command_name = op.name; - coll.find_one_with_session(None, None, &mut session) - .await - .unwrap(); + coll.find_one(doc! {}).session(&mut session).await.unwrap(); let op_time = session.operation_time().unwrap(); op.execute(coll.clone(), &mut session).await.unwrap(); + #[allow(deprecated)] let command_started = client + .events .get_command_started_events(&[command_name]) .pop() .unwrap(); @@ -474,6 +488,7 @@ async fn test_causal_consistency_read_concern_merge() { /// Test 11 from the causal consistency specification. #[tokio::test] async fn omit_cluster_time_standalone() { + #[allow(deprecated)] let client = EventClient::new().await; if !client.is_standalone() { log_uncaptured("skipping omit_cluster_time_standalone due to unsupported topology"); @@ -484,15 +499,20 @@ async fn omit_cluster_time_standalone() { .database("causal_consistency_11") .collection::("causal_consistency_11"); - coll.find_one(None, None).await.unwrap(); + coll.find_one(doc! {}).await.unwrap(); - let (started, _) = client.get_successful_command_execution("find"); + #[allow(deprecated)] + let (started, _) = { + let mut events = client.events.clone(); + events.get_successful_command_execution("find") + }; started.command.get_document("$clusterTime").unwrap_err(); } /// Test 12 from the causal consistency specification. #[tokio::test] async fn cluster_time_sent_in_commands() { + #[allow(deprecated)] let client = EventClient::new().await; if client.is_standalone() { log_uncaptured("skipping cluster_time_sent_in_commands due to unsupported topology"); @@ -503,8 +523,11 @@ async fn cluster_time_sent_in_commands() { .database("causal_consistency_12") .collection::("causal_consistency_12"); - coll.find_one(None, None).await.unwrap(); + coll.find_one(doc! {}).await.unwrap(); - let (started, _) = client.get_successful_command_execution("find"); + #[allow(deprecated)] + let mut events = client.events.clone(); + #[allow(deprecated)] + let (started, _) = events.get_successful_command_execution("find"); started.command.get_document("$clusterTime").unwrap(); } diff --git a/src/cmap/conn.rs b/src/cmap/conn.rs index 812ae9a7d..f6e8876ac 100644 --- a/src/cmap/conn.rs +++ b/src/cmap/conn.rs @@ -14,12 +14,18 @@ use tokio::{ sync::{mpsc, Mutex}, }; +#[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" +))] +use crate::options::Compressor; + use self::wire::{Message, MessageFlags}; use super::manager::PoolManager; use crate::{ bson::oid::ObjectId, cmap::PoolGeneration, - compression::Compressor, error::{load_balanced_mode_mismatch, Error, ErrorKind, Result}, event::cmap::{ CmapEventEmitter, @@ -46,14 +52,7 @@ pub struct ConnectionInfo { /// A server-generated identifier that uniquely identifies the connection. Available on server /// versions 4.2+. 
This may be used to correlate driver connections with server logs. - /// If the connection ID sent by the server is too large for an i32, this will be a truncated - /// value. - pub server_id: Option, - - /// A server-generated identifier that uniquely identifies the connection. Available on server - /// versions 4.2+. This may be used to correlate driver connections with server logs. This - /// value will not be truncated and should be used rather than `server_id`. - pub server_id_i64: Option, + pub server_id: Option, /// The address that the connection is connected to. pub address: ServerAddress, @@ -104,12 +103,13 @@ pub(crate) struct Connection { stream: BufStream, - /// Compressor that the client will use before sending messages. - /// This compressor does not get used to decompress server messages. - /// The client will decompress server messages using whichever compressor - /// the server indicates in its message. This compressor is the first - /// compressor in the client's compressor list that also appears in the - /// server's compressor list. + /// Compressor to use to compress outgoing messages. This compressor is not used to decompress + /// incoming messages from the server. + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] pub(super) compressor: Option, /// If the connection is pinned to a cursor or transaction, the channel sender to return this @@ -120,6 +120,10 @@ pub(crate) struct Connection { /// monitoring connections as we do not emit events for those. #[derivative(Debug = "ignore")] event_emitter: Option, + + /// The token callback for OIDC authentication. + #[derivative(Debug = "ignore")] + pub(crate) oidc_token_gen_id: std::sync::RwLock, } impl Connection { @@ -144,8 +148,14 @@ impl Connection { stream_description: None, error: None, pinned_sender: None, + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] compressor: None, more_to_come: false, + oidc_token_gen_id: std::sync::RwLock::new(0), } } @@ -183,8 +193,7 @@ impl Connection { pub(crate) fn info(&self) -> ConnectionInfo { ConnectionInfo { id: self.id, - server_id: self.server_id.map(|value| value as i32), - server_id_i64: self.server_id, + server_id: self.server_id, address: self.address.clone(), } } @@ -275,7 +284,8 @@ impl Connection { pub(crate) async fn send_message( &mut self, message: Message, - to_compress: bool, + // This value is only read if a compression feature flag is enabled. + #[allow(unused_variables)] can_compress: bool, ) -> Result { if self.more_to_come { return Err(Error::internal(format!( @@ -286,16 +296,25 @@ impl Connection { self.command_executing = true; - // If the client has agreed on a compressor with the server, and the command - // is the right type of command, then compress the message. 
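// A note on the renamed `can_compress` parameter in this hunk, based on the
// OP_COMPRESSED specification rather than anything added by this diff: callers are
// expected to pass `false` for commands that must never be compressed (the handshake
// and authentication commands, for example) and `true` otherwise, and compression
// only actually happens when a compressor was negotiated during the handshake.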
+ #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] let write_result = match self.compressor { - Some(ref compressor) if to_compress => { + Some(ref compressor) if can_compress => { message - .write_compressed_to(&mut self.stream, compressor) + .write_op_compressed_to(&mut self.stream, compressor) .await } - _ => message.write_to(&mut self.stream).await, + _ => message.write_op_msg_to(&mut self.stream).await, }; + #[cfg(all( + not(feature = "zstd-compression"), + not(feature = "zlib-compression"), + not(feature = "snappy-compression") + ))] + let write_result = message.write_op_msg_to(&mut self.stream).await; if let Err(ref err) = write_result { self.error = Some(err.clone()); @@ -433,8 +452,14 @@ impl Connection { pool_manager: None, ready_and_available_time: None, pinned_sender: self.pinned_sender.clone(), + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] compressor: self.compressor.clone(), more_to_come: false, + oidc_token_gen_id: std::sync::RwLock::new(0), } } diff --git a/src/cmap/conn/wire/message.rs b/src/cmap/conn/wire/message.rs index 7747949bc..032bd3715 100644 --- a/src/cmap/conn/wire/message.rs +++ b/src/cmap/conn/wire/message.rs @@ -5,19 +5,27 @@ use bson::{doc, Array, Document}; use serde::Serialize; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; -use super::{ - header::{Header, OpCode}, - next_request_id, -}; +#[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" +))] +use crate::options::Compressor; use crate::{ bson::RawDocumentBuf, bson_util, + checked::Checked, cmap::{conn::wire::util::SyncCountReader, Command}, - compression::{Compressor, Decoder}, + compression::decompress::decompress_message, error::{Error, ErrorKind, Result}, runtime::SyncLittleEndianRead, }; +use super::{ + header::{Header, OpCode}, + next_request_id, +}; + /// Represents an OP_MSG wire protocol operation. 
#[derive(Debug)] pub(crate) struct Message { @@ -101,7 +109,7 @@ impl Message { return Self::read_from_op_msg(reader, &header).await; } if header.op_code == OpCode::Compressed { - return Self::read_from_op_compressed(reader, &header).await; + return Self::read_op_compressed_from(reader, &header).await; } Err(Error::new( @@ -121,30 +129,30 @@ impl Message { mut reader: T, header: &Header, ) -> Result { - // TODO: RUST-616 ensure length is < maxMessageSizeBytes - let length_remaining = header.length - Header::LENGTH as i32; - let mut buf = vec![0u8; length_remaining as usize]; + let length = Checked::::try_from(header.length)?; + let length_remaining = length - Header::LENGTH; + let mut buf = vec![0u8; length_remaining.get()?]; reader.read_exact(&mut buf).await?; let reader = buf.as_slice(); - Self::read_op_common(reader, length_remaining, header) + Self::read_op_common(reader, length_remaining.get()?, header) } - async fn read_from_op_compressed( + async fn read_op_compressed_from( mut reader: T, header: &Header, ) -> Result { - let length_remaining = header.length - Header::LENGTH as i32; - let mut buf = vec![0u8; length_remaining as usize]; - reader.read_exact(&mut buf).await?; - let mut reader = buf.as_slice(); + let length = Checked::::try_from(header.length)?; + let length_remaining = length - Header::LENGTH; + let mut buffer = vec![0u8; length_remaining.get()?]; + reader.read_exact(&mut buffer).await?; + let mut compressed = buffer.as_slice(); - // Read original opcode (should be OP_MSG) - let original_opcode = reader.read_i32_sync()?; + let original_opcode = compressed.read_i32_sync()?; if original_opcode != OpCode::Message as i32 { return Err(ErrorKind::InvalidResponse { message: format!( - "The original opcode of the compressed message must be {}, but was {}.", + "The original opcode of the compressed message must be {}, but was {}", OpCode::Message as i32, original_opcode, ), @@ -152,50 +160,38 @@ impl Message { .into()); } - // Read uncompressed size - let uncompressed_size = reader.read_i32_sync()?; - - // Read compressor id - let compressor_id: u8 = reader.read_u8_sync()?; + let uncompressed_size = Checked::::try_from(compressed.read_i32_sync()?)?; + let compressor_id: u8 = compressed.read_u8_sync()?; + let decompressed = decompress_message(compressed, compressor_id)?; - // Get decoder - let decoder = Decoder::from_u8(compressor_id)?; - - // Decode message - let decoded_message = decoder.decode(reader)?; - - // Check that claimed length matches original length - if decoded_message.len() as i32 != uncompressed_size { + if decompressed.len() != uncompressed_size.get()? { return Err(ErrorKind::InvalidResponse { message: format!( "The server's message claims that the uncompressed length is {}, but was \ computed to be {}.", uncompressed_size, - decoded_message.len(), + decompressed.len(), ), } .into()); } - // Read decompressed message as a standard OP_MSG - let reader = decoded_message.as_slice(); - let length_remaining = decoded_message.len(); + // Read decompressed message as a standard OP_MSG. 
+ let reader = decompressed.as_slice(); + let length_remaining = decompressed.len(); - Self::read_op_common(reader, length_remaining as i32, header) + Self::read_op_common(reader, length_remaining, header) } - fn read_op_common( - mut reader: &[u8], - mut length_remaining: i32, - header: &Header, - ) -> Result { + fn read_op_common(mut reader: &[u8], length_remaining: usize, header: &Header) -> Result { + let mut length_remaining = Checked::new(length_remaining); let flags = MessageFlags::from_bits_truncate(reader.read_u32_sync()?); - length_remaining -= std::mem::size_of::() as i32; + length_remaining -= std::mem::size_of::(); let mut count_reader = SyncCountReader::new(&mut reader); let mut document_payload = None; let mut document_sequences = Vec::new(); - while length_remaining - count_reader.bytes_read() as i32 > 4 { + while (length_remaining - count_reader.bytes_read()).get()? > 4 { let next_section = MessageSection::read(&mut count_reader)?; match next_section { MessageSection::Document(document) => { @@ -216,22 +212,19 @@ impl Message { } } - length_remaining -= count_reader.bytes_read() as i32; + length_remaining -= count_reader.bytes_read(); let mut checksum = None; - if length_remaining == 4 && flags.contains(MessageFlags::CHECKSUM_PRESENT) { + if length_remaining.get()? == 4 && flags.contains(MessageFlags::CHECKSUM_PRESENT) { checksum = Some(reader.read_u32_sync()?); - } else if length_remaining != 0 { - return Err(ErrorKind::InvalidResponse { - message: format!( - "The server indicated that the reply would be {} bytes long, but it instead \ - was {}", - header.length, - header.length - length_remaining + count_reader.bytes_read() as i32, - ), - } - .into()); + } else if length_remaining.get()? != 0 { + let header_len = Checked::::try_from(header.length)?; + return Err(Error::invalid_response(format!( + "The server indicated that the reply would be {} bytes long, but it instead was {}", + header.length, + header_len - length_remaining + count_reader.bytes_read(), + ))); } Ok(Self { @@ -247,11 +240,14 @@ impl Message { }) } - /// Serializes the Message to bytes and writes them to `writer`. - pub(crate) async fn write_to(&self, mut writer: T) -> Result<()> { - let sections = self.get_sections_bytes(); + /// Serializes this message into an OP_MSG and writes it to the provided writer. + pub(crate) async fn write_op_msg_to( + &self, + mut writer: T, + ) -> Result<()> { + let sections = self.get_sections_bytes()?; - let total_length = Header::LENGTH + let total_length = Checked::new(Header::LENGTH) + std::mem::size_of::() + sections.len() + self @@ -261,7 +257,7 @@ impl Message { .unwrap_or(0); let header = Header { - length: total_length as i32, + length: total_length.try_into()?, request_id: self.request_id.unwrap_or_else(next_request_id), response_to: self.response_to, op_code: OpCode::Message, @@ -280,47 +276,40 @@ impl Message { Ok(()) } - /// Serializes message to bytes, compresses those bytes, and writes the bytes. - pub(crate) async fn write_compressed_to( + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] + /// Serializes this message into an OP_COMPRESSED message and writes it to the provided writer. 
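// For reference, the OP_COMPRESSED body parsed by `read_op_compressed_from` above and
// produced by `write_op_compressed_to` below is, after the standard message header:
//
//   int32  original opcode    (always OP_MSG here)
//   int32  uncompressed size  (length of the flag bytes plus section bytes)
//   uint8  compressor id
//   bytes  compressed flag bytes and section bytes
//
// Length arithmetic in this hunk goes through `Checked` (imported above) so that
// additions and narrowing casts report overflow as an error instead of silently
// wrapping. A rough sketch of that pattern, with `body` as an illustrative name only:
//
//   let total = Checked::new(Header::LENGTH) + body.len();
//   let length: i32 = total.try_into()?;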
+ pub(crate) async fn write_op_compressed_to( &self, mut writer: T, compressor: &Compressor, ) -> Result<()> { - let mut encoder = compressor.to_encoder()?; - let compressor_id = compressor.id() as u8; + let flag_bytes = &self.flags.bits().to_le_bytes(); + let section_bytes = self.get_sections_bytes()?; + let uncompressed_len = Checked::new(section_bytes.len()) + flag_bytes.len(); - let sections = self.get_sections_bytes(); + let compressed_bytes = compressor.compress(flag_bytes, §ion_bytes)?; - let flag_bytes = &self.flags.bits().to_le_bytes(); - let uncompressed_len = sections.len() + flag_bytes.len(); - // Compress the flags and sections. Depending on the handshake - // this could use zlib, zstd or snappy - encoder.write_all(flag_bytes)?; - encoder.write_all(sections.as_slice())?; - let compressed_bytes = encoder.finish()?; - - let total_length = Header::LENGTH + let total_length = Checked::new(Header::LENGTH) + std::mem::size_of::() + std::mem::size_of::() + std::mem::size_of::() + compressed_bytes.len(); let header = Header { - length: total_length as i32, + length: total_length.try_into()?, request_id: self.request_id.unwrap_or_else(next_request_id), response_to: self.response_to, op_code: OpCode::Compressed, }; - // Write header header.write_to(&mut writer).await?; - // Write original (pre-compressed) opcode (always OP_MSG) writer.write_i32_le(OpCode::Message as i32).await?; - // Write uncompressed size - writer.write_i32_le(uncompressed_len as i32).await?; - // Write compressor id - writer.write_u8(compressor_id).await?; - // Write compressed message + writer.write_i32_le(uncompressed_len.try_into()?).await?; + writer.write_u8(compressor.id()).await?; writer.write_all(compressed_bytes.as_slice()).await?; writer.flush().await?; @@ -328,7 +317,7 @@ impl Message { Ok(()) } - fn get_sections_bytes(&self) -> Vec { + fn get_sections_bytes(&self) -> Result> { let mut sections = Vec::new(); // Payload type 0 @@ -349,8 +338,8 @@ impl Message { }); // Size bytes + identifier bytes + null-terminator byte + document bytes - let size = 4 + identifier_bytes.len() + 1 + documents_size; - sections.extend((size as i32).to_le_bytes()); + let size = Checked::new(4) + identifier_bytes.len() + 1 + documents_size; + sections.extend(size.try_into::()?.to_le_bytes()); sections.extend(identifier_bytes); sections.push(0); @@ -360,7 +349,7 @@ impl Message { } } - sections + Ok(sections) } } @@ -393,28 +382,28 @@ impl MessageSection { return Ok(MessageSection::Document(document)); } - let size = reader.read_i32_sync()?; - let mut length_remaining = size - std::mem::size_of::() as i32; + let size = Checked::::try_from(reader.read_i32_sync()?)?; + let mut length_remaining = size - std::mem::size_of::(); let mut identifier = String::new(); - length_remaining -= reader.read_to_string(&mut identifier)? as i32; + length_remaining -= reader.read_to_string(&mut identifier)?; let mut documents = Vec::new(); let mut count_reader = SyncCountReader::new(reader); - while length_remaining > count_reader.bytes_read() as i32 { + while length_remaining.get()? > count_reader.bytes_read() { let bytes = bson_util::read_document_bytes(&mut count_reader)?; let document = RawDocumentBuf::from_bytes(bytes)?; documents.push(document); } - if length_remaining != count_reader.bytes_read() as i32 { + if length_remaining.get()? 
!= count_reader.bytes_read() { return Err(ErrorKind::InvalidResponse { message: format!( "The server indicated that the reply would be {} bytes long, but it instead \ was {}", size, - length_remaining + count_reader.bytes_read() as i32, + length_remaining + count_reader.bytes_read(), ), } .into()); diff --git a/src/cmap/establish.rs b/src/cmap/establish.rs index 9c986b4d2..fb5291836 100644 --- a/src/cmap/establish.rs +++ b/src/cmap/establish.rs @@ -43,6 +43,11 @@ impl EstablisherOptions { Self { handshake_options: HandshakerOptions { app_name: opts.app_name.clone(), + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] compressors: opts.compressors.clone(), driver_info: opts.driver_info.clone(), server_api: opts.server_api.clone(), diff --git a/src/cmap/establish/handshake.rs b/src/cmap/establish/handshake.rs index 1e335019d..a0c03cef5 100644 --- a/src/cmap/establish/handshake.rs +++ b/src/cmap/establish/handshake.rs @@ -5,11 +5,16 @@ use std::env; use once_cell::sync::Lazy; +#[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" +))] +use crate::options::Compressor; use crate::{ bson::{doc, Bson, Document}, client::auth::ClientFirst, cmap::{Command, Connection, StreamDescription}, - compression::Compressor, error::Result, hello::{hello_command, run_hello, HelloReply}, options::{AuthMechanism, Credential, DriverInfo, ServerApi}, @@ -315,8 +320,11 @@ pub(crate) struct Handshaker { /// given the same pool options, so it can be created at the time the Handshaker is created. command: Command, - // This field is not read without a compression feature flag turned on. - #[allow(dead_code)] + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] compressors: Option>, server_api: Option, @@ -331,7 +339,6 @@ impl Handshaker { /// Creates a new Handshaker. pub(crate) fn new(options: HandshakerOptions) -> Self { let mut metadata = BASE_CLIENT_METADATA.clone(); - let compressors = options.compressors; let mut command = hello_command( options.server_api.as_ref(), @@ -365,14 +372,17 @@ impl Handshaker { command.body.insert("loadBalanced", true); } - // Add compressors to handshake. 
- // See https://github.com/mongodb/specifications/blob/master/source/compression/OP_COMPRESSED.rst - if let Some(ref compressors) = compressors { + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] + if let Some(ref compressors) = options.compressors { command.body.insert( "compression", compressors .iter() - .map(|x| x.name()) + .map(|compressor| compressor.name()) .collect::>(), ); } @@ -381,7 +391,12 @@ impl Handshaker { Self { command, - compressors, + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] + compressors: options.compressors, server_api: options.server_api, metadata, #[cfg(feature = "aws-auth")] @@ -428,24 +443,22 @@ impl Handshaker { .map(|server_first| client_first.into_first_round(server_first)) }); - // Check that the hello reply has a compressor list and unpack it + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] if let (Some(server_compressors), Some(client_compressors)) = ( hello_reply.command_response.compressors.as_ref(), self.compressors.as_ref(), ) { - // Use the Client's first compressor choice that the server supports (comparing only on - // enum variant) - if let Some(compressor) = client_compressors - .iter() - .find(|c| server_compressors.iter().any(|x| c.name() == x)) - { - // Without a feature flag turned on, the Compressor enum is empty which causes an - // unreachable code warning. - #[allow(unreachable_code)] - // zlib compression level is already set - { - conn.compressor = Some(compressor.clone()); - } + // Use the first compressor in the user's list that is also supported by the server. + if let Some(compressor) = client_compressors.iter().find(|client_compressor| { + server_compressors + .iter() + .any(|server_compressor| client_compressor.name() == server_compressor) + }) { + conn.compressor = Some(compressor.clone()); } } @@ -473,9 +486,13 @@ pub(crate) struct HandshakerOptions { /// handshake that each connection makes when it's created. pub(crate) app_name: Option, - /// The compressors that the Client is willing to use in the order they are specified - /// in the configuration. The Client sends this list of compressors to the server. - /// The server responds with the intersection of its supported list of compressors. + /// The compressors specified by the user. This list is sent to the server and the server + /// replies with the subset of the compressors it supports. 
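// Context that is not introduced by this patch: compression is opt-in. With one of
// the compression cargo features enabled, users typically request it through
// ClientOptions or the connection string (for example
// "mongodb://localhost:27017/?compressors=zlib,snappy"), and the negotiation above
// then keeps only the first requested compressor that the server also reports
// supporting.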
+ #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] pub(crate) compressors: Option>, /// Extra information to append to the driver version in the metadata of the handshake with the diff --git a/src/cmap/establish/handshake/test.rs b/src/cmap/establish/handshake/test.rs index 127b9fdf5..61fb2fa40 100644 --- a/src/cmap/establish/handshake/test.rs +++ b/src/cmap/establish/handshake/test.rs @@ -5,6 +5,11 @@ use crate::{bson::doc, cmap::establish::handshake::HandshakerOptions, options::D fn metadata_no_options() { let handshaker = Handshaker::new(HandshakerOptions { app_name: None, + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] compressors: None, driver_info: None, server_api: None, @@ -38,6 +43,11 @@ fn metadata_with_options() { .version(version.to_string()) .build(), ), + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] compressors: None, server_api: None, load_balanced: false, diff --git a/src/cmap/test.rs b/src/cmap/test.rs index da202e117..fc751a476 100644 --- a/src/cmap/test.rs +++ b/src/cmap/test.rs @@ -6,11 +6,10 @@ use std::{collections::HashMap, ops::Deref, sync::Arc, time::Duration}; use tokio::sync::{Mutex, RwLock}; -use self::{ - event::TestEventHandler, - file::{Operation, TestFile, ThreadedOperation}, -}; +use self::file::{Operation, TestFile, ThreadedOperation}; +#[allow(deprecated)] +use crate::test::EventClient; use crate::{ cmap::{ establish::{ConnectionEstablisher, EstablisherOptions}, @@ -21,8 +20,7 @@ use crate::{ error::{Error, ErrorKind, Result}, event::cmap::{CmapEvent, ConnectionPoolOptions as EventOptions}, options::TlsOptions, - runtime, - runtime::AsyncJoinHandle, + runtime::{self, AsyncJoinHandle}, sdam::{TopologyUpdater, UpdateMessage}, test::{ assert_matches, @@ -30,7 +28,7 @@ use crate::{ get_client_options, log_uncaptured, run_spec_test, - EventClient, + util::event_buffer::EventBuffer, MatchErrExt, Matchable, }, @@ -70,7 +68,7 @@ struct Executor { #[derive(Debug)] struct State { - handler: Arc, + events: EventBuffer, connections: RwLock>, unlabeled_connections: Mutex>, threads: RwLock>, @@ -85,11 +83,9 @@ struct State { impl State { // Counts the number of events of the given type that have occurred so far. 
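// The EventBuffer change here follows the same pattern as the other test migrations
// in this series (this is a summary of usage visible in the diff, not a new API):
// the buffer is registered through `buffer.handler()` on the relevant
// *_event_handler options, and recorded events are read back with `buffer.all()`,
// `buffer.subscribe()`, or, for EventClient-based tests, `client.events.clone()`
// followed by `get_successful_command_execution(...)` or
// `get_command_started_events(...)`.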
fn count_events(&self, event_type: &str) -> usize { - self.handler - .events - .read() - .unwrap() - .iter() + self.events + .all() + .into_iter() .filter(|cmap_event| cmap_event.name() == event_type) .count() } @@ -126,14 +122,14 @@ impl CmapThread { impl Executor { async fn new(test_file: TestFile) -> Self { - let handler = Arc::new(TestEventHandler::new()); + let events = EventBuffer::new(); let error = test_file.error; let mut pool_options = test_file.pool_options.unwrap_or_default(); - pool_options.cmap_event_handler = Some(handler.clone().into()); + pool_options.cmap_event_handler = Some(events.handler()); let state = State { - handler, + events, pool: RwLock::new(None), connections: Default::default(), threads: Default::default(), @@ -152,7 +148,8 @@ impl Executor { } async fn execute_test(self) { - let mut subscriber = self.state.handler.subscribe(); + #[allow(deprecated)] + let mut subscriber = self.state.events.subscribe(); let (updater, mut receiver) = TopologyUpdater::channel(); @@ -268,7 +265,8 @@ impl Operation { } } Operation::CheckIn { connection } => { - let mut subscriber = state.handler.subscribe(); + #[allow(deprecated)] + let mut subscriber = state.events.subscribe(); let conn = state.connections.write().await.remove(&connection).unwrap(); let id = conn.id; // connections are checked in via tasks spawned in their drop implementation, @@ -306,7 +304,8 @@ impl Operation { } } Operation::Close => { - let mut subscriber = state.handler.subscribe(); + #[allow(deprecated)] + let mut subscriber = state.events.subscribe(); // pools are closed via their drop implementation state.pool.write().await.take(); @@ -442,6 +441,7 @@ async fn cmap_spec_tests() { } options.hosts.drain(1..); options.direct_connection = Some(true); + #[allow(deprecated)] let client = EventClient::with_options(options).await; if let Some(ref run_on) = test_file.run_on { let can_run_on = run_on.iter().any(|run_on| run_on.can_run_on(&client)); diff --git a/src/cmap/test/event.rs b/src/cmap/test/event.rs index 0a3f6a5a2..1328ed2bf 100644 --- a/src/cmap/test/event.rs +++ b/src/cmap/test/event.rs @@ -1,87 +1,8 @@ -use std::{ - sync::{Arc, RwLock}, - time::Duration, -}; +use std::time::Duration; use serde::{de::Unexpected, Deserialize, Deserializer, Serialize}; -use crate::{event::cmap::*, options::ServerAddress, test::util::EventSubscriber}; -use tokio::sync::broadcast::error::SendError; - -#[derive(Clone, Debug)] -pub struct TestEventHandler { - pub(crate) events: Arc>>, - channel_sender: tokio::sync::broadcast::Sender, -} - -impl TestEventHandler { - pub fn new() -> Self { - let (channel_sender, _) = tokio::sync::broadcast::channel(500); - Self { - events: Default::default(), - channel_sender, - } - } - - fn handle>(&self, event: E) { - let event = event.into(); - // this only errors if no receivers are listening which isn't a concern here. 
- let _: std::result::Result> = - self.channel_sender.send(event.clone()); - self.events.write().unwrap().push(event); - } - - pub(crate) fn subscribe(&self) -> EventSubscriber<'_, TestEventHandler, CmapEvent> { - EventSubscriber::new(self, self.channel_sender.subscribe()) - } -} - -#[allow(deprecated)] -impl CmapEventHandler for TestEventHandler { - fn handle_pool_created_event(&self, event: PoolCreatedEvent) { - self.handle(event); - } - - fn handle_pool_ready_event(&self, event: PoolReadyEvent) { - self.handle(event); - } - - fn handle_pool_cleared_event(&self, event: PoolClearedEvent) { - self.handle(event); - } - - fn handle_pool_closed_event(&self, event: PoolClosedEvent) { - self.handle(event); - } - - fn handle_connection_created_event(&self, event: ConnectionCreatedEvent) { - self.handle(event); - } - - fn handle_connection_ready_event(&self, event: ConnectionReadyEvent) { - self.handle(event); - } - - fn handle_connection_closed_event(&self, event: ConnectionClosedEvent) { - self.handle(event); - } - - fn handle_connection_checkout_started_event(&self, event: ConnectionCheckoutStartedEvent) { - self.handle(event); - } - - fn handle_connection_checkout_failed_event(&self, event: ConnectionCheckoutFailedEvent) { - self.handle(event); - } - - fn handle_connection_checked_out_event(&self, event: ConnectionCheckedOutEvent) { - self.handle(event); - } - - fn handle_connection_checked_in_event(&self, event: ConnectionCheckedInEvent) { - self.handle(event); - } -} +use crate::{event::cmap::*, options::ServerAddress}; impl Serialize for CmapEvent { fn serialize(&self, serializer: S) -> Result diff --git a/src/cmap/test/integration.rs b/src/cmap/test/integration.rs index 3efb76068..2a16febb5 100644 --- a/src/cmap/test/integration.rs +++ b/src/cmap/test/integration.rs @@ -1,6 +1,6 @@ use serde::Deserialize; -use super::{event::TestEventHandler, EVENT_TIMEOUT}; +use super::EVENT_TIMEOUT; use crate::{ bson::{doc, Document}, cmap::{ @@ -15,10 +15,17 @@ use crate::{ runtime, sdam::TopologyUpdater, selection_criteria::ReadPreference, - test::{get_client_options, log_uncaptured, FailPoint, FailPointMode, TestClient}, + test::{ + get_client_options, + log_uncaptured, + util::event_buffer::EventBuffer, + FailPoint, + FailPointMode, + TestClient, + }, }; use semver::VersionReq; -use std::{sync::Arc, time::Duration}; +use std::time::Duration; #[derive(Debug, Deserialize)] struct ListDatabasesResponse { @@ -105,10 +112,10 @@ async fn concurrent_connections() { .await .expect("failpoint should succeed"); - let handler = Arc::new(TestEventHandler::new()); + let buffer = EventBuffer::::new(); let client_options = get_client_options().await.clone(); let mut options = ConnectionPoolOptions::from_client_options(&client_options); - options.cmap_event_handler = Some(handler.clone().into()); + options.cmap_event_handler = Some(buffer.handler()); options.ready = Some(true); let pool = ConnectionPool::new( @@ -130,7 +137,7 @@ async fn concurrent_connections() { { // ensure all three ConnectionCreatedEvents were emitted before one ConnectionReadyEvent. 
- let events = handler.events.read().unwrap(); + let events = buffer.all(); let mut consecutive_creations = 0; for event in events.iter() { match event { @@ -192,12 +199,13 @@ async fn connection_error_during_establishment() { .await .unwrap(); - let handler = Arc::new(TestEventHandler::new()); - let mut subscriber = handler.subscribe(); + let buffer = EventBuffer::::new(); + #[allow(deprecated)] + let mut subscriber = buffer.subscribe(); let mut options = ConnectionPoolOptions::from_client_options(&client_options); options.ready = Some(true); - options.cmap_event_handler = Some(handler.clone().into()); + options.cmap_event_handler = Some(buffer.handler()); let pool = ConnectionPool::new( client_options.hosts[0].clone(), ConnectionEstablisher::new(EstablisherOptions::from_client_options(&client_options)) @@ -225,8 +233,8 @@ async fn connection_error_during_establishment() { async fn connection_error_during_operation() { let mut options = get_client_options().await.clone(); - let handler = Arc::new(TestEventHandler::new()); - options.cmap_event_handler = Some(handler.clone().into()); + let buffer = EventBuffer::::new(); + options.cmap_event_handler = Some(buffer.handler()); options.hosts.drain(1..); options.max_pool_size = Some(1); @@ -246,7 +254,8 @@ async fn connection_error_during_operation() { .await .unwrap(); - let mut subscriber = handler.subscribe(); + #[allow(deprecated)] + let mut subscriber = buffer.subscribe(); client .database("test") diff --git a/src/coll.rs b/src/coll.rs index 451880133..f2a032c7d 100644 --- a/src/coll.rs +++ b/src/coll.rs @@ -1,31 +1,20 @@ mod action; pub mod options; -use std::{borrow::Borrow, collections::HashSet, fmt, fmt::Debug, str::FromStr, sync::Arc}; - -use futures_util::stream::StreamExt; -use serde::{ - de::{DeserializeOwned, Error as DeError}, - Deserialize, - Deserializer, - Serialize, -}; +use std::{fmt, fmt::Debug, str::FromStr, sync::Arc}; + +use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize}; use self::options::*; use crate::{ - bson::{doc, Document}, + bson::doc, client::options::ServerAddress, cmap::conn::PinnedConnectionHandle, concern::{ReadConcern, WriteConcern}, - error::{convert_bulk_errors, BulkWriteError, BulkWriteFailure, Error, ErrorKind, Result}, - operation::{Find, FindAndModify, Insert, Update}, - results::{InsertManyResult, InsertOneResult, UpdateResult}, + error::{Error, Result}, selection_criteria::SelectionCriteria, Client, - ClientSession, - Cursor, Database, - SessionCursor, }; /// `Collection` is the client-side abstraction of a MongoDB Collection. It can be used to @@ -74,7 +63,7 @@ use crate::{ /// // Spawn several tasks that operate on the same collection concurrently. /// tokio::task::spawn(async move { /// // Perform operations with `coll_ref` that work with directly our model. -/// coll_ref.insert_one(Item { id: i }, None).await; +/// coll_ref.insert_one(Item { id: i }).await; /// }); /// } /// # @@ -82,14 +71,20 @@ use crate::{ /// # } /// ``` #[derive(Debug)] -pub struct Collection { +pub struct Collection +where + T: Send + Sync, +{ inner: Arc, _phantom: std::marker::PhantomData T>, } // Because derive is too conservative, derive only implements Clone if T is Clone. // Collection does not actually store any value of type T (so T does not need to be clone). 
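// In practice the manual impl that follows only clones the inner Arc, so cloning a
// Collection is cheap and never requires T: Clone. A rough usage sketch, reusing the
// `Item` type from the doc example above and assuming some `db: Database`:
//
//   let coll: Collection<Item> = db.collection("items");
//   let coll2 = coll.clone();                              // no Item: Clone bound needed
//   let raw: Collection<bson::Document> = coll.clone_with_type();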
-impl Clone for Collection { +impl Clone for Collection +where + T: Send + Sync, +{ fn clone(&self) -> Self { Self { inner: self.inner.clone(), @@ -109,7 +104,10 @@ struct CollectionInner { human_readable_serialization: bool, } -impl Collection { +impl Collection +where + T: Send + Sync, +{ pub(crate) fn new(db: Database, name: &str, options: Option) -> Self { let options = options.unwrap_or_default(); let selection_criteria = options @@ -139,7 +137,7 @@ impl Collection { } /// Gets a clone of the `Collection` with a different type `U`. - pub fn clone_with_type(&self) -> Collection { + pub fn clone_with_type(&self) -> Collection { Collection { inner: self.inner.clone(), _phantom: Default::default(), @@ -216,520 +214,11 @@ impl Collection { Ok(()) } - /// Finds the documents in the collection matching `filter`. - pub async fn find( - &self, - filter: impl Into>, - options: impl Into>, - ) -> Result> { - let mut options = options.into(); - resolve_options!(self, options, [read_concern, selection_criteria]); - - let find = Find::new(self.namespace(), filter.into(), options); - let client = self.client(); - - client.execute_cursor_operation(find).await - } - - /// Finds the documents in the collection matching `filter` using the provided `ClientSession`. - pub async fn find_with_session( - &self, - filter: impl Into>, - options: impl Into>, - session: &mut ClientSession, - ) -> Result> { - let mut options = options.into(); - resolve_read_concern_with_session!(self, options, Some(&mut *session))?; - resolve_selection_criteria_with_session!(self, options, Some(&mut *session))?; - - let find = Find::new(self.namespace(), filter.into(), options); - let client = self.client(); - - client.execute_session_cursor_operation(find, session).await - } - pub(crate) fn human_readable_serialization(&self) -> bool { self.inner.human_readable_serialization } } -impl Collection -where - T: DeserializeOwned + Unpin + Send + Sync, -{ - /// Finds a single document in the collection matching `filter`. - pub async fn find_one( - &self, - filter: impl Into>, - options: impl Into>, - ) -> Result> { - let mut options = options.into(); - resolve_options!(self, options, [read_concern, selection_criteria]); - - let options: FindOptions = options.map(Into::into).unwrap_or_else(Default::default); - let mut cursor = self.find(filter, Some(options)).await?; - cursor.next().await.transpose() - } - - /// Finds a single document in the collection matching `filter` using the provided - /// `ClientSession`. 
- pub async fn find_one_with_session( - &self, - filter: impl Into>, - options: impl Into>, - session: &mut ClientSession, - ) -> Result> { - let mut options = options.into(); - resolve_read_concern_with_session!(self, options, Some(&mut *session))?; - resolve_selection_criteria_with_session!(self, options, Some(&mut *session))?; - - let options: FindOptions = options.map(Into::into).unwrap_or_else(Default::default); - let mut cursor = self - .find_with_session(filter, Some(options), session) - .await?; - let mut cursor = cursor.stream(session); - cursor.next().await.transpose() - } -} - -impl Collection -where - T: DeserializeOwned, -{ - async fn find_one_and_delete_common( - &self, - filter: Document, - options: impl Into>, - session: impl Into>, - ) -> Result> { - let session = session.into(); - - let mut options = options.into(); - resolve_write_concern_with_session!(self, options, session.as_ref())?; - - let op = FindAndModify::with_delete(self.namespace(), filter, options); - self.client().execute_operation(op, session).await - } - - /// Atomically finds up to one document in the collection matching `filter` and deletes it. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub async fn find_one_and_delete( - &self, - filter: Document, - options: impl Into>, - ) -> Result> { - self.find_one_and_delete_common(filter, options, None).await - } - - /// Atomically finds up to one document in the collection matching `filter` and deletes it using - /// the provided `ClientSession`. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub async fn find_one_and_delete_with_session( - &self, - filter: Document, - options: impl Into>, - session: &mut ClientSession, - ) -> Result> { - self.find_one_and_delete_common(filter, options, session) - .await - } - - async fn find_one_and_update_common( - &self, - filter: Document, - update: impl Into, - options: impl Into>, - session: impl Into>, - ) -> Result> { - let update = update.into(); - - let session = session.into(); - - let mut options = options.into(); - resolve_write_concern_with_session!(self, options, session.as_ref())?; - - let op = FindAndModify::with_update(self.namespace(), filter, update, options)?; - self.client().execute_operation(op, session).await - } - - /// Atomically finds up to one document in the collection matching `filter` and updates it. - /// Both `Document` and `Vec` implement `Into`, so either can be - /// passed in place of constructing the enum case. Note: pipeline updates are only supported - /// in MongoDB 4.2+. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. 
- pub async fn find_one_and_update( - &self, - filter: Document, - update: impl Into, - options: impl Into>, - ) -> Result> { - self.find_one_and_update_common(filter, update, options, None) - .await - } - - /// Atomically finds up to one document in the collection matching `filter` and updates it using - /// the provided `ClientSession`. Both `Document` and `Vec` implement - /// `Into`, so either can be passed in place of constructing the enum - /// case. Note: pipeline updates are only supported in MongoDB 4.2+. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub async fn find_one_and_update_with_session( - &self, - filter: Document, - update: impl Into, - options: impl Into>, - session: &mut ClientSession, - ) -> Result> { - self.find_one_and_update_common(filter, update, options, session) - .await - } -} - -impl Collection -where - T: Serialize + DeserializeOwned, -{ - async fn find_one_and_replace_common( - &self, - filter: Document, - replacement: impl Borrow, - options: impl Into>, - session: impl Into>, - ) -> Result> { - let mut options = options.into(); - let session = session.into(); - resolve_write_concern_with_session!(self, options, session.as_ref())?; - - let op = FindAndModify::with_replace( - self.namespace(), - filter, - replacement.borrow(), - options, - self.inner.human_readable_serialization, - )?; - self.client().execute_operation(op, session).await - } - - /// Atomically finds up to one document in the collection matching `filter` and replaces it with - /// `replacement`. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub async fn find_one_and_replace( - &self, - filter: Document, - replacement: impl Borrow, - options: impl Into>, - ) -> Result> { - self.find_one_and_replace_common(filter, replacement, options, None) - .await - } - - /// Atomically finds up to one document in the collection matching `filter` and replaces it with - /// `replacement` using the provided `ClientSession`. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. 
- pub async fn find_one_and_replace_with_session( - &self, - filter: Document, - replacement: impl Borrow, - options: impl Into>, - session: &mut ClientSession, - ) -> Result> { - self.find_one_and_replace_common(filter, replacement, options, session) - .await - } -} - -impl Collection -where - T: Serialize, -{ - #[allow(clippy::needless_option_as_deref)] - async fn insert_many_common( - &self, - docs: impl IntoIterator>, - options: impl Into>, - mut session: Option<&mut ClientSession>, - ) -> Result { - let ds: Vec<_> = docs.into_iter().collect(); - let mut options = options.into(); - resolve_write_concern_with_session!(self, options, session.as_ref())?; - - if ds.is_empty() { - return Err(ErrorKind::InvalidArgument { - message: "No documents provided to insert_many".to_string(), - } - .into()); - } - - let ordered = options.as_ref().and_then(|o| o.ordered).unwrap_or(true); - - let mut cumulative_failure: Option = None; - let mut error_labels: HashSet = Default::default(); - let mut cumulative_result: Option = None; - - let mut n_attempted = 0; - - while n_attempted < ds.len() { - let docs: Vec<&T> = ds.iter().skip(n_attempted).map(Borrow::borrow).collect(); - let insert = Insert::new( - self.namespace(), - docs, - options.clone(), - self.client().should_auto_encrypt().await, - self.inner.human_readable_serialization, - ); - - match self - .client() - .execute_operation(insert, session.as_deref_mut()) - .await - { - Ok(result) => { - let current_batch_size = result.inserted_ids.len(); - - let cumulative_result = - cumulative_result.get_or_insert_with(InsertManyResult::new); - for (index, id) in result.inserted_ids { - cumulative_result - .inserted_ids - .insert(index + n_attempted, id); - } - - n_attempted += current_batch_size; - } - Err(e) => { - let labels = e.labels().clone(); - match *e.kind { - ErrorKind::BulkWrite(bw) => { - // for ordered inserts this size will be incorrect, but knowing the - // batch size isn't needed for ordered - // failures since we return immediately from - // them anyways. - let current_batch_size = bw.inserted_ids.len() - + bw.write_errors.as_ref().map(|we| we.len()).unwrap_or(0); - - let failure_ref = - cumulative_failure.get_or_insert_with(BulkWriteFailure::new); - if let Some(write_errors) = bw.write_errors { - for err in write_errors { - let index = n_attempted + err.index; - - failure_ref - .write_errors - .get_or_insert_with(Default::default) - .push(BulkWriteError { index, ..err }); - } - } - - if let Some(wc_error) = bw.write_concern_error { - failure_ref.write_concern_error = Some(wc_error); - } - - error_labels.extend(labels); - - if ordered { - // this will always be true since we invoked get_or_insert_with - // above. - if let Some(failure) = cumulative_failure { - return Err(Error::new( - ErrorKind::BulkWrite(failure), - Some(error_labels), - )); - } - } - n_attempted += current_batch_size; - } - _ => return Err(e), - } - } - } - } - - match cumulative_failure { - Some(failure) => Err(Error::new( - ErrorKind::BulkWrite(failure), - Some(error_labels), - )), - None => Ok(cumulative_result.unwrap_or_else(InsertManyResult::new)), - } - } - - /// Inserts the data in `docs` into the collection. - /// - /// Note that this method accepts both owned and borrowed values, so the input documents - /// do not need to be cloned in order to be passed in. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. 
See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub async fn insert_many( - &self, - docs: impl IntoIterator>, - options: impl Into>, - ) -> Result { - self.insert_many_common(docs, options, None).await - } - - /// Inserts the data in `docs` into the collection using the provided `ClientSession`. - /// - /// Note that this method accepts both owned and borrowed values, so the input documents - /// do not need to be cloned in order to be passed in. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub async fn insert_many_with_session( - &self, - docs: impl IntoIterator>, - options: impl Into>, - session: &mut ClientSession, - ) -> Result { - self.insert_many_common(docs, options, Some(session)).await - } - - async fn insert_one_common( - &self, - doc: &T, - options: impl Into>, - session: impl Into>, - ) -> Result { - let session = session.into(); - - let mut options = options.into(); - resolve_write_concern_with_session!(self, options, session.as_ref())?; - - let insert = Insert::new( - self.namespace(), - vec![doc], - options.map(InsertManyOptions::from_insert_one_options), - self.client().should_auto_encrypt().await, - self.inner.human_readable_serialization, - ); - self.client() - .execute_operation(insert, session) - .await - .map(InsertOneResult::from_insert_many_result) - .map_err(convert_bulk_errors) - } - - /// Inserts `doc` into the collection. - /// - /// Note that either an owned or borrowed value can be inserted here, so the input document - /// does not need to be cloned to be passed in. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub async fn insert_one( - &self, - doc: impl Borrow, - options: impl Into>, - ) -> Result { - self.insert_one_common(doc.borrow(), options, None).await - } - - /// Inserts `doc` into the collection using the provided `ClientSession`. - /// - /// Note that either an owned or borrowed value can be inserted here, so the input document - /// does not need to be cloned to be passed in. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. 
- pub async fn insert_one_with_session( - &self, - doc: impl Borrow, - options: impl Into>, - session: &mut ClientSession, - ) -> Result { - self.insert_one_common(doc.borrow(), options, session).await - } - - async fn replace_one_common( - &self, - query: Document, - replacement: impl Borrow, - options: impl Into>, - session: impl Into>, - ) -> Result { - let mut options = options.into(); - - let session = session.into(); - - resolve_write_concern_with_session!(self, options, session.as_ref())?; - - let update = Update::with_replace( - self.namespace(), - query, - replacement.borrow(), - false, - options.map(UpdateOptions::from_replace_options), - self.inner.human_readable_serialization, - ); - self.client().execute_operation(update, session).await - } - - /// Replaces up to one document matching `query` in the collection with `replacement`. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub async fn replace_one( - &self, - query: Document, - replacement: impl Borrow, - options: impl Into>, - ) -> Result { - self.replace_one_common(query, replacement, options, None) - .await - } - - /// Replaces up to one document matching `query` in the collection with `replacement` using the - /// provided `ClientSession`. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub async fn replace_one_with_session( - &self, - query: Document, - replacement: impl Borrow, - options: impl Into>, - session: &mut ClientSession, - ) -> Result { - self.replace_one_common(query, replacement, options, session) - .await - } -} - /// A struct modeling the canonical name for a collection in MongoDB. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Namespace { diff --git a/src/coll/action/drop.rs b/src/coll/action/drop.rs index 453270d02..bab2faa54 100644 --- a/src/coll/action/drop.rs +++ b/src/coll/action/drop.rs @@ -4,27 +4,31 @@ use crate::{ operation::drop_collection as op, }; -action_impl! 
{ - impl<'a> Action for DropCollection<'a> { - type Future = DropCollectionFuture; +#[action_impl] +impl<'a> Action for DropCollection<'a> { + type Future = DropCollectionFuture; - async fn execute(mut self) -> Result<()> { - resolve_options!(self.cr, self.options, [write_concern]); + async fn execute(mut self) -> Result<()> { + resolve_options!(self.cr, self.options, [write_concern]); - #[cfg(feature = "in-use-encryption-unstable")] - self.cr.drop_aux_collections(self.options.as_ref(), self.session.as_deref_mut()) - .await?; + #[cfg(feature = "in-use-encryption-unstable")] + self.cr + .drop_aux_collections(self.options.as_ref(), self.session.as_deref_mut()) + .await?; - let drop = op::DropCollection::new(self.cr.namespace(), self.options); - self.cr.client() - .execute_operation(drop, self.session.as_deref_mut()) - .await - } + let drop = op::DropCollection::new(self.cr.namespace(), self.options); + self.cr + .client() + .execute_operation(drop, self.session.as_deref_mut()) + .await } } #[cfg(feature = "in-use-encryption-unstable")] -impl crate::Collection { +impl crate::Collection +where + T: Send + Sync, +{ #[allow(clippy::needless_option_as_deref)] async fn drop_aux_collections( &self, diff --git a/src/coll/options.rs b/src/coll/options.rs index 9ec535dda..1f10bd7af 100644 --- a/src/coll/options.rs +++ b/src/coll/options.rs @@ -61,6 +61,15 @@ pub enum ReturnDocument { Before, } +impl ReturnDocument { + pub(crate) fn as_bool(&self) -> bool { + match self { + ReturnDocument::After => true, + ReturnDocument::Before => false, + } + } +} + impl<'de> Deserialize<'de> for ReturnDocument { fn deserialize>(deserializer: D) -> std::result::Result { let s = String::deserialize(deserializer)?; diff --git a/src/collation.rs b/src/collation.rs index bae93d02b..b7fb2334d 100644 --- a/src/collation.rs +++ b/src/collation.rs @@ -128,7 +128,7 @@ impl Serialize for CollationStrength { S: serde::Serializer, { let level = u32::from(*self); - serializer.serialize_i32(level as i32) + serializer.serialize_i32(level.try_into().map_err(serde::ser::Error::custom)?) } } diff --git a/src/compression.rs b/src/compression.rs index 7f0071ea2..0d8f2c748 100644 --- a/src/compression.rs +++ b/src/compression.rs @@ -3,321 +3,19 @@ feature = "zlib-compression", feature = "snappy-compression" ))] -#[cfg(test)] -mod test; - -#[cfg(feature = "zlib-compression")] -use flate2::{ - write::{ZlibDecoder, ZlibEncoder}, - Compression, -}; - -#[cfg(feature = "zlib-compression")] -use std::convert::TryInto; - +pub(crate) mod compress; #[cfg(any( feature = "zstd-compression", feature = "zlib-compression", feature = "snappy-compression" ))] -use std::io::Write; - -use crate::error::{Error, ErrorKind, Result}; - -#[derive(Clone, Debug, PartialEq)] -pub(crate) enum CompressorId { - Noop = 0, - #[cfg(feature = "snappy-compression")] - Snappy = 1, - #[cfg(feature = "zlib-compression")] - Zlib = 2, - #[cfg(feature = "zstd-compression")] - Zstd = 3, -} - -impl CompressorId { - pub(crate) fn from_u8(id: u8) -> Result { - match id { - 0 => Ok(CompressorId::Noop), - #[cfg(feature = "snappy-compression")] - 1 => Ok(CompressorId::Snappy), - #[cfg(feature = "zlib-compression")] - 2 => Ok(CompressorId::Zlib), - #[cfg(feature = "zstd-compression")] - 3 => Ok(CompressorId::Zstd), - other => Err(ErrorKind::InvalidResponse { - message: format!("Invalid compressor id: {}", other), - } - .into()), - } - } -} - -/// Enum representing supported compressor algorithms. -/// Used for compressing and decompressing messages sent to and read from the server. 
-/// For compressors that take a `level`, use `None` to indicate the default level. -/// Higher `level` indicates more compression (and slower). -/// Requires `zstd-compression` feature flag to use `Zstd` compressor, -/// `zlib-compression` feature flag to use `Zlib` compressor, and -/// `snappy-compression` feature flag to use `Snappy` Compressor. -#[derive(Clone, Debug, PartialEq)] -#[non_exhaustive] -pub enum Compressor { - /// Zstd compressor. Requires Rust version 1.54. - /// See [`Zstd`](http://facebook.github.io/zstd/zstd_manual.html) for more information - #[cfg(feature = "zstd-compression")] - Zstd { - /// Zstd compression level - level: Option, - }, - /// Zlib compressor. - /// See [`Zlib`](https://zlib.net/) for more information. - #[cfg(feature = "zlib-compression")] - Zlib { - /// Zlib compression level - level: Option, - }, - /// Snappy compressor. - /// See [`Snappy`](http://google.github.io/snappy/) for more information. - #[cfg(feature = "snappy-compression")] - Snappy, -} - -impl Compressor { - #[allow(unused_variables)] - pub(crate) fn write_zlib_level(&mut self, level: i32) { - #[cfg(feature = "zlib-compression")] - if let Compressor::Zlib { - level: ref mut zlib_level, - } = *self - { - *zlib_level = if level == -1 { None } else { Some(level) } - } - } - - pub(crate) fn parse_str(s: &str) -> Result { - match s.to_lowercase().as_str() { - #[cfg(feature = "zlib-compression")] - "zlib" => Ok(Compressor::Zlib { level: None }), - #[cfg(feature = "zstd-compression")] - "zstd" => Ok(Compressor::Zstd { level: None }), - #[cfg(feature = "snappy-compression")] - "snappy" => Ok(Compressor::Snappy), - other => Err(Error::from(ErrorKind::InvalidArgument { - message: format!("Invalid compressor: {} was supplied but is invalid", other), - })), - } - } +pub(crate) mod compressors; +pub(crate) mod decompress; - pub(crate) fn name(&self) -> &'static str { - match *self { - #[cfg(feature = "zstd-compression")] - Compressor::Zstd { .. } => "zstd", - #[cfg(feature = "zlib-compression")] - Compressor::Zlib { .. 
} => "zlib", - #[cfg(feature = "snappy-compression")] - Compressor::Snappy => "snappy", - } - } - - pub(crate) fn id(&self) -> CompressorId { - match *self { - #[cfg(feature = "zstd-compression")] - Compressor::Zstd { level: _ } => CompressorId::Zstd, - #[cfg(feature = "zlib-compression")] - Compressor::Zlib { level: _ } => CompressorId::Zlib, - #[cfg(feature = "snappy-compression")] - Compressor::Snappy => CompressorId::Snappy, - } - } - - pub(crate) fn validate(&self) -> Result<()> { - #[allow(unreachable_patterns)] - match *self { - #[cfg(feature = "zstd-compression")] - Compressor::Zstd { level: Some(level) } - if !zstd::compression_level_range().contains(&level) => - { - Err(Error::from(ErrorKind::InvalidArgument { - message: format!("invalid zstd level: {}", level), - })) - } - #[cfg(feature = "zlib-compression")] - Compressor::Zlib { level: Some(level) } if !(-1..10).contains(&level) => { - Err(Error::from(ErrorKind::InvalidArgument { - message: format!("invalid zlib level: {}", level), - })) - } - _ => Ok(()), - } - } - - pub(crate) fn to_encoder(&self) -> Result { - match *self { - #[cfg(feature = "zstd-compression")] - Compressor::Zstd { level } => { - let encoder = - zstd::Encoder::new(vec![], level.unwrap_or(zstd::DEFAULT_COMPRESSION_LEVEL)) - .map_err(|e| { - Error::from(ErrorKind::Internal { - message: format!( - "an error occurred getting a new zstd encoder: {}", - e - ), - }) - })?; - - Ok(Encoder::Zstd { encoder }) - } - #[cfg(feature = "zlib-compression")] - Compressor::Zlib { level } => { - let level = match level { - Some(level) => Compression::new(level.try_into().map_err(|e| { - Error::from(ErrorKind::Internal { - message: format!("an invalid zlib compression level was given: {}", e), - }) - })?), - _ => Compression::default(), - }; - let encoder = ZlibEncoder::new(vec![], level); - Ok(Encoder::Zlib { encoder }) - } - #[cfg(feature = "snappy-compression")] - Compressor::Snappy => Ok(Encoder::Snappy { bytes: vec![] }), - } - } -} - -pub(crate) enum Encoder { - #[cfg(feature = "zstd-compression")] - Zstd { - encoder: zstd::Encoder<'static, Vec>, - }, - #[cfg(feature = "zlib-compression")] - Zlib { encoder: ZlibEncoder> }, - #[cfg(feature = "snappy-compression")] - Snappy { bytes: Vec }, -} - -#[allow(unused_variables)] -impl Encoder { - pub(crate) fn write_all(&mut self, buf: &[u8]) -> Result<()> { - match *self { - #[cfg(feature = "zstd-compression")] - Encoder::Zstd { ref mut encoder } => encoder.write_all(buf).map_err(|e| { - ErrorKind::Internal { - message: format!("an error occurred writing to the zstd encoder: {}", e), - } - .into() - }), - #[cfg(feature = "zlib-compression")] - Encoder::Zlib { ref mut encoder } => encoder.write_all(buf).map_err(|e| { - ErrorKind::Internal { - message: format!("an error occurred writing to the zlib encoder: {}", e), - } - .into() - }), - #[cfg(feature = "snappy-compression")] - Encoder::Snappy { ref mut bytes } => bytes.write_all(buf).map_err(|e| { - ErrorKind::Internal { - message: format!("an error occurred writing to the snappy encoder: {}", e), - } - .into() - }), - } - } - - pub(crate) fn finish(self) -> Result> { - match self { - #[cfg(feature = "zstd-compression")] - Encoder::Zstd { encoder } => encoder.finish().map_err(|e| { - ErrorKind::Internal { - message: format!("an error occurred finishing zstd encoder: {}", e), - } - .into() - }), - #[cfg(feature = "zlib-compression")] - Encoder::Zlib { encoder } => encoder.finish().map_err(|e| { - ErrorKind::Internal { - message: format!("an error occurred finishing zlib encoder: 
{}", e), - } - .into() - }), - #[cfg(feature = "snappy-compression")] - Encoder::Snappy { bytes } => { - // The server doesn't use snappy frame format, so we need to use snap::raw::Encoder - // rather than snap::write::FrameEncoder. Likewise for decoding. - let mut compressor = snap::raw::Encoder::new(); - compressor.compress_vec(bytes.as_slice()).map_err(|e| { - ErrorKind::Internal { - message: format!("an error occurred finishing snappy encoder: {}", e), - } - .into() - }) - } - } - } -} - -#[derive(Clone, Debug)] -pub(crate) enum Decoder { - #[cfg(feature = "zstd-compression")] - Zstd, - #[cfg(feature = "zlib-compression")] - Zlib, - #[cfg(feature = "snappy-compression")] - Snappy, - Noop, -} - -impl Decoder { - pub(crate) fn decode(self, source: &[u8]) -> Result> { - match self { - #[cfg(feature = "zstd-compression")] - Decoder::Zstd => { - let mut ret = Vec::new(); - zstd::stream::copy_decode(source, &mut ret).map_err(|e| { - Error::from(ErrorKind::Internal { - message: format!("Could not decode using zstd decoder: {}", e), - }) - })?; - Ok(ret) - } - #[cfg(feature = "zlib-compression")] - Decoder::Zlib => { - let mut decoder = ZlibDecoder::new(vec![]); - decoder.write_all(source)?; - decoder.finish().map_err(|e| { - ErrorKind::Internal { - message: format!("Could not decode using zlib decoder: {}", e), - } - .into() - }) - } - #[cfg(feature = "snappy-compression")] - Decoder::Snappy => { - let mut decompressor = snap::raw::Decoder::new(); - decompressor.decompress_vec(source).map_err(|e| { - ErrorKind::Internal { - message: format!("Could not decode using snappy decoder: {}", e), - } - .into() - }) - } - Decoder::Noop => Ok(source.to_vec()), - } - } - - pub(crate) fn from_u8(id: u8) -> Result { - let compressor_id = CompressorId::from_u8(id)?; - match compressor_id { - CompressorId::Noop => Ok(Decoder::Noop), - #[cfg(feature = "snappy-compression")] - CompressorId::Snappy => Ok(Decoder::Snappy), - #[cfg(feature = "zlib-compression")] - CompressorId::Zlib => Ok(Decoder::Zlib), - #[cfg(feature = "zstd-compression")] - CompressorId::Zstd => Ok(Decoder::Zstd), - } - } -} +const NOOP_COMPRESSOR_ID: u8 = 0; +#[cfg(feature = "snappy-compression")] +const SNAPPY_COMPRESSOR_ID: u8 = 1; +#[cfg(feature = "zlib-compression")] +const ZLIB_COMPRESSOR_ID: u8 = 2; +#[cfg(feature = "zstd-compression")] +const ZSTD_COMPRESSOR_ID: u8 = 3; diff --git a/src/compression/compress.rs b/src/compression/compress.rs new file mode 100644 index 000000000..6715290fd --- /dev/null +++ b/src/compression/compress.rs @@ -0,0 +1,80 @@ +use crate::{ + error::{ErrorKind, Result}, + options::Compressor, +}; + +impl Compressor { + pub(crate) fn compress(&self, flag_bytes: &[u8], section_bytes: &[u8]) -> Result> { + let result = match *self { + #[cfg(feature = "zstd-compression")] + Self::Zstd { level } => compress_zstd(level, flag_bytes, section_bytes), + #[cfg(feature = "zlib-compression")] + Self::Zlib { level } => compress_zlib(level, flag_bytes, section_bytes), + #[cfg(feature = "snappy-compression")] + Self::Snappy => compress_snappy(flag_bytes, section_bytes), + }; + + result.map_err(|error| { + ErrorKind::Internal { + message: format!( + "Failed to compress message with {} compression: {}", + self.name(), + error + ), + } + .into() + }) + } +} + +#[cfg(feature = "zstd-compression")] +fn compress_zstd( + level: Option, + flag_bytes: &[u8], + section_bytes: &[u8], +) -> std::io::Result> { + use std::io::Write; + + use zstd::{Encoder, DEFAULT_COMPRESSION_LEVEL}; + + let level = 
level.unwrap_or(DEFAULT_COMPRESSION_LEVEL); + let mut encoder = Encoder::new(Vec::new(), level)?; + + encoder.write_all(flag_bytes)?; + encoder.write_all(section_bytes)?; + + encoder.finish() +} + +#[cfg(feature = "zlib-compression")] +fn compress_zlib( + level: Option, + flag_bytes: &[u8], + section_bytes: &[u8], +) -> std::io::Result> { + use std::io::Write; + + use flate2::{write::ZlibEncoder, Compression}; + + let level = match level { + Some(level) => Compression::new(level), + None => Compression::default(), + }; + let mut encoder = ZlibEncoder::new(Vec::new(), level); + + encoder.write_all(flag_bytes)?; + encoder.write_all(section_bytes)?; + + encoder.finish() +} + +#[cfg(feature = "snappy-compression")] +fn compress_snappy(flag_bytes: &[u8], section_bytes: &[u8]) -> std::io::Result> { + use snap::raw::Encoder; + + let mut uncompressed = flag_bytes.to_vec(); + uncompressed.extend_from_slice(section_bytes); + + let mut encoder = Encoder::new(); + Ok(encoder.compress_vec(&uncompressed)?) +} diff --git a/src/compression/compressors.rs b/src/compression/compressors.rs new file mode 100644 index 000000000..9e6bc2e29 --- /dev/null +++ b/src/compression/compressors.rs @@ -0,0 +1,140 @@ +use std::str::FromStr; + +use crate::error::{Error, ErrorKind, Result}; + +/// The compressors that may be used to compress messages sent to and decompress messages returned +/// from the server. Note that each variant requires enabling a corresponding feature flag. +#[derive(Clone, Debug, PartialEq)] +#[non_exhaustive] +pub enum Compressor { + /// `zstd` compression. See [the `zstd` manual](http://facebook.github.io/zstd/zstd_manual.html) + /// for more information. + #[cfg(feature = "zstd-compression")] + Zstd { + /// The compression level to use. It is an error to specify a value outside of the + /// supported compression levels returned by [zstd::compression_level_range]. If no value + /// is specified, the default value ([zstd::DEFAULT_COMPRESSION_LEVEL]) will be used. + /// Higher levels correlate to smaller compression but slower performance. + level: Option, + }, + /// `zlib` compression. See [the `zlib` documentation](https://zlib.net/) for more information. + #[cfg(feature = "zlib-compression")] + Zlib { + /// The compression level to use. If no value is specified, the default value + /// ([flate2::Compression::default]) will be used. Higher levels correlate to smaller + /// compression but slower performance. + level: Option, + }, + /// `snappy` compression. See [the `snappy` documentation](http://google.github.io/snappy/) + /// for more information. + #[cfg(feature = "snappy-compression")] + Snappy, +} + +impl Compressor { + pub(crate) fn name(&self) -> &'static str { + match *self { + #[cfg(feature = "zstd-compression")] + Compressor::Zstd { .. } => "zstd", + #[cfg(feature = "zlib-compression")] + Compressor::Zlib { .. } => "zlib", + #[cfg(feature = "snappy-compression")] + Compressor::Snappy => "snappy", + } + } + + pub(crate) fn id(&self) -> u8 { + match self { + #[cfg(feature = "zstd-compression")] + Self::Zstd { .. } => super::ZSTD_COMPRESSOR_ID, + #[cfg(feature = "zlib-compression")] + Self::Zlib { .. 
} => super::ZLIB_COMPRESSOR_ID, + #[cfg(feature = "snappy-compression")] + Self::Snappy => super::SNAPPY_COMPRESSOR_ID, + } + } + + pub(crate) fn validate(&self) -> Result<()> { + #[cfg(feature = "zstd-compression")] + if let Self::Zstd { level: Some(level) } = self { + let valid_levels = zstd::compression_level_range(); + if !valid_levels.contains(level) { + return Err(ErrorKind::InvalidArgument { + message: format!( + "Invalid zstd compression level {}: compression level must be within the \ + range {:?}", + level, valid_levels + ), + } + .into()); + } + } + + #[cfg(feature = "zlib-compression")] + if let Self::Zlib { level: Some(level) } = self { + if *level > 9 { + return Err(ErrorKind::InvalidArgument { + message: format!( + "Invalid zlib compression level {}: compression level must be between 0 \ + and 9 (inclusive)", + level + ), + } + .into()); + } + } + + Ok(()) + } + + #[cfg(feature = "zlib-compression")] + pub(crate) fn write_zlib_level(&mut self, uri_level: i32) -> Result<()> { + // This pattern is irrefutable when only zlib-compression is enabled. + #[allow(irrefutable_let_patterns)] + if let Compressor::Zlib { ref mut level } = *self { + if uri_level == -1 { + *level = None; + } else { + let zlib_compression_level = + u32::try_from(uri_level).map_err(|_| ErrorKind::InvalidArgument { + message: format!( + "Invalid zlib compression level specified: {}\nzlib compression level \ + must be a nonnegative integer or -1 to use the default compression \ + level", + uri_level + ), + })?; + *level = Some(zlib_compression_level); + } + } + Ok(()) + } +} + +impl FromStr for Compressor { + type Err = Error; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + #[cfg(feature = "zstd-compression")] + "zstd" => Ok(Self::Zstd { level: None }), + #[cfg(feature = "zlib-compression")] + "zlib" => Ok(Self::Zlib { level: None }), + #[cfg(feature = "snappy-compression")] + "snappy" => Ok(Self::Snappy), + other if other == "zstd" || other == "zlib" || other == "snappy" => { + Err(ErrorKind::InvalidArgument { + message: format!( + "Enable the {}-compression feature flag to use {} compression", + other, other + ), + } + .into()) + } + other => Err(ErrorKind::InvalidArgument { + message: format!("Unsupported compressor: {}", other), + } + .into()), + } + } +} diff --git a/src/compression/decompress.rs b/src/compression/decompress.rs new file mode 100644 index 000000000..f57f6546b --- /dev/null +++ b/src/compression/decompress.rs @@ -0,0 +1,70 @@ +use crate::error::{ErrorKind, Result}; + +/// Decompresses the given message with the decompression algorithm indicated by the given +/// ID. Returns an error if decompression fails or if the ID is unsupported. 
+pub(crate) fn decompress_message(message: &[u8], compressor_id: u8) -> Result> { + if compressor_id == super::NOOP_COMPRESSOR_ID { + return Ok(message.into()); + } + + #[cfg(feature = "zstd-compression")] + if compressor_id == super::ZSTD_COMPRESSOR_ID { + return decompress_zstd(message); + } + + #[cfg(feature = "zlib-compression")] + if compressor_id == super::ZLIB_COMPRESSOR_ID { + return decompress_zlib(message); + } + + #[cfg(feature = "snappy-compression")] + if compressor_id == super::SNAPPY_COMPRESSOR_ID { + return decompress_snappy(message); + } + + Err(ErrorKind::InvalidResponse { + message: format!( + "Unsupported compressor ID returned from the server: {}", + compressor_id + ), + } + .into()) +} + +#[cfg(feature = "zstd-compression")] +fn decompress_zstd(message: &[u8]) -> Result> { + let mut decompressed = Vec::new(); + zstd::stream::copy_decode(message, &mut decompressed).map_err(|error| ErrorKind::Internal { + message: format!("Could not decompress message with zstd: {}", error), + })?; + Ok(decompressed) +} + +#[cfg(feature = "zlib-compression")] +fn decompress_zlib(message: &[u8]) -> Result> { + use std::io::Write; + + use flate2::write::ZlibDecoder; + + let mut decoder = ZlibDecoder::new(Vec::new()); + decoder.write_all(message)?; + decoder.finish().map_err(|error| { + ErrorKind::Internal { + message: format!("Could not decompress message with zlib: {}", error), + } + .into() + }) +} + +#[cfg(feature = "snappy-compression")] +fn decompress_snappy(message: &[u8]) -> Result> { + use snap::raw::Decoder; + + let mut decoder = Decoder::new(); + decoder.decompress_vec(message).map_err(|error| { + ErrorKind::Internal { + message: format!("Could not decompress message with snappy: {}", error), + } + .into() + }) +} diff --git a/src/compression/test.rs b/src/compression/test.rs deleted file mode 100644 index 7e9ae70ec..000000000 --- a/src/compression/test.rs +++ /dev/null @@ -1,113 +0,0 @@ -// Tests OP_COMPRESSED. To actually test compression you need to look at -// server logs to see if decompression is happening. Even if these tests -// are run against a server that does not support compression -// these tests won't fail because the messages will be sent without compression -// (as indicated in the specs). 
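// A minimal round-trip sketch of the reorganized compression API from the hunks above,
// assuming crate-internal access (`Compressor::compress` and `decompress_message` are
// pub(crate)) and the `zlib-compression` feature; the module paths are those added in this
// patch. The OP_COMPRESSED flag bytes and section bytes are compressed together and
// recovered as a single concatenated buffer.
#[cfg(all(test, feature = "zlib-compression"))]
mod compression_round_trip_sketch {
    use crate::{compression::decompress::decompress_message, options::Compressor};

    #[test]
    fn zlib_round_trip() {
        let compressor = Compressor::Zlib { level: Some(4) };
        let flag_bytes = [0u8; 4];
        let section_bytes = b"example message body";

        // Compress with the selected compressor, then dispatch decompression on its wire ID.
        let compressed = compressor.compress(&flag_bytes, section_bytes).unwrap();
        let decompressed = decompress_message(&compressed, compressor.id()).unwrap();

        let mut expected = flag_bytes.to_vec();
        expected.extend_from_slice(section_bytes);
        assert_eq!(decompressed, expected);
    }
}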
- -use bson::{doc, Bson}; - -use crate::{ - client::options::ClientOptions, - compression::{Compressor, CompressorId, Decoder}, - test::{get_client_options, TestClient}, -}; - -#[cfg(feature = "zlib-compression")] -#[test] -fn test_zlib_compressor() { - let zlib_compressor = Compressor::Zlib { level: Some(4) }; - assert_eq!(CompressorId::Zlib, zlib_compressor.id()); - let mut encoder = zlib_compressor.to_encoder().unwrap(); - assert!(encoder.write_all(b"foo").is_ok()); - assert!(encoder.write_all(b"bar").is_ok()); - assert!(encoder.write_all(b"ZLIB").is_ok()); - - let compressed_bytes = encoder.finish().unwrap(); - - let decoder = Decoder::from_u8(CompressorId::Zlib as u8).unwrap(); - let original_bytes = decoder.decode(compressed_bytes.as_slice()).unwrap(); - assert_eq!(b"foobarZLIB", original_bytes.as_slice()); -} - -#[cfg(feature = "zstd-compression")] -#[test] -fn test_zstd_compressor() { - let zstd_compressor = Compressor::Zstd { level: None }; - assert_eq!(CompressorId::Zstd, zstd_compressor.id()); - let mut encoder = zstd_compressor.to_encoder().unwrap(); - assert!(encoder.write_all(b"foo").is_ok()); - assert!(encoder.write_all(b"bar").is_ok()); - assert!(encoder.write_all(b"ZSTD").is_ok()); - - let compressed_bytes = encoder.finish().unwrap(); - - let decoder = Decoder::from_u8(CompressorId::Zstd as u8).unwrap(); - let original_bytes = decoder.decode(compressed_bytes.as_slice()).unwrap(); - assert_eq!(b"foobarZSTD", original_bytes.as_slice()); -} - -#[cfg(feature = "snappy-compression")] -#[test] -fn test_snappy_compressor() { - let snappy_compressor = Compressor::Snappy; - assert_eq!(CompressorId::Snappy, snappy_compressor.id()); - let mut encoder = snappy_compressor.to_encoder().unwrap(); - assert!(encoder.write_all(b"foo").is_ok()); - assert!(encoder.write_all(b"bar").is_ok()); - assert!(encoder.write_all(b"SNAPPY").is_ok()); - - let compressed_bytes = encoder.finish().unwrap(); - - let decoder = Decoder::from_u8(CompressorId::Snappy as u8).unwrap(); - let original_bytes = decoder.decode(compressed_bytes.as_slice()).unwrap(); - assert_eq!(b"foobarSNAPPY", original_bytes.as_slice()); -} - -#[tokio::test] -#[cfg(feature = "zlib-compression")] -async fn ping_server_with_zlib_compression() { - let mut client_options = get_client_options().await.clone(); - client_options.compressors = Some(vec![Compressor::Zlib { level: Some(4) }]); - send_ping_with_compression(client_options).await; -} - -#[tokio::test] -#[cfg(feature = "zstd-compression")] -async fn ping_server_with_zstd_compression() { - let mut client_options = get_client_options().await.clone(); - client_options.compressors = Some(vec![Compressor::Zstd { level: None }]); - send_ping_with_compression(client_options).await; -} - -#[tokio::test] -#[cfg(feature = "snappy-compression")] -async fn ping_server_with_snappy_compression() { - let mut client_options = get_client_options().await.clone(); - client_options.compressors = Some(vec![Compressor::Snappy]); - send_ping_with_compression(client_options).await; -} - -#[tokio::test] -#[cfg(all( - feature = "zstd-compression", - feature = "zlib-compression", - feature = "snappy-compression" -))] -async fn ping_server_with_all_compressors() { - let mut client_options = get_client_options().await.clone(); - client_options.compressors = Some(vec![ - Compressor::Zlib { level: None }, - Compressor::Snappy, - Compressor::Zstd { level: None }, - ]); - send_ping_with_compression(client_options).await; -} - -async fn send_ping_with_compression(client_options: ClientOptions) { - let client = 
TestClient::with_options(Some(client_options)).await; - let ret = client.database("admin").run_command(doc! {"ping": 1}).await; - - assert!(ret.is_ok()); - let ret = ret.unwrap(); - assert_eq!(ret.get("ok"), Some(Bson::Double(1.0)).as_ref()); -} diff --git a/src/concern/test.rs b/src/concern/test.rs index bbad87f9e..a549fb1b3 100644 --- a/src/concern/test.rs +++ b/src/concern/test.rs @@ -1,22 +1,13 @@ use std::time::Duration; +#[allow(deprecated)] +use crate::test::EventClient; use crate::{ bson::{doc, Bson, Document}, error::ErrorKind, - options::{ - Acknowledgment, - FindOneAndDeleteOptions, - FindOneAndReplaceOptions, - FindOneAndUpdateOptions, - FindOneOptions, - InsertManyOptions, - InsertOneOptions, - ReadConcern, - ReplaceOptions, - TransactionOptions, - WriteConcern, - }, - test::{EventClient, TestClient}, + options::{Acknowledgment, ReadConcern, WriteConcern}, + test::TestClient, + Client, Collection, }; @@ -112,9 +103,9 @@ async fn inconsistent_write_concern_rejected() { journal: true.into(), w_timeout: None, }; - let options = InsertOneOptions::builder().write_concern(wc).build(); let error = coll - .insert_one(doc! {}, options) + .insert_one(doc! {}) + .write_concern(wc) .await .expect_err("insert should fail"); assert!(matches!(*error.kind, ErrorKind::InvalidArgument { .. })); @@ -131,9 +122,9 @@ async fn unacknowledged_write_concern_rejected() { journal: false.into(), w_timeout: None, }; - let options = InsertOneOptions::builder().write_concern(wc).build(); let error = coll - .insert_one(doc! {}, options) + .insert_one(doc! {}) + .write_concern(wc) .await .expect_err("insert should fail"); assert!(matches!(*error.kind, ErrorKind::InvalidArgument { .. })); @@ -142,6 +133,7 @@ async fn unacknowledged_write_concern_rejected() { #[tokio::test] #[function_name::named] async fn snapshot_read_concern() { + #[allow(deprecated)] let client = EventClient::new().await; // snapshot read concern was introduced in 4.0 if client.server_version_lt(4, 0) { @@ -154,21 +146,20 @@ async fn snapshot_read_concern() { if client.supports_transactions() { let mut session = client.start_session().await.unwrap(); - let options = TransactionOptions::builder() + session + .start_transaction() .read_concern(ReadConcern::snapshot()) - .build(); - session.start_transaction(options).await.unwrap(); - let result = coll.find_one_with_session(None, None, &mut session).await; + .await + .unwrap(); + let result = coll.find_one(doc! {}).session(&mut session).await; assert!(result.is_ok()); assert_event_contains_read_concern(&client).await; } if client.server_version_lt(4, 9) { - let options = FindOneOptions::builder() - .read_concern(ReadConcern::snapshot()) - .build(); let error = coll - .find_one(None, options) + .find_one(doc! {}) + .read_concern(ReadConcern::snapshot()) .await .expect_err("non-transaction find one with snapshot read concern should fail"); // ensure that an error from the server is returned @@ -177,8 +168,10 @@ async fn snapshot_read_concern() { } } +#[allow(deprecated)] async fn assert_event_contains_read_concern(client: &EventClient) { let event = client + .events .get_command_started_events(&["find"]) .into_iter() .next() @@ -197,36 +190,29 @@ async fn assert_event_contains_read_concern(client: &EventClient) { #[tokio::test] #[function_name::named] async fn command_contains_write_concern_insert_one() { + #[allow(deprecated)] let client = EventClient::new().await; let coll: Collection = client.database("test").collection(function_name!()); coll.drop().await.unwrap(); - coll.insert_one( - doc! 
{ "foo": "bar" }, - InsertOneOptions::builder() - .write_concern( - WriteConcern::builder() - .w(Acknowledgment::Nodes(1)) - .journal(true) - .build(), - ) - .build(), - ) - .await - .unwrap(); - coll.insert_one( - doc! { "foo": "bar" }, - InsertOneOptions::builder() - .write_concern( - WriteConcern::builder() - .w(Acknowledgment::Nodes(1)) - .journal(false) - .build(), - ) - .build(), - ) - .await - .unwrap(); + coll.insert_one(doc! { "foo": "bar" }) + .write_concern( + WriteConcern::builder() + .w(Acknowledgment::Nodes(1)) + .journal(true) + .build(), + ) + .await + .unwrap(); + coll.insert_one(doc! { "foo": "bar" }) + .write_concern( + WriteConcern::builder() + .w(Acknowledgment::Nodes(1)) + .journal(false) + .build(), + ) + .await + .unwrap(); assert_eq!( command_write_concerns(&client, "insert"), @@ -246,36 +232,29 @@ async fn command_contains_write_concern_insert_one() { #[tokio::test] #[function_name::named] async fn command_contains_write_concern_insert_many() { + #[allow(deprecated)] let client = EventClient::new().await; let coll: Collection = client.database("test").collection(function_name!()); coll.drop().await.unwrap(); - coll.insert_many( - &[doc! { "foo": "bar" }], - InsertManyOptions::builder() - .write_concern( - WriteConcern::builder() - .w(Acknowledgment::Nodes(1)) - .journal(true) - .build(), - ) - .build(), - ) - .await - .unwrap(); - coll.insert_many( - &[doc! { "foo": "bar" }], - InsertManyOptions::builder() - .write_concern( - WriteConcern::builder() - .w(Acknowledgment::Nodes(1)) - .journal(false) - .build(), - ) - .build(), - ) - .await - .unwrap(); + coll.insert_many(&[doc! { "foo": "bar" }]) + .write_concern( + WriteConcern::builder() + .w(Acknowledgment::Nodes(1)) + .journal(true) + .build(), + ) + .await + .unwrap(); + coll.insert_many(&[doc! { "foo": "bar" }]) + .write_concern( + WriteConcern::builder() + .w(Acknowledgment::Nodes(1)) + .journal(false) + .build(), + ) + .await + .unwrap(); assert_eq!( command_write_concerns(&client, "insert"), @@ -295,11 +274,12 @@ async fn command_contains_write_concern_insert_many() { #[tokio::test] #[function_name::named] async fn command_contains_write_concern_update_one() { + #[allow(deprecated)] let client = EventClient::new().await; let coll: Collection = client.database("test").collection(function_name!()); coll.drop().await.unwrap(); - coll.insert_one(doc! { "foo": "bar" }, None).await.unwrap(); + coll.insert_one(doc! { "foo": "bar" }).await.unwrap(); coll.update_one(doc! { "foo": "bar" }, doc! { "$set": { "foo": "baz" } }) .write_concern( WriteConcern::builder() @@ -337,11 +317,12 @@ async fn command_contains_write_concern_update_one() { #[tokio::test] #[function_name::named] async fn command_contains_write_concern_update_many() { + #[allow(deprecated)] let client = EventClient::new().await; let coll: Collection = client.database("test").collection(function_name!()); coll.drop().await.unwrap(); - coll.insert_many(&[doc! { "foo": "bar" }, doc! { "foo": "bar" }], None) + coll.insert_many(&[doc! { "foo": "bar" }, doc! { "foo": "bar" }]) .await .unwrap(); coll.update_many(doc! { "foo": "bar" }, doc! { "$set": { "foo": "baz" } }) @@ -381,39 +362,30 @@ async fn command_contains_write_concern_update_many() { #[tokio::test] #[function_name::named] async fn command_contains_write_concern_replace_one() { + #[allow(deprecated)] let client = EventClient::new().await; let coll: Collection = client.database("test").collection(function_name!()); coll.drop().await.unwrap(); - coll.insert_one(doc! 
{ "foo": "bar" }, None).await.unwrap(); - coll.replace_one( - doc! { "foo": "bar" }, - doc! { "baz": "fun" }, - ReplaceOptions::builder() - .write_concern( - WriteConcern::builder() - .w(Acknowledgment::Nodes(1)) - .journal(true) - .build(), - ) - .build(), - ) - .await - .unwrap(); - coll.replace_one( - doc! { "foo": "bar" }, - doc! { "baz": "fun" }, - ReplaceOptions::builder() - .write_concern( - WriteConcern::builder() - .w(Acknowledgment::Nodes(1)) - .journal(false) - .build(), - ) - .build(), - ) - .await - .unwrap(); + coll.insert_one(doc! { "foo": "bar" }).await.unwrap(); + coll.replace_one(doc! { "foo": "bar" }, doc! { "baz": "fun" }) + .write_concern( + WriteConcern::builder() + .w(Acknowledgment::Nodes(1)) + .journal(true) + .build(), + ) + .await + .unwrap(); + coll.replace_one(doc! { "foo": "bar" }, doc! { "baz": "fun" }) + .write_concern( + WriteConcern::builder() + .w(Acknowledgment::Nodes(1)) + .journal(false) + .build(), + ) + .await + .unwrap(); assert_eq!( command_write_concerns(&client, "update"), @@ -433,11 +405,12 @@ async fn command_contains_write_concern_replace_one() { #[tokio::test] #[function_name::named] async fn command_contains_write_concern_delete_one() { + #[allow(deprecated)] let client = EventClient::new().await; let coll: Collection = client.database("test").collection(function_name!()); coll.drop().await.unwrap(); - coll.insert_many(&[doc! { "foo": "bar" }, doc! { "foo": "bar" }], None) + coll.insert_many(&[doc! { "foo": "bar" }, doc! { "foo": "bar" }]) .await .unwrap(); coll.delete_one(doc! { "foo": "bar" }) @@ -477,11 +450,12 @@ async fn command_contains_write_concern_delete_one() { #[tokio::test] #[function_name::named] async fn command_contains_write_concern_delete_many() { + #[allow(deprecated)] let client = EventClient::new().await; let coll: Collection = client.database("test").collection(function_name!()); coll.drop().await.unwrap(); - coll.insert_many(&[doc! { "foo": "bar" }, doc! { "foo": "bar" }], None) + coll.insert_many(&[doc! { "foo": "bar" }, doc! { "foo": "bar" }]) .await .unwrap(); coll.delete_many(doc! { "foo": "bar" }) @@ -493,7 +467,7 @@ async fn command_contains_write_concern_delete_many() { ) .await .unwrap(); - coll.insert_many(&[doc! { "foo": "bar" }, doc! { "foo": "bar" }], None) + coll.insert_many(&[doc! { "foo": "bar" }, doc! { "foo": "bar" }]) .await .unwrap(); coll.delete_many(doc! { "foo": "bar" }) @@ -524,39 +498,32 @@ async fn command_contains_write_concern_delete_many() { #[tokio::test] #[function_name::named] async fn command_contains_write_concern_find_one_and_delete() { + #[allow(deprecated)] let client = EventClient::new().await; let coll: Collection = client.database("test").collection(function_name!()); coll.drop().await.unwrap(); - coll.insert_many(&[doc! { "foo": "bar" }, doc! { "foo": "bar" }], None) - .await - .unwrap(); - coll.find_one_and_delete( - doc! { "foo": "bar" }, - FindOneAndDeleteOptions::builder() - .write_concern( - WriteConcern::builder() - .w(Acknowledgment::Nodes(1)) - .journal(true) - .build(), - ) - .build(), - ) - .await - .unwrap(); - coll.find_one_and_delete( - doc! { "foo": "bar" }, - FindOneAndDeleteOptions::builder() - .write_concern( - WriteConcern::builder() - .w(Acknowledgment::Nodes(1)) - .journal(false) - .build(), - ) - .build(), - ) - .await - .unwrap(); + coll.insert_many(&[doc! { "foo": "bar" }, doc! { "foo": "bar" }]) + .await + .unwrap(); + coll.find_one_and_delete(doc! 
{ "foo": "bar" }) + .write_concern( + WriteConcern::builder() + .w(Acknowledgment::Nodes(1)) + .journal(true) + .build(), + ) + .await + .unwrap(); + coll.find_one_and_delete(doc! { "foo": "bar" }) + .write_concern( + WriteConcern::builder() + .w(Acknowledgment::Nodes(1)) + .journal(false) + .build(), + ) + .await + .unwrap(); assert_eq!( command_write_concerns(&client, "findAndModify"), @@ -576,41 +543,32 @@ async fn command_contains_write_concern_find_one_and_delete() { #[tokio::test] #[function_name::named] async fn command_contains_write_concern_find_one_and_replace() { + #[allow(deprecated)] let client = EventClient::new().await; let coll: Collection = client.database("test").collection(function_name!()); coll.drop().await.unwrap(); - coll.insert_many(&[doc! { "foo": "bar" }, doc! { "foo": "bar" }], None) - .await - .unwrap(); - coll.find_one_and_replace( - doc! { "foo": "bar" }, - doc! { "baz": "fun" }, - FindOneAndReplaceOptions::builder() - .write_concern( - WriteConcern::builder() - .w(Acknowledgment::Nodes(1)) - .journal(true) - .build(), - ) - .build(), - ) - .await - .unwrap(); - coll.find_one_and_replace( - doc! { "foo": "bar" }, - doc! { "baz": "fun" }, - FindOneAndReplaceOptions::builder() - .write_concern( - WriteConcern::builder() - .w(Acknowledgment::Nodes(1)) - .journal(false) - .build(), - ) - .build(), - ) - .await - .unwrap(); + coll.insert_many(&[doc! { "foo": "bar" }, doc! { "foo": "bar" }]) + .await + .unwrap(); + coll.find_one_and_replace(doc! { "foo": "bar" }, doc! { "baz": "fun" }) + .write_concern( + WriteConcern::builder() + .w(Acknowledgment::Nodes(1)) + .journal(true) + .build(), + ) + .await + .unwrap(); + coll.find_one_and_replace(doc! { "foo": "bar" }, doc! { "baz": "fun" }) + .write_concern( + WriteConcern::builder() + .w(Acknowledgment::Nodes(1)) + .journal(false) + .build(), + ) + .await + .unwrap(); assert_eq!( command_write_concerns(&client, "findAndModify"), @@ -630,41 +588,32 @@ async fn command_contains_write_concern_find_one_and_replace() { #[tokio::test] #[function_name::named] async fn command_contains_write_concern_find_one_and_update() { + #[allow(deprecated)] let client = EventClient::new().await; let coll: Collection = client.database("test").collection(function_name!()); coll.drop().await.unwrap(); - coll.insert_many(&[doc! { "foo": "bar" }, doc! { "foo": "bar" }], None) - .await - .unwrap(); - coll.find_one_and_update( - doc! { "foo": "bar" }, - doc! { "$set": { "foo": "fun" } }, - FindOneAndUpdateOptions::builder() - .write_concern( - WriteConcern::builder() - .w(Acknowledgment::Nodes(1)) - .journal(true) - .build(), - ) - .build(), - ) - .await - .unwrap(); - coll.find_one_and_update( - doc! { "foo": "bar" }, - doc! { "$set": { "foo": "fun" } }, - FindOneAndUpdateOptions::builder() - .write_concern( - WriteConcern::builder() - .w(Acknowledgment::Nodes(1)) - .journal(false) - .build(), - ) - .build(), - ) - .await - .unwrap(); + coll.insert_many(&[doc! { "foo": "bar" }, doc! { "foo": "bar" }]) + .await + .unwrap(); + coll.find_one_and_update(doc! { "foo": "bar" }, doc! { "$set": { "foo": "fun" } }) + .write_concern( + WriteConcern::builder() + .w(Acknowledgment::Nodes(1)) + .journal(true) + .build(), + ) + .await + .unwrap(); + coll.find_one_and_update(doc! { "foo": "bar" }, doc! 
{ "$set": { "foo": "fun" } }) + .write_concern( + WriteConcern::builder() + .w(Acknowledgment::Nodes(1)) + .journal(false) + .build(), + ) + .await + .unwrap(); assert_eq!( command_write_concerns(&client, "findAndModify"), @@ -684,11 +633,12 @@ async fn command_contains_write_concern_find_one_and_update() { #[tokio::test] #[function_name::named] async fn command_contains_write_concern_aggregate() { + #[allow(deprecated)] let client = EventClient::new().await; let coll: Collection = client.database("test").collection(function_name!()); coll.drop().await.unwrap(); - coll.insert_one(doc! { "foo": "bar" }, None).await.unwrap(); + coll.insert_one(doc! { "foo": "bar" }).await.unwrap(); coll.aggregate(vec![ doc! { "$match": { "foo": "bar" } }, doc! { "$addFields": { "foo": "baz" } }, @@ -734,12 +684,15 @@ async fn command_contains_write_concern_aggregate() { #[tokio::test] #[function_name::named] async fn command_contains_write_concern_drop() { - let client = EventClient::new().await; + #[allow(deprecated)] + let client = Client::test_builder().event_client().build().await; let coll: Collection = client.database("test").collection(function_name!()); coll.drop().await.unwrap(); - client.clear_cached_events(); - coll.insert_one(doc! { "foo": "bar" }, None).await.unwrap(); + #[allow(deprecated)] + let mut events = client.events.clone(); + events.clear_cached_events(); + coll.insert_one(doc! { "foo": "bar" }).await.unwrap(); coll.drop() .write_concern( WriteConcern::builder() @@ -749,7 +702,7 @@ async fn command_contains_write_concern_drop() { ) .await .unwrap(); - coll.insert_one(doc! { "foo": "bar" }, None).await.unwrap(); + coll.insert_one(doc! { "foo": "bar" }).await.unwrap(); coll.drop() .write_concern( WriteConcern::builder() @@ -778,6 +731,7 @@ async fn command_contains_write_concern_drop() { #[tokio::test] #[function_name::named] async fn command_contains_write_concern_create_collection() { + #[allow(deprecated)] let client = EventClient::new().await; let db = client.database("test"); let coll: Collection = db.collection(function_name!()); @@ -818,8 +772,10 @@ async fn command_contains_write_concern_create_collection() { ); } +#[allow(deprecated)] fn command_write_concerns(client: &EventClient, key: &str) -> Vec { client + .events .get_command_started_events(&[key]) .into_iter() .map(|d| d.command.get_document("writeConcern").unwrap().clone()) diff --git a/src/cursor.rs b/src/cursor.rs index 8dae51d2d..d22474441 100644 --- a/src/cursor.rs +++ b/src/cursor.rs @@ -67,7 +67,7 @@ pub(crate) use common::{ /// used in conjunction with the `?` operator. /// /// ```rust -/// # use mongodb::{bson::Document, Client, error::Result}; +/// # use mongodb::{bson::{Document, doc}, Client, error::Result}; /// # /// # async fn do_stuff() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://example.com").await?; @@ -75,7 +75,7 @@ pub(crate) use common::{ /// # /// use futures::stream::{StreamExt, TryStreamExt}; /// -/// let mut cursor = coll.find(None, None).await?; +/// let mut cursor = coll.find(doc! {}).await?; /// // regular Stream uses next() and iterates over Option> /// while let Some(doc) = cursor.next().await { /// println!("{}", doc?) @@ -83,7 +83,7 @@ pub(crate) use common::{ /// // regular Stream uses collect() and collects into a Vec> /// let v: Vec> = cursor.collect().await; /// -/// let mut cursor = coll.find(None, None).await?; +/// let mut cursor = coll.find(doc! 
{}).await?; /// // TryStream uses try_next() and iterates over Result> /// while let Some(doc) = cursor.try_next().await? { /// println!("{}", doc) @@ -190,11 +190,11 @@ impl Cursor { /// calling [`Cursor::advance`] first or after [`Cursor::advance`] returns an error / false. /// /// ``` - /// # use mongodb::{Client, bson::Document, error::Result}; + /// # use mongodb::{Client, bson::{Document, doc}, error::Result}; /// # async fn foo() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://localhost:27017").await?; /// # let coll = client.database("stuff").collection::("stuff"); - /// let mut cursor = coll.find(None, None).await?; + /// let mut cursor = coll.find(doc! {}).await?; /// while cursor.advance().await? { /// println!("{:?}", cursor.current()); /// } @@ -223,11 +223,11 @@ impl Cursor { /// or without calling [`Cursor::advance`] at all may result in a panic. /// /// ``` - /// # use mongodb::{Client, bson::Document, error::Result}; + /// # use mongodb::{Client, bson::{Document, doc}, error::Result}; /// # async fn foo() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://localhost:27017").await?; /// # let coll = client.database("stuff").collection::("stuff"); - /// let mut cursor = coll.find(None, None).await?; + /// let mut cursor = coll.find(doc! {}).await?; /// while cursor.advance().await? { /// println!("{:?}", cursor.current()); /// } @@ -246,7 +246,7 @@ impl Cursor { /// true or without calling [`Cursor::advance`] at all may result in a panic. /// /// ``` - /// # use mongodb::{Client, error::Result}; + /// # use mongodb::{Client, error::Result, bson::doc}; /// # async fn foo() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://localhost:27017").await?; /// # let db = client.database("foo"); @@ -259,7 +259,7 @@ impl Cursor { /// } /// /// let coll = db.collection::("cat"); - /// let mut cursor = coll.find(None, None).await?; + /// let mut cursor = coll.find(doc! {}).await?; /// while cursor.advance().await? { /// println!("{:?}", cursor.deserialize_current()?); /// } diff --git a/src/cursor/session.rs b/src/cursor/session.rs index 39faa7a3f..6f460ac63 100644 --- a/src/cursor/session.rs +++ b/src/cursor/session.rs @@ -40,7 +40,7 @@ use crate::{ /// [`SessionCursor::stream`]: /// /// ```rust -/// # use mongodb::{bson::Document, Client, error::Result, ClientSession, SessionCursor}; +/// # use mongodb::{bson::{Document, doc}, Client, error::Result, ClientSession, SessionCursor}; /// # /// # async fn do_stuff() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://example.com").await?; @@ -48,7 +48,7 @@ use crate::{ /// # let coll = client.database("foo").collection::("bar"); /// # /// // iterate using next() -/// let mut cursor = coll.find_with_session(None, None, &mut session).await?; +/// let mut cursor = coll.find(doc! {}).session(&mut session).await?; /// while let Some(doc) = cursor.next(&mut session).await.transpose()? { /// println!("{}", doc) /// } @@ -56,7 +56,7 @@ use crate::{ /// // iterate using `Stream`: /// use futures::stream::TryStreamExt; /// -/// let mut cursor = coll.find_with_session(None, None, &mut session).await?; +/// let mut cursor = coll.find(doc! {}).session(&mut session).await?; /// let results: Vec<_> = cursor.stream(&mut session).try_collect().await?; /// # /// # Ok(()) @@ -129,23 +129,23 @@ where /// use futures::stream::TryStreamExt; /// /// // iterate over the results - /// let mut cursor = coll.find_with_session(doc! 
{ "x": 1 }, None, &mut session).await?; + /// let mut cursor = coll.find(doc! { "x": 1 }).session(&mut session).await?; /// while let Some(doc) = cursor.stream(&mut session).try_next().await? { /// println!("{}", doc); /// } /// /// // collect the results - /// let mut cursor1 = coll.find_with_session(doc! { "x": 1 }, None, &mut session).await?; + /// let mut cursor1 = coll.find(doc! { "x": 1 }).session(&mut session).await?; /// let v: Vec = cursor1.stream(&mut session).try_collect().await?; /// /// // use session between iterations - /// let mut cursor2 = coll.find_with_session(doc! { "x": 1 }, None, &mut session).await?; + /// let mut cursor2 = coll.find(doc! { "x": 1 }).session(&mut session).await?; /// loop { /// let doc = match cursor2.stream(&mut session).try_next().await? { /// Some(d) => d, /// None => break, /// }; - /// other_coll.insert_one_with_session(doc, None, &mut session).await?; + /// other_coll.insert_one(doc).session(&mut session).await?; /// } /// # Ok::<(), mongodb::error::Error>(()) /// # }; @@ -173,9 +173,9 @@ where /// # let coll = client.database("foo").collection::("bar"); /// # let other_coll = coll.clone(); /// # let mut session = client.start_session().await?; - /// let mut cursor = coll.find_with_session(doc! { "x": 1 }, None, &mut session).await?; + /// let mut cursor = coll.find(doc! { "x": 1 }).session(&mut session).await?; /// while let Some(doc) = cursor.next(&mut session).await.transpose()? { - /// other_coll.insert_one_with_session(doc, None, &mut session).await?; + /// other_coll.insert_one(doc).session(&mut session).await?; /// } /// # Ok::<(), mongodb::error::Error>(()) /// # }; @@ -223,12 +223,12 @@ impl SessionCursor { /// [`SessionCursor::advance`] returns an error / false. /// /// ``` - /// # use mongodb::{Client, bson::Document, error::Result}; + /// # use mongodb::{Client, bson::{doc, Document}, error::Result}; /// # async fn foo() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://localhost:27017").await?; /// # let mut session = client.start_session().await?; /// # let coll = client.database("stuff").collection::("stuff"); - /// let mut cursor = coll.find_with_session(None, None, &mut session).await?; + /// let mut cursor = coll.find(doc! {}).session(&mut session).await?; /// while cursor.advance(&mut session).await? { /// println!("{:?}", cursor.current()); /// } @@ -256,12 +256,12 @@ impl SessionCursor { /// true or without calling [`SessionCursor::advance`] at all may result in a panic. /// /// ``` - /// # use mongodb::{Client, bson::Document, error::Result}; + /// # use mongodb::{Client, bson::{Document, doc}, error::Result}; /// # async fn foo() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://localhost:27017").await?; /// # let mut session = client.start_session().await?; /// # let coll = client.database("stuff").collection::("stuff"); - /// let mut cursor = coll.find_with_session(None, None, &mut session).await?; + /// let mut cursor = coll.find(doc! {}).session(&mut session).await?; /// while cursor.advance(&mut session).await? { /// println!("{:?}", cursor.current()); /// } @@ -281,7 +281,7 @@ impl SessionCursor { /// true or without calling [`SessionCursor::advance`] at all may result in a panic. 
/// /// ``` - /// # use mongodb::{Client, error::Result}; + /// # use mongodb::{Client, error::Result, bson::doc}; /// # async fn foo() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://localhost:27017").await?; /// # let mut session = client.start_session().await?; @@ -295,7 +295,7 @@ impl SessionCursor { /// } /// /// let coll = db.collection::("cat"); - /// let mut cursor = coll.find_with_session(None, None, &mut session).await?; + /// let mut cursor = coll.find(doc! {}).session(&mut session).await?; /// while cursor.advance(&mut session).await? { /// println!("{:?}", cursor.deserialize_current()?); /// } diff --git a/src/db.rs b/src/db.rs index dd16e5498..f9da393d7 100644 --- a/src/db.rs +++ b/src/db.rs @@ -117,7 +117,7 @@ impl Database { /// /// This method does not send or receive anything across the wire to the database, so it can be /// used repeatedly without incurring any costs from I/O. - pub fn collection(&self, name: &str) -> Collection { + pub fn collection(&self, name: &str) -> Collection { Collection::new(self.clone(), name, None) } @@ -130,7 +130,7 @@ impl Database { /// /// This method does not send or receive anything across the wire to the database, so it can be /// used repeatedly without incurring any costs from I/O. - pub fn collection_with_options( + pub fn collection_with_options( &self, name: &str, options: CollectionOptions, diff --git a/src/db/action/create_collection.rs b/src/db/action/create_collection.rs index 36de79267..45036b4d9 100644 --- a/src/db/action/create_collection.rs +++ b/src/db/action/create_collection.rs @@ -6,51 +6,52 @@ use crate::{ Namespace, }; -action_impl! { - impl<'a> Action for CreateCollection<'a> { - type Future = CreateCollectionFuture; +#[action_impl] +impl<'a> Action for CreateCollection<'a> { + type Future = CreateCollectionFuture; - async fn execute(mut self) -> Result<()> { - resolve_options!(self.db, self.options, [write_concern]); + async fn execute(mut self) -> Result<()> { + resolve_options!(self.db, self.options, [write_concern]); - let ns = Namespace { - db: self.db.name().to_string(), - coll: self.name, - }; - - #[cfg(feature = "in-use-encryption-unstable")] - let has_encrypted_fields = { - self.db.resolve_encrypted_fields(&ns, &mut self.options).await; - self.db.create_aux_collections(&ns, &self.options, self.session.as_deref_mut()) - .await?; - self.options - .as_ref() - .and_then(|o| o.encrypted_fields.as_ref()) - .is_some() - }; + let ns = Namespace { + db: self.db.name().to_string(), + coll: self.name, + }; - let create = op::Create::new(ns.clone(), self.options); - self.db.client() - .execute_operation(create, self.session.as_deref_mut()) + #[cfg(feature = "in-use-encryption-unstable")] + let has_encrypted_fields = { + self.db + .resolve_encrypted_fields(&ns, &mut self.options) + .await; + self.db + .create_aux_collections(&ns, &self.options, self.session.as_deref_mut()) .await?; + self.options + .as_ref() + .and_then(|o| o.encrypted_fields.as_ref()) + .is_some() + }; - #[cfg(feature = "in-use-encryption-unstable")] - if has_encrypted_fields { - use crate::action::Action; - use bson::{doc, Document}; - let coll = self.db.collection::(&ns.coll); - coll.create_index( - crate::IndexModel { - keys: doc! 
{"__safeContent__": 1}, - options: None, - } - ) - .optional(self.session.as_deref_mut(), |a, s| a.session(s)) - .await?; - } + let create = op::Create::new(ns.clone(), self.options); + self.db + .client() + .execute_operation(create, self.session.as_deref_mut()) + .await?; - Ok(()) + #[cfg(feature = "in-use-encryption-unstable")] + if has_encrypted_fields { + use crate::action::Action; + use bson::{doc, Document}; + let coll = self.db.collection::(&ns.coll); + coll.create_index(crate::IndexModel { + keys: doc! {"__safeContent__": 1}, + options: None, + }) + .optional(self.session.as_deref_mut(), |a, s| a.session(s)) + .await?; } + + Ok(()) } } diff --git a/src/error.rs b/src/error.rs index 300092a53..2707a2bff 100644 --- a/src/error.rs +++ b/src/error.rs @@ -30,6 +30,7 @@ const RETRYABLE_WRITE_CODES: [i32; 12] = [ 11600, 11602, 10107, 13435, 13436, 189, 91, 7, 6, 89, 9001, 262, ]; const UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL_CODES: [i32; 3] = [50, 64, 91]; +const REAUTHENTICATION_REQUIRED_CODE: i32 = 391; /// Retryable write error label. This label will be added to an error when the error is /// write-retryable. @@ -62,7 +63,7 @@ pub struct Error { impl Error { /// Create a new `Error` wrapping an arbitrary value. Can be used to abort transactions in - /// callbacks for [`ClientSession::with_transaction`](crate::ClientSession::with_transaction). + /// callbacks for [`StartTransaction::and_run`](crate::action::StartTransaction::and_run). pub fn custom(e: impl Any + Send + Sync) -> Self { Self::new(ErrorKind::Custom(Arc::new(e)), None::>) } @@ -128,6 +129,13 @@ impl Error { .into() } + pub(crate) fn invalid_response(message: impl Into) -> Error { + ErrorKind::InvalidResponse { + message: message.into(), + } + .into() + } + /// Construct a generic network timeout error. pub(crate) fn network_timeout() -> Error { ErrorKind::Io(Arc::new(std::io::ErrorKind::TimedOut.into())).into() @@ -388,6 +396,11 @@ impl Error { .unwrap_or(false) } + /// If this error corresponds to a "reauthentication required" error. + pub(crate) fn is_reauthentication_required(&self) -> bool { + self.sdam_code() == Some(REAUTHENTICATION_REQUIRED_CODE) + } + /// If this error corresponds to a "node is recovering" error as per the SDAM spec. pub(crate) fn is_recovering(&self) -> bool { self.sdam_code() diff --git a/src/gridfs.rs b/src/gridfs.rs index 372fe361d..5d343270f 100644 --- a/src/gridfs.rs +++ b/src/gridfs.rs @@ -11,16 +11,9 @@ use serde_with::skip_serializing_none; use crate::{ bson::{doc, oid::ObjectId, Bson, DateTime, Document, RawBinaryRef}, - cursor::Cursor, - error::{Error, ErrorKind, GridFsErrorKind, GridFsFileIdentifier, Result}, - options::{ - CollectionOptions, - FindOneOptions, - FindOptions, - ReadConcern, - SelectionCriteria, - WriteConcern, - }, + checked::Checked, + error::Error, + options::{CollectionOptions, ReadConcern, SelectionCriteria, WriteConcern}, Collection, Database, }; @@ -83,19 +76,14 @@ impl FilesCollectionDocument { fn n_from_vals(length: u64, chunk_size_bytes: u32) -> u32 { let chunk_size_bytes = chunk_size_bytes as u64; - let n = length / chunk_size_bytes + u64::from(length % chunk_size_bytes != 0); - n as u32 - } - - /// Returns the expected length of a chunk given its index. 
- fn expected_chunk_length(&self, n: u32) -> u32 { - Self::expected_chunk_length_from_vals(self.length, self.chunk_size_bytes, n) + let n = Checked::new(length) / chunk_size_bytes + u64::from(length % chunk_size_bytes != 0); + n.try_into().unwrap() } fn expected_chunk_length_from_vals(length: u64, chunk_size_bytes: u32, n: u32) -> u32 { let remainder = length % (chunk_size_bytes as u64); if n == Self::n_from_vals(length, chunk_size_bytes) - 1 && remainder != 0 { - remainder as u32 + Checked::new(remainder).try_into().unwrap() } else { chunk_size_bytes } @@ -187,7 +175,7 @@ impl GridFsBucket { } /// Gets the chunk size in bytes for the bucket. - fn chunk_size_bytes(&self) -> u32 { + pub(crate) fn chunk_size_bytes(&self) -> u32 { self.inner .options .chunk_size_bytes @@ -203,70 +191,6 @@ impl GridFsBucket { pub(crate) fn chunks(&self) -> &Collection> { &self.inner.chunks } - - /// Deletes the [`FilesCollectionDocument`] with the given `id` and its associated chunks from - /// this bucket. This method returns an error if the `id` does not match any files in the - /// bucket. - pub async fn delete(&self, id: Bson) -> Result<()> { - let delete_result = self.files().delete_one(doc! { "_id": id.clone() }).await?; - // Delete chunks regardless of whether a file was found. This will remove any possibly - // orphaned chunks. - self.chunks() - .delete_many(doc! { "files_id": id.clone() }) - .await?; - - if delete_result.deleted_count == 0 { - return Err(ErrorKind::GridFs(GridFsErrorKind::FileNotFound { - identifier: GridFsFileIdentifier::Id(id), - }) - .into()); - } - - Ok(()) - } - - /// Finds and returns the [`FilesCollectionDocument`]s within this bucket that match the given - /// filter. - pub async fn find( - &self, - filter: Document, - options: impl Into>, - ) -> Result> { - let find_options = options.into().map(FindOptions::from); - self.files().find(filter, find_options).await - } - - /// Finds and returns a single [`FilesCollectionDocument`] within this bucket that matches the - /// given filter. - pub async fn find_one( - &self, - filter: Document, - options: impl Into>, - ) -> Result> { - let find_options = options.into().map(FindOneOptions::from); - self.files().find_one(filter, find_options).await - } - - /// Renames the file with the given 'id' to the provided `new_filename`. This method returns an - /// error if the `id` does not match any files in the bucket. - pub async fn rename(&self, id: Bson, new_filename: impl AsRef) -> Result<()> { - self.files() - .update_one( - doc! { "_id": id }, - doc! { "$set": { "filename": new_filename.as_ref() } }, - ) - .await?; - - Ok(()) - } - - /// Removes all of the files and their associated chunks from this bucket. 
- pub async fn drop(&self) -> Result<()> { - self.files().drop().await?; - self.chunks().drop().await?; - - Ok(()) - } } impl Error { diff --git a/src/gridfs/download.rs b/src/gridfs/download.rs index ef23228f6..7569538b2 100644 --- a/src/gridfs/download.rs +++ b/src/gridfs/download.rs @@ -1,5 +1,4 @@ use std::{ - marker::Unpin, ops::Range, pin::Pin, task::{Context, Poll}, @@ -7,200 +6,17 @@ use std::{ use futures_util::{ future::{BoxFuture, FutureExt}, - io::{AsyncRead, AsyncWrite, AsyncWriteExt}, + io::AsyncRead, }; -use super::{options::GridFsDownloadByNameOptions, Chunk, FilesCollectionDocument, GridFsBucket}; +use super::{Chunk, FilesCollectionDocument}; use crate::{ - bson::{doc, Bson}, - error::{ErrorKind, GridFsErrorKind, GridFsFileIdentifier, Result}, - options::{FindOneOptions, FindOptions}, + bson::doc, + error::{ErrorKind, GridFsErrorKind, Result}, Collection, Cursor, }; -// Utility functions for finding files within the bucket. -impl GridFsBucket { - async fn find_file_by_id(&self, id: &Bson) -> Result { - match self.find_one(doc! { "_id": id }, None).await? { - Some(file) => Ok(file), - None => Err(ErrorKind::GridFs(GridFsErrorKind::FileNotFound { - identifier: GridFsFileIdentifier::Id(id.clone()), - }) - .into()), - } - } - - async fn find_file_by_name( - &self, - filename: &str, - options: Option, - ) -> Result { - let revision = options.and_then(|opts| opts.revision).unwrap_or(-1); - let (sort, skip) = if revision >= 0 { - (1, revision) - } else { - (-1, -revision - 1) - }; - let options = FindOneOptions::builder() - .sort(doc! { "uploadDate": sort }) - .skip(skip as u64) - .build(); - - match self - .files() - .find_one(doc! { "filename": filename }, options) - .await? - { - Some(fcd) => Ok(fcd), - None => { - if self - .files() - .find_one(doc! { "filename": filename }, None) - .await? - .is_some() - { - Err(ErrorKind::GridFs(GridFsErrorKind::RevisionNotFound { revision }).into()) - } else { - Err(ErrorKind::GridFs(GridFsErrorKind::FileNotFound { - identifier: GridFsFileIdentifier::Filename(filename.into()), - }) - .into()) - } - } - } - } -} - -// User functions for downloading to writers. -impl GridFsBucket { - /// Downloads the contents of the stored file specified by `id` and writes the contents to the - /// `destination`, which may be any type that implements the [`futures_io::AsyncWrite`] trait. - /// - /// To download to a type that implements [`tokio::io::AsyncWrite`], use the - /// [`tokio_util::compat`] module to convert between types. - /// - /// ```rust - /// # use mongodb::{bson::Bson, error::Result, gridfs::GridFsBucket}; - /// # async fn compat_example( - /// # bucket: GridFsBucket, - /// # tokio_writer: impl tokio::io::AsyncWrite + Unpin, - /// # id: Bson, - /// # ) -> Result<()> { - /// use tokio_util::compat::TokioAsyncWriteCompatExt; - /// - /// let futures_writer = tokio_writer.compat_write(); - /// bucket.download_to_futures_0_3_writer(id, futures_writer).await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// Note that once an `AsyncWrite` trait is stabilized in the standard library, this method will - /// be deprecated in favor of one that accepts a `std::io::AsyncWrite` source. 
- pub async fn download_to_futures_0_3_writer(&self, id: Bson, destination: T) -> Result<()> - where - T: AsyncWrite + Unpin, - { - let file = self.find_file_by_id(&id).await?; - self.download_to_writer_common(file, destination).await - } - - /// Downloads the contents of the stored file specified by `filename` and writes the contents to - /// the `destination`, which may be any type that implements the [`futures_io::AsyncWrite`] - /// trait. - /// - /// If there are multiple files in the bucket with the given filename, the `revision` in the - /// options provided is used to determine which one to download. See the documentation for - /// [`GridFsDownloadByNameOptions`] for details on how to specify a revision. If no revision is - /// provided, the file with `filename` most recently uploaded will be downloaded. - /// - /// To download to a type that implements [`tokio::io::AsyncWrite`], use the - /// [`tokio_util::compat`] module to convert between types. - /// - /// ```rust - /// # use mongodb::{bson::Bson, error::Result, gridfs::GridFsBucket}; - /// # async fn compat_example( - /// # bucket: GridFsBucket, - /// # tokio_writer: impl tokio::io::AsyncWrite + Unpin, - /// # id: Bson, - /// # ) -> Result<()> { - /// use tokio_util::compat::TokioAsyncWriteCompatExt; - /// - /// let futures_writer = tokio_writer.compat_write(); - /// bucket.download_to_futures_0_3_writer_by_name("example_file", futures_writer, None).await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// Note that once an `AsyncWrite` trait is stabilized in the standard library, this method will - /// be deprecated in favor of one that accepts a `std::io::AsyncWrite` source. - pub async fn download_to_futures_0_3_writer_by_name( - &self, - filename: impl AsRef, - destination: T, - options: impl Into>, - ) -> Result<()> - where - T: AsyncWrite + Unpin, - { - let file = self - .find_file_by_name(filename.as_ref(), options.into()) - .await?; - self.download_to_writer_common(file, destination).await - } - - async fn download_to_writer_common( - &self, - file: FilesCollectionDocument, - mut destination: T, - ) -> Result<()> - where - T: AsyncWrite + Unpin, - { - if file.length == 0 { - return Ok(()); - } - - let options = FindOptions::builder().sort(doc! { "n": 1 }).build(); - let mut cursor = self - .chunks() - .find(doc! { "files_id": &file.id }, options) - .await?; - - let mut n = 0; - while cursor.advance().await? { - let chunk = cursor.deserialize_current()?; - if chunk.n != n { - return Err(ErrorKind::GridFs(GridFsErrorKind::MissingChunk { n }).into()); - } - - let chunk_length = chunk.data.bytes.len(); - let expected_length = file.expected_chunk_length(n); - if chunk_length != expected_length as usize { - return Err(ErrorKind::GridFs(GridFsErrorKind::WrongSizeChunk { - actual_size: chunk_length, - expected_size: expected_length, - n, - }) - .into()); - } - - destination.write_all(chunk.data.bytes).await?; - n += 1; - } - - if n != file.n() { - return Err(ErrorKind::GridFs(GridFsErrorKind::WrongNumberOfChunks { - actual_number: n, - expected_number: file.n(), - }) - .into()); - } - - Ok(()) - } -} - /// A stream from which a file stored in a GridFS bucket can be downloaded. /// /// # Downloading from the Stream @@ -220,6 +36,9 @@ impl GridFsBucket { /// # } /// ``` /// +/// If the destination is a local file (or other `AsyncWrite` byte sink), the contents of the stream +/// can be efficiently written to it with [`futures_util::io::copy`]. 
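+///
+/// For example, a minimal sketch of draining the stream into an in-memory buffer (the
+/// `Vec<u8>` destination and the `copy_example` scaffolding are illustrative; any
+/// `AsyncWrite` sink works the same way):
+///
+/// ```rust
+/// # use mongodb::{bson::Bson, error::Result, gridfs::GridFsBucket};
+/// # async fn copy_example(bucket: GridFsBucket, id: Bson) -> Result<()> {
+/// use futures_util::io::copy;
+///
+/// let download_stream = bucket.open_download_stream(id).await?;
+/// // `GridFsDownloadStream` implements `futures_io::AsyncRead`, so `copy` drives the whole
+/// // download, writing each chunk into the destination as it is read.
+/// let mut destination = Vec::new();
+/// copy(download_stream, &mut destination).await?;
+/// # Ok(())
+/// # }
+/// ```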
+/// /// # Using [`tokio::io::AsyncRead`] /// Users who prefer to use tokio's `AsyncRead` trait can use the [`tokio_util::compat`] module. /// @@ -265,15 +84,17 @@ impl State { } impl GridFsDownloadStream { - async fn new( + pub(crate) async fn new( file: FilesCollectionDocument, chunks: &Collection>, ) -> Result { let initial_state = if file.length == 0 { State::Done } else { - let options = FindOptions::builder().sort(doc! { "n": 1 }).build(); - let cursor = chunks.find(doc! { "files_id": &file.id }, options).await?; + let cursor = chunks + .find(doc! { "files_id": &file.id }) + .sort(doc! { "n": 1 }) + .await?; State::Idle(Some(Idle { buffer: Vec::new(), cursor: Box::new(cursor), @@ -394,31 +215,3 @@ async fn get_bytes( Ok((buffer, cursor)) } - -// User functions for creating download streams. -impl GridFsBucket { - /// Opens and returns a [`GridFsDownloadStream`] from which the application can read - /// the contents of the stored file specified by `id`. - pub async fn open_download_stream(&self, id: Bson) -> Result { - let file = self.find_file_by_id(&id).await?; - GridFsDownloadStream::new(file, self.chunks()).await - } - - /// Opens and returns a [`GridFsDownloadStream`] from which the application can read - /// the contents of the stored file specified by `filename`. - /// - /// If there are multiple files in the bucket with the given filename, the `revision` in the - /// options provided is used to determine which one to download. See the documentation for - /// [`GridFsDownloadByNameOptions`] for details on how to specify a revision. If no revision is - /// provided, the file with `filename` most recently uploaded will be downloaded. - pub async fn open_download_stream_by_name( - &self, - filename: impl AsRef, - options: impl Into>, - ) -> Result { - let file = self - .find_file_by_name(filename.as_ref(), options.into()) - .await?; - GridFsDownloadStream::new(file, self.chunks()).await - } -} diff --git a/src/gridfs/upload.rs b/src/gridfs/upload.rs index a01bad476..404bf641d 100644 --- a/src/gridfs/upload.rs +++ b/src/gridfs/upload.rs @@ -6,160 +6,32 @@ use std::{ use futures_util::{ future::{BoxFuture, FutureExt}, - io::{AsyncRead, AsyncReadExt, AsyncWrite}, + io::AsyncWrite, stream::TryStreamExt, }; -use super::{options::GridFsUploadOptions, Chunk, FilesCollectionDocument, GridFsBucket}; +use super::{Chunk, FilesCollectionDocument, GridFsBucket}; use crate::{ action::Action, bson::{doc, oid::ObjectId, spec::BinarySubtype, Bson, DateTime, Document, RawBinaryRef}, bson_util::get_int, + checked::Checked, client::AsyncDropToken, error::{Error, ErrorKind, GridFsErrorKind, Result}, index::IndexModel, - options::{FindOneOptions, ReadPreference, SelectionCriteria}, + options::{ReadPreference, SelectionCriteria}, Collection, }; -// User functions for uploading from readers. impl GridFsBucket { - /// Uploads a user file to the bucket. Bytes are read from `source`, which may be any type that - /// implements the [`futures_io::AsyncRead`] trait, and stored in chunks in the bucket's - /// chunks collection. After all the chunks have been uploaded, a corresponding - /// [`FilesCollectionDocument`] is stored in the bucket's files collection. - /// - /// This method generates an [`ObjectId`] for the `id` field of the - /// [`FilesCollectionDocument`] and returns it. - /// - /// To upload from a type that implements [`tokio::io::AsyncRead`], use the - /// [`tokio_util::compat`] module to convert between types. 
- /// - /// ```rust - /// # use mongodb::{error::Result, gridfs::GridFsBucket}; - /// # async fn compat_example( - /// # bucket: GridFsBucket, - /// # tokio_reader: impl tokio::io::AsyncRead + Unpin) - /// # -> Result<()> { - /// use tokio_util::compat::TokioAsyncReadCompatExt; - /// - /// let futures_reader = tokio_reader.compat(); - /// bucket.upload_from_futures_0_3_reader("example_file", futures_reader, None).await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// Note that once an `AsyncRead` trait is stabilized in the standard library, this method will - /// be deprecated in favor of one that accepts a `std::io::AsyncRead` source. - pub async fn upload_from_futures_0_3_reader( - &self, - filename: impl AsRef, - source: T, - options: impl Into>, - ) -> Result - where - T: AsyncRead + Unpin, - { - let id = ObjectId::new(); - self.upload_from_futures_0_3_reader_with_id(id.into(), filename, source, options) - .await?; - Ok(id) - } - - /// Uploads a user file to the bucket. Bytes are read from `source`, which may be any type that - /// implements the [`futures_io::AsyncRead`] trait, and stored in chunks in the bucket's - /// chunks collection. After all the chunks have been uploaded, a corresponding - /// [`FilesCollectionDocument`] with the given `id` is stored in the bucket's files collection. - /// - /// To upload from a type that implements [`tokio::io::AsyncRead`], use the - /// [`tokio_util::compat`] module to convert between types. - /// - /// ```rust - /// # use mongodb::{bson::Bson, error::Result, gridfs::GridFsBucket}; - /// # async fn compat_example( - /// # bucket: GridFsBucket, - /// # tokio_reader: impl tokio::io::AsyncRead + Unpin, - /// # id: Bson, - /// # ) -> Result<()> { - /// use tokio_util::compat::TokioAsyncReadCompatExt; - /// - /// let futures_reader = tokio_reader.compat(); - /// bucket.upload_from_futures_0_3_reader_with_id(id, "example_file", futures_reader, None).await?; - /// # Ok(()) - /// # } - /// ``` - /// - /// Note that once an `AsyncRead` trait is stabilized in the standard library, this method will - /// be deprecated in favor of one that accepts a `std::io::AsyncRead` source. 
- pub async fn upload_from_futures_0_3_reader_with_id( - &self, - id: Bson, - filename: impl AsRef, - mut source: T, - options: impl Into>, - ) -> Result<()> - where - T: AsyncRead + Unpin, - { - let options = options.into(); - - self.create_indexes().await?; - - let chunk_size_bytes = options - .as_ref() - .and_then(|opts| opts.chunk_size_bytes) - .unwrap_or_else(|| self.chunk_size_bytes()); - let mut length = 0u64; - let mut n = 0; - - let mut buf = vec![0u8; chunk_size_bytes as usize]; - loop { - let bytes_read = match read_exact_or_to_end(&mut buf, &mut source).await { - Ok(0) => break, - Ok(n) => n, - Err(error) => { - return clean_up_chunks(id.clone(), self.chunks().clone(), Some(error)).await; - } - }; - - let chunk = Chunk { - id: ObjectId::new(), - files_id: id.clone(), - n, - data: RawBinaryRef { - subtype: BinarySubtype::Generic, - bytes: &buf[..bytes_read], - }, - }; - self.chunks().insert_one(chunk, None).await?; - - length += bytes_read as u64; - n += 1; - } - - let file = FilesCollectionDocument { - id, - length, - chunk_size_bytes, - upload_date: DateTime::now(), - filename: Some(filename.as_ref().to_string()), - metadata: options.and_then(|opts| opts.metadata), - }; - self.files().insert_one(file, None).await?; - - Ok(()) - } - async fn create_indexes(&self) -> Result<()> { if !self.inner.created_indexes.load(Ordering::SeqCst) { - let find_options = FindOneOptions::builder() - .selection_criteria(SelectionCriteria::ReadPreference(ReadPreference::Primary)) - .projection(doc! { "_id": 1 }) - .build(); if self .files() .clone_with_type::() - .find_one(None, find_options) + .find_one(doc! {}) + .selection_criteria(SelectionCriteria::ReadPreference(ReadPreference::Primary)) + .projection(doc! { "_id": 1 }) .await? .is_none() { @@ -174,7 +46,11 @@ impl GridFsBucket { Ok(()) } - async fn create_index(&self, coll: &Collection, keys: Document) -> Result<()> { + async fn create_index( + &self, + coll: &Collection, + keys: Document, + ) -> Result<()> { // listIndexes returns an error if the collection has not yet been created. // Ignore NamespaceExists errors if the collection has already been created. if let Err(error) = self @@ -224,25 +100,6 @@ impl GridFsBucket { } } -async fn read_exact_or_to_end(buf: &mut [u8], source: &mut T) -> Result -where - T: AsyncRead + Unpin, -{ - let mut total_bytes_read = 0; - loop { - let bytes_read = match source.read(&mut buf[total_bytes_read..]).await? { - 0 => break, - n => n, - }; - total_bytes_read += bytes_read; - if total_bytes_read == buf.len() { - break; - } - } - - Ok(total_bytes_read) -} - /// A stream to which bytes can be written to be uploaded to a GridFS bucket. /// /// # Uploading to the Stream @@ -262,13 +119,16 @@ where /// use futures_util::io::AsyncWriteExt; /// /// let bytes = vec![0u8; 100]; -/// let mut upload_stream = bucket.open_upload_stream("example_file", None); +/// let mut upload_stream = bucket.open_upload_stream("example_file").await?; /// upload_stream.write_all(&bytes[..]).await?; /// upload_stream.close().await?; /// # Ok(()) /// # } /// ``` /// +/// If the data is a local file (or other `AsyncRead` byte source), its contents can be efficiently +/// written to the stream with [`futures_util::io::copy`]. +/// /// # Aborting the Stream /// A stream can be aborted by calling the `abort` method. This will remove any chunks associated /// with the stream from the chunks collection. 
It is an error to write to, abort, or close the @@ -280,7 +140,7 @@ where /// use futures_util::io::AsyncWriteExt; /// /// let bytes = vec![0u8; 100]; -/// let mut upload_stream = bucket.open_upload_stream("example_file", None); +/// let mut upload_stream = bucket.open_upload_stream("example_file").await?; /// upload_stream.write_all(&bytes[..]).await?; /// upload_stream.abort().await?; /// # Ok(()) @@ -307,11 +167,13 @@ where /// /// ```rust /// # use mongodb::gridfs::{GridFsBucket, GridFsUploadStream}; -/// # fn compat_example(bucket: GridFsBucket) { +/// # use mongodb::error::Result; +/// # async fn compat_example(bucket: GridFsBucket) -> Result<()> { /// use tokio_util::compat::FuturesAsyncWriteCompatExt; /// -/// let futures_upload_stream = bucket.open_upload_stream("example_file", None); +/// let futures_upload_stream = bucket.open_upload_stream("example_file").await?; /// let tokio_upload_stream = futures_upload_stream.compat_write(); +/// # Ok(()) /// # } /// ``` pub struct GridFsUploadStream { @@ -358,6 +220,26 @@ impl State { } impl GridFsUploadStream { + pub(crate) fn new( + bucket: GridFsBucket, + id: Bson, + filename: String, + chunk_size_bytes: u32, + metadata: Option, + drop_token: AsyncDropToken, + ) -> Self { + Self { + bucket, + state: State::Idle(Some(Vec::new())), + current_n: 0, + id, + filename: Some(filename), + chunk_size_bytes, + metadata: Some(metadata), + drop_token, + } + } + /// Gets the stream's unique [`Bson`] identifier. This value will be the `id` field for the /// [`FilesCollectionDocument`] uploaded to the files collection when the stream is closed. pub fn id(&self) -> &Bson { @@ -505,31 +387,32 @@ async fn write_bytes( chunk_size_bytes: u32, files_id: Bson, ) -> Result<(u32, Vec)> { + let chunk_size_bytes: usize = Checked::new(chunk_size_bytes).try_into()?; bucket.create_indexes().await?; - let mut n = 0; + let mut n = Checked::new(0); let mut chunks = vec![]; - while buffer.len() as u32 - (n * chunk_size_bytes) >= chunk_size_bytes { + while (Checked::new(buffer.len()) - (n * chunk_size_bytes)).get()? >= chunk_size_bytes { let start = n * chunk_size_bytes; let end = (n + 1) * chunk_size_bytes; let chunk = Chunk { id: ObjectId::new(), files_id: files_id.clone(), - n: starting_n + n, + n: starting_n + n.try_into::()?, data: RawBinaryRef { subtype: BinarySubtype::Generic, - bytes: &buffer[(start as usize)..(end as usize)], + bytes: &buffer[start.get()?..end.get()?], }, }; n += 1; chunks.push(chunk); } - match bucket.chunks().insert_many(chunks, None).await { + match bucket.chunks().insert_many(chunks).await { Ok(_) => { - buffer.drain(..(n * chunk_size_bytes) as usize); - Ok((n, buffer)) + buffer.drain(..(n * chunk_size_bytes).get()?); + Ok((n.try_into()?, buffer)) } Err(error) => match clean_up_chunks(files_id, bucket.chunks().clone(), Some(error)).await { // clean_up_chunks will always return an error if one is passed in, so this case is @@ -553,9 +436,9 @@ async fn close(bucket: GridFsBucket, buffer: Vec, file: FilesCollectionDocum bytes: &buffer[..], }, }; - bucket.chunks().insert_one(final_chunk, None).await?; + bucket.chunks().insert_one(final_chunk).await?; } - bucket.files().insert_one(&file, None).await?; + bucket.files().insert_one(&file).await?; Ok(()) } .await; @@ -588,43 +471,3 @@ fn get_closed_error() -> futures_io::Error { let error: Error = ErrorKind::GridFs(GridFsErrorKind::UploadStreamClosed).into(); error.into_futures_io_error() } - -// User functions for creating upload streams. 
-impl GridFsBucket { - /// Creates and returns a [`GridFsUploadStream`] that the application can write the contents of - /// the file to. This method generates a unique [`ObjectId`] for the corresponding - /// [`FilesCollectionDocument`]'s `id` field that can be accessed via the stream's `id` - /// method. - pub fn open_upload_stream( - &self, - filename: impl AsRef, - options: impl Into>, - ) -> GridFsUploadStream { - self.open_upload_stream_with_id(ObjectId::new().into(), filename, options) - } - - /// Opens a [`GridFsUploadStream`] that the application can write the contents of the file to. - /// The provided `id` will be used for the corresponding [`FilesCollectionDocument`]'s `id` - /// field. - pub fn open_upload_stream_with_id( - &self, - id: Bson, - filename: impl AsRef, - options: impl Into>, - ) -> GridFsUploadStream { - let options = options.into(); - GridFsUploadStream { - bucket: self.clone(), - state: State::Idle(Some(Vec::new())), - current_n: 0, - id, - filename: Some(filename.as_ref().into()), - chunk_size_bytes: options - .as_ref() - .and_then(|opts| opts.chunk_size_bytes) - .unwrap_or_else(|| self.chunk_size_bytes()), - metadata: Some(options.and_then(|opts| opts.metadata)), - drop_token: self.client().register_async_drop(), - } - } -} diff --git a/src/hello.rs b/src/hello.rs index 1b2625fbc..21ba761d6 100644 --- a/src/hello.rs +++ b/src/hello.rs @@ -54,7 +54,13 @@ pub(crate) fn hello_command( if let Some(opts) = awaitable_options { command.insert("topologyVersion", opts.topology_version); - command.insert("maxAwaitTimeMS", opts.max_await_time.as_millis() as i64); + command.insert( + "maxAwaitTimeMS", + opts.max_await_time + .as_millis() + .try_into() + .unwrap_or(i64::MAX), + ); } let mut command = Command::new(command_name, "admin", command); diff --git a/src/lib.rs b/src/lib.rs index 88ba5d39b..123c45cf9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -98,7 +98,7 @@ //! ]; //! //! // Insert some documents into the "mydb.books" collection. -//! collection.insert_many(docs, None).await?; +//! collection.insert_many(docs).await?; //! # Ok(()) } //! ``` //! @@ -137,7 +137,7 @@ //! ]; //! //! // Insert the books into "mydb.books" collection, no manual conversion to BSON necessary. -//! typed_collection.insert_many(books, None).await?; +//! typed_collection.insert_many(books).await?; //! # Ok(()) } //! ``` //! @@ -166,8 +166,10 @@ //! //! // Query the books in the collection with a filter and an option. //! let filter = doc! { "author": "George Orwell" }; -//! let find_options = FindOptions::builder().sort(doc! { "title": 1 }).build(); -//! let mut cursor = typed_collection.find(filter, find_options).await?; +//! let mut cursor = typed_collection +//! .find(filter) +//! .sort(doc! { "title": 1 }) +//! .await?; //! //! // Iterate over the results of the cursor. //! while let Some(book) = cursor.try_next().await? { @@ -218,9 +220,9 @@ //! ]; //! //! // Insert some books into the "mydb.books" collection. -//! collection.insert_many(docs, None)?; +//! collection.insert_many(docs).run()?; //! -//! let cursor = collection.find(doc! { "author": "George Orwell" }, None)?; +//! let cursor = collection.find(doc! { "author": "George Orwell" }).run()?; //! for result in cursor { //! println!("title: {}", result?.title); //! } @@ -256,7 +258,7 @@ //! # let client = Client::with_uri_str("mongodb://example.com").await?; //! let collection = client.database("foo").collection("bar"); //! let handle = tokio::task::spawn(async move { -//! collection.insert_one(doc! { "x": 1 }, None).await +//! 
collection.insert_one(doc! { "x": 1 }).await //! }); //! //! tokio::time::timeout(Duration::from_secs(5), handle).await???; @@ -271,6 +273,8 @@ #![warn(missing_docs)] #![warn(rustdoc::missing_crate_level_docs)] +#![warn(clippy::cast_possible_truncation)] +#![warn(clippy::cast_possible_wrap)] #![cfg_attr( feature = "cargo-clippy", allow( @@ -296,6 +300,7 @@ pub use ::mongocrypt; pub mod action; mod bson_util; pub mod change_stream; +pub(crate) mod checked; mod client; mod cmap; mod coll; diff --git a/src/operation.rs b/src/operation.rs index b017fdcd3..7e1bbb012 100644 --- a/src/operation.rs +++ b/src/operation.rs @@ -12,7 +12,7 @@ pub(crate) mod drop_collection; pub(crate) mod drop_database; mod drop_indexes; mod find; -mod find_and_modify; +pub(crate) mod find_and_modify; mod get_more; mod insert; pub(crate) mod list_collections; diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index 08be24680..32e4a8cef 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -219,9 +219,9 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { let mut split = false; if self.encrypted && i != 0 { - let model_entry_size = array_entry_size_bytes(i, operation_size); + let model_entry_size = array_entry_size_bytes(i, operation_size)?; let namespace_entry_size = if namespace_size > 0 { - array_entry_size_bytes(namespace_index, namespace_size) + array_entry_size_bytes(namespace_index, namespace_size)? } else { 0 }; diff --git a/src/operation/find.rs b/src/operation/find.rs index 647362f7b..5b5a22f17 100644 --- a/src/operation/find.rs +++ b/src/operation/find.rs @@ -20,16 +20,12 @@ use super::{handle_response_sync, OperationResponse}; #[derive(Debug)] pub(crate) struct Find { ns: Namespace, - filter: Option, + filter: Document, options: Option>, } impl Find { - pub(crate) fn new( - ns: Namespace, - filter: Option, - mut options: Option, - ) -> Self { + pub(crate) fn new(ns: Namespace, filter: Document, mut options: Option) -> Self { if let Some(ref mut options) = options { if let Some(ref comment) = options.comment { if options.comment_bson.is_none() { @@ -87,9 +83,7 @@ impl OperationWithDefaults for Find { append_options(&mut body, self.options.as_ref())?; - if let Some(ref filter) = self.filter { - body.insert("filter", filter.clone()); - } + body.insert("filter", self.filter.clone()); Ok(Command::new_read( Self::NAME.to_string(), diff --git a/src/operation/find_and_modify.rs b/src/operation/find_and_modify.rs index 924d67909..17bfd2f6a 100644 --- a/src/operation/find_and_modify.rs +++ b/src/operation/find_and_modify.rs @@ -1,24 +1,16 @@ -mod options; +pub(crate) mod options; -use std::fmt::Debug; +use std::{fmt::Debug, marker::PhantomData}; use bson::{from_slice, RawBson}; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Deserialize}; use self::options::FindAndModifyOptions; use crate::{ bson::{doc, rawdoc, Document, RawDocumentBuf}, bson_util, cmap::{Command, RawCommandResponse, StreamDescription}, - coll::{ - options::{ - FindOneAndDeleteOptions, - FindOneAndReplaceOptions, - FindOneAndUpdateOptions, - UpdateModifications, - }, - Namespace, - }, + coll::{options::UpdateModifications, Namespace}, error::{ErrorKind, Result}, operation::{ append_options_to_raw_document, @@ -31,73 +23,40 @@ use crate::{ ClientSession, }; -use super::{handle_response_sync, OperationResponse}; +use super::{handle_response_sync, OperationResponse, UpdateOrReplace}; -pub(crate) struct FindAndModify<'a, R, T: DeserializeOwned> { +pub(crate) 
struct FindAndModify { ns: Namespace, query: Document, - modification: Modification<'a, R>, - human_readable_serialization: Option, + modification: Modification, options: Option, - _phantom: std::marker::PhantomData, + _phantom: PhantomData T>, } -impl FindAndModify<'_, (), T> { - pub fn with_delete( - ns: Namespace, - query: Document, - options: Option, - ) -> Self { - FindAndModify { - ns, - query, - modification: Modification::Delete, - human_readable_serialization: None, - options: options.map(Into::into), - _phantom: Default::default(), - } - } - - pub fn with_update( +impl FindAndModify { + pub(crate) fn with_modification( ns: Namespace, query: Document, - update: UpdateModifications, - options: Option, + modification: Modification, + options: Option, ) -> Result { - if let UpdateModifications::Document(ref d) = update { + if let Modification::Update(UpdateOrReplace::UpdateModifications( + UpdateModifications::Document(d), + )) = &modification + { bson_util::update_document_check(d)?; }; - Ok(FindAndModify { - ns, - query, - modification: Modification::Update(update.into()), - human_readable_serialization: None, - options: options.map(Into::into), - _phantom: Default::default(), - }) - } -} - -impl<'a, R: Serialize, T: DeserializeOwned> FindAndModify<'a, R, T> { - pub fn with_replace( - ns: Namespace, - query: Document, - replacement: &'a R, - options: Option, - human_readable_serialization: bool, - ) -> Result { - Ok(FindAndModify { + Ok(Self { ns, query, - modification: Modification::Update(replacement.into()), - human_readable_serialization: Some(human_readable_serialization), - options: options.map(Into::into), - _phantom: Default::default(), + modification, + options, + _phantom: PhantomData, }) } } -impl<'a, R: Serialize, T: DeserializeOwned> OperationWithDefaults for FindAndModify<'a, R, T> { +impl OperationWithDefaults for FindAndModify { type O = Option; type Command = RawDocumentBuf; const NAME: &'static str = "findAndModify"; @@ -119,15 +78,12 @@ impl<'a, R: Serialize, T: DeserializeOwned> OperationWithDefaults for FindAndMod "query": RawDocumentBuf::from_document(&self.query)?, }; - let (key, modification) = match &self.modification { - Modification::Delete => ("remove", true.into()), - Modification::Update(update_or_replace) => ( - "update", - update_or_replace - .to_raw_bson(self.human_readable_serialization.unwrap_or_default())?, - ), - }; - body.append(key, modification); + match &self.modification { + Modification::Delete => body.append("remove", true), + Modification::Update(update_or_replace) => { + update_or_replace.append_to_rawdoc(&mut body, "update")? 
+ } + } if let Some(ref mut options) = self.options { remove_empty_write_concern!(Some(options)); diff --git a/src/operation/find_and_modify/options.rs b/src/operation/find_and_modify/options.rs index fcdb90af0..5147c3ba5 100644 --- a/src/operation/find_and_modify/options.rs +++ b/src/operation/find_and_modify/options.rs @@ -19,16 +19,16 @@ use crate::{ }; #[derive(Clone, Debug)] -pub(super) enum Modification<'a, T> { +pub(crate) enum Modification { Delete, - Update(UpdateOrReplace<'a, T>), + Update(UpdateOrReplace), } #[serde_with::skip_serializing_none] -#[derive(Clone, Debug, TypedBuilder, Serialize)] +#[derive(Clone, Debug, TypedBuilder, Serialize, Default)] #[builder(field_defaults(setter(into)))] #[serde(rename_all = "camelCase")] -pub(super) struct FindAndModifyOptions { +pub(crate) struct FindAndModifyOptions { #[builder(default)] pub(crate) sort: Option, @@ -130,11 +130,5 @@ impl From for FindAndModifyOptions { } fn return_document_to_bool(return_document: Option) -> Option { - if let Some(return_document) = return_document { - return match return_document { - ReturnDocument::After => Some(true), - ReturnDocument::Before => Some(false), - }; - } - None + return_document.as_ref().map(ReturnDocument::as_bool) } diff --git a/src/operation/get_more.rs b/src/operation/get_more.rs index f1dc91a06..5876fdad8 100644 --- a/src/operation/get_more.rs +++ b/src/operation/get_more.rs @@ -70,7 +70,10 @@ impl<'conn> OperationWithDefaults for GetMore<'conn> { } if let Some(ref max_time) = self.max_time { - body.insert("maxTimeMS", max_time.as_millis() as i32); + body.insert( + "maxTimeMS", + max_time.as_millis().try_into().unwrap_or(i32::MAX), + ); } if let Some(ref comment) = self.comment { diff --git a/src/operation/insert.rs b/src/operation/insert.rs index 82f6ec4f9..44f1d44f1 100644 --- a/src/operation/insert.rs +++ b/src/operation/insert.rs @@ -1,22 +1,19 @@ use std::collections::HashMap; -use bson::{Bson, RawDocumentBuf}; -use serde::Serialize; - use crate::{ - bson::rawdoc, + bson::{rawdoc, Bson, RawDocument, RawDocumentBuf}, bson_util::{ array_entry_size_bytes, extend_raw_document_buf, get_or_prepend_id_field, vec_to_raw_array_buf, }, + checked::Checked, cmap::{Command, RawCommandResponse, StreamDescription}, error::{BulkWriteFailure, Error, ErrorKind, Result}, operation::{OperationWithDefaults, Retryability, WriteResponseBody}, options::{InsertManyOptions, WriteConcern}, results::InsertManyResult, - serde_util, ClientSession, Namespace, }; @@ -29,22 +26,20 @@ use super::{ }; #[derive(Debug)] -pub(crate) struct Insert<'a, T> { +pub(crate) struct Insert<'a> { ns: Namespace, - documents: Vec<&'a T>, + documents: Vec<&'a RawDocument>, inserted_ids: Vec, options: InsertManyOptions, encrypted: bool, - human_readable_serialization: bool, } -impl<'a, T> Insert<'a, T> { +impl<'a> Insert<'a> { pub(crate) fn new( ns: Namespace, - documents: Vec<&'a T>, + documents: Vec<&'a RawDocument>, options: Option, encrypted: bool, - human_readable_serialization: bool, ) -> Self { let mut options = options.unwrap_or_default(); if options.ordered.is_none() { @@ -57,12 +52,11 @@ impl<'a, T> Insert<'a, T> { documents, inserted_ids: vec![], encrypted, - human_readable_serialization, } } } -impl<'a, T: Serialize> OperationWithDefaults for Insert<'a, T> { +impl<'a> OperationWithDefaults for Insert<'a> { type O = InsertManyResult; type Command = RawDocumentBuf; @@ -72,22 +66,22 @@ impl<'a, T: Serialize> OperationWithDefaults for Insert<'a, T> { let mut docs = Vec::new(); let mut size = 0; - let max_doc_size = 
description.max_bson_object_size as usize; + let max_doc_size = Checked::::try_from(description.max_bson_object_size)?; let max_doc_sequence_size = - description.max_message_size_bytes as usize - COMMAND_OVERHEAD_SIZE; + Checked::::try_from(description.max_message_size_bytes)? - COMMAND_OVERHEAD_SIZE; + let max_write_batch_size = Checked::::try_from(description.max_write_batch_size)?; - for (i, d) in self + for (i, document) in self .documents .iter() - .take(description.max_write_batch_size as usize) + .take(max_write_batch_size.get()?) .enumerate() { - let mut doc = - serde_util::to_raw_document_buf_with_options(d, self.human_readable_serialization)?; - let id = get_or_prepend_id_field(&mut doc)?; + let mut document = bson::to_raw_document_buf(document)?; + let id = get_or_prepend_id_field(&mut document)?; - let doc_size = doc.as_bytes().len(); - if doc_size > max_doc_size { + let doc_size = document.as_bytes().len(); + if doc_size > max_doc_size.get()? { return Err(ErrorKind::InvalidArgument { message: format!( "insert document must be within {} bytes, but document provided is {} \ @@ -102,16 +96,16 @@ impl<'a, T: Serialize> OperationWithDefaults for Insert<'a, T> { // automatic encryption. I.e. if a single document has size larger than 2MiB (but less // than `maxBsonObjectSize`) proceed with automatic encryption. if self.encrypted && i != 0 { - let doc_entry_size = array_entry_size_bytes(i, doc.as_bytes().len()); - if size + doc_entry_size >= MAX_ENCRYPTED_WRITE_SIZE { + let doc_entry_size = array_entry_size_bytes(i, document.as_bytes().len())?; + if (Checked::new(size) + doc_entry_size).get()? >= MAX_ENCRYPTED_WRITE_SIZE { break; } - } else if size + doc_size > max_doc_sequence_size { + } else if (Checked::new(size) + doc_size).get()? > max_doc_sequence_size.get()? { break; } self.inserted_ids.push(id); - docs.push(doc); + docs.push(document); size += doc_size; } @@ -140,25 +134,21 @@ impl<'a, T: Serialize> OperationWithDefaults for Insert<'a, T> { _session: Option<&mut ClientSession>, ) -> OperationResponse<'static, Self::O> { handle_response_sync! {{ - let response: WriteResponseBody = raw_response.body_utf8_lossy()?; - - let mut map = HashMap::new(); - if self.options.ordered == Some(true) { - // in ordered inserts, only the first n were attempted. - for (i, id) in self - .inserted_ids - .iter() - .enumerate() - .take(response.n as usize) - { - map.insert(i, id.clone()); - } - } else { - // for unordered, add all the attempted ids and then remove the ones that have - // associated write errors. - for (i, id) in self.inserted_ids.iter().enumerate() { - map.insert(i, id.clone()); - } + let response: WriteResponseBody = raw_response.body_utf8_lossy()?; + let response_n = Checked::::try_from(response.n)?; + + let mut map = HashMap::new(); + if self.options.ordered == Some(true) { + // in ordered inserts, only the first n were attempted. + for (i, id) in self.inserted_ids.iter().enumerate().take(response_n.get()?) { + map.insert(i, id.clone()); + } + } else { + // for unordered, add all the attempted ids and then remove the ones that have + // associated write errors. 
+ for (i, id) in self.inserted_ids.iter().enumerate() { + map.insert(i, id.clone()); + } if let Some(write_errors) = response.write_errors.as_ref() { for err in write_errors { diff --git a/src/operation/insert/test.rs b/src/operation/insert/test.rs deleted file mode 100644 index c8385338f..000000000 --- a/src/operation/insert/test.rs +++ /dev/null @@ -1,153 +0,0 @@ -use once_cell::sync::Lazy; -use serde::{Deserialize, Serialize}; - -use crate::{ - bson::{doc, Document}, - cmap::StreamDescription, - concern::WriteConcern, - error::{BulkWriteError, ErrorKind, WriteConcernError}, - operation::{test::handle_response_test, Insert, Operation}, - options::InsertManyOptions, - Namespace, -}; - -struct TestFixtures { - op: Insert<'static, Document>, - documents: Vec, -} - -/// Get an Insert operation and the documents/options used to construct it. -fn fixtures(opts: Option) -> TestFixtures { - static DOCUMENTS: Lazy> = Lazy::new(|| { - vec![ - Document::new(), - doc! {"_id": 1234, "a": 1}, - doc! {"a": 123, "b": "hello world" }, - ] - }); - - let options = opts.unwrap_or(InsertManyOptions { - ordered: Some(true), - write_concern: Some(WriteConcern::builder().journal(true).build()), - ..Default::default() - }); - - let op = Insert::new( - Namespace { - db: "test_db".to_string(), - coll: "test_coll".to_string(), - }, - DOCUMENTS.iter().collect(), - Some(options.clone()), - false, - false, - ); - - TestFixtures { - op, - documents: DOCUMENTS.clone(), - } -} - -#[derive(Debug, Serialize, Deserialize)] -struct Documents { - documents: Vec, -} - -#[test] -fn handle_success() { - let mut fixtures = fixtures(None); - - // populate _id for documents that don't provide it - fixtures - .op - .build(&StreamDescription::new_testing()) - .unwrap(); - let response = handle_response_test(&fixtures.op, doc! { "ok": 1.0, "n": 3 }).unwrap(); - let inserted_ids = response.inserted_ids; - assert_eq!(inserted_ids.len(), 3); - assert_eq!( - inserted_ids.get(&1).unwrap(), - fixtures.documents[1].get("_id").unwrap() - ); -} - -#[test] -fn handle_invalid_response() { - let fixtures = fixtures(None); - handle_response_test(&fixtures.op, doc! { "ok": 1.0, "asdfadsf": 123123 }).unwrap_err(); -} - -#[test] -fn handle_write_failure() { - let mut fixtures = fixtures(None); - - // generate _id for operations missing it. - let _ = fixtures - .op - .build(&StreamDescription::new_testing()) - .unwrap(); - - let write_error_response = doc! { - "ok": 1.0, - "n": 1, - "writeErrors": [ - { - "index": 1, - "code": 11000, - "errmsg": "duplicate key", - "errInfo": { - "test key": "test value", - } - } - ], - "writeConcernError": { - "code": 123, - "codeName": "woohoo", - "errmsg": "error message", - "errInfo": { - "writeConcern": { - "w": 2, - "wtimeout": 0, - "provenance": "clientSupplied" - } - } - } - }; - - let write_error_response = - handle_response_test(&fixtures.op, write_error_response).unwrap_err(); - match *write_error_response.kind { - ErrorKind::BulkWrite(bwe) => { - let write_errors = bwe.write_errors.expect("write errors should be present"); - assert_eq!(write_errors.len(), 1); - let expected_err = BulkWriteError { - index: 1, - code: 11000, - code_name: None, - message: "duplicate key".to_string(), - details: Some(doc! 
{ "test key": "test value" }), - }; - assert_eq!(write_errors.first().unwrap(), &expected_err); - - let write_concern_error = bwe - .write_concern_error - .expect("write concern error should be present"); - let expected_wc_err = WriteConcernError { - code: 123, - code_name: "woohoo".to_string(), - message: "error message".to_string(), - details: Some(doc! { "writeConcern": { - "w": 2, - "wtimeout": 0, - "provenance": "clientSupplied" - } }), - labels: vec![], - }; - assert_eq!(write_concern_error, expected_wc_err); - - assert_eq!(bwe.inserted_ids.len(), 1); - } - e => panic!("expected bulk write error, got {:?}", e), - }; -} diff --git a/src/operation/update.rs b/src/operation/update.rs index e870e9e5f..390081f69 100644 --- a/src/operation/update.rs +++ b/src/operation/update.rs @@ -1,4 +1,4 @@ -use serde::{Deserialize, Serialize}; +use serde::Deserialize; use crate::{ bson::{doc, rawdoc, Document, RawArrayBuf, RawBson, RawDocumentBuf}, @@ -8,7 +8,6 @@ use crate::{ operation::{OperationWithDefaults, Retryability, WriteResponseBody}, options::{UpdateModifications, UpdateOptions, WriteConcern}, results::UpdateResult, - serde_util::to_raw_document_buf_with_options, ClientSession, Namespace, }; @@ -16,60 +15,56 @@ use crate::{ use super::{handle_response_sync, OperationResponse}; #[derive(Clone, Debug)] -pub(crate) enum UpdateOrReplace<'a, T = ()> { +pub(crate) enum UpdateOrReplace { UpdateModifications(UpdateModifications), - Replacement(&'a T), + Replacement(RawDocumentBuf), } -impl<'a, T: Serialize> UpdateOrReplace<'a, T> { - pub(crate) fn to_raw_bson(&self, human_readable_serialization: bool) -> Result { +impl UpdateOrReplace { + pub(crate) fn append_to_rawdoc(&self, doc: &mut RawDocumentBuf, key: &str) -> Result<()> { match self { Self::UpdateModifications(update_modifications) => match update_modifications { UpdateModifications::Document(document) => { - Ok(RawDocumentBuf::from_document(document)?.into()) + let raw = RawDocumentBuf::from_document(document)?; + doc.append(key, raw); + } + UpdateModifications::Pipeline(pipeline) => { + let raw = bson_util::to_raw_bson_array(pipeline)?; + doc.append(key, raw); } - UpdateModifications::Pipeline(pipeline) => bson_util::to_raw_bson_array(pipeline), }, - Self::Replacement(replacement) => { - let replacement_doc = - to_raw_document_buf_with_options(replacement, human_readable_serialization)?; - bson_util::replacement_raw_document_check(&replacement_doc)?; - Ok(replacement_doc.into()) + Self::Replacement(replacement_doc) => { + bson_util::replacement_raw_document_check(replacement_doc)?; + doc.append_ref(key, replacement_doc); } } + + Ok(()) } } -impl From for UpdateOrReplace<'_> { +impl From for UpdateOrReplace { fn from(update_modifications: UpdateModifications) -> Self { Self::UpdateModifications(update_modifications) } } -impl<'a, T: Serialize> From<&'a T> for UpdateOrReplace<'a, T> { - fn from(t: &'a T) -> Self { - Self::Replacement(t) - } -} - #[derive(Debug)] -pub(crate) struct Update<'a, T = ()> { +pub(crate) struct Update { ns: Namespace, filter: Document, - update: UpdateOrReplace<'a, T>, + update: UpdateOrReplace, multi: Option, options: Option, - human_readable_serialization: bool, } -impl Update<'_> { +impl Update { pub(crate) fn with_update( ns: Namespace, filter: Document, update: UpdateModifications, multi: bool, options: Option, - human_readable_serialization: bool, ) -> Self { Self { ns, @@ -77,32 +72,27 @@ impl Update<'_> { update: update.into(), multi: multi.then_some(true), options, - human_readable_serialization, } } -} 
-impl<'a, T: Serialize> Update<'a, T> { - pub(crate) fn with_replace( + pub(crate) fn with_replace_raw( ns: Namespace, filter: Document, - update: &'a T, + update: RawDocumentBuf, multi: bool, options: Option, - human_readable_serialization: bool, - ) -> Self { - Self { + ) -> Result { + Ok(Self { ns, filter, - update: update.into(), + update: UpdateOrReplace::Replacement(update), multi: multi.then_some(true), options, - human_readable_serialization, - } + }) } } -impl<'a, T: Serialize> OperationWithDefaults for Update<'a, T> { +impl OperationWithDefaults for Update { type O = UpdateResult; type Command = RawDocumentBuf; @@ -115,8 +105,8 @@ impl<'a, T: Serialize> OperationWithDefaults for Update<'a, T> { let mut update = rawdoc! { "q": RawDocumentBuf::from_document(&self.filter)?, - "u": self.update.to_raw_bson(self.human_readable_serialization)?, }; + self.update.append_to_rawdoc(&mut update, "u")?; if let Some(ref options) = self.options { if let Some(upsert) = options.upsert { diff --git a/src/options.rs b/src/options.rs index b5f7ac37a..a576a1abf 100644 --- a/src/options.rs +++ b/src/options.rs @@ -15,12 +15,17 @@ //! .build(); //! ``` +#[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" +))] +pub use crate::compression::compressors::Compressor; pub use crate::{ change_stream::options::*, client::{auth::*, options::*}, coll::options::*, collation::*, - compression::*, concern::*, db::options::*, gridfs::options::*, diff --git a/src/sdam/description/topology/server_selection.rs b/src/sdam/description/topology/server_selection.rs index d41ccade6..65fa9af75 100644 --- a/src/sdam/description/topology/server_selection.rs +++ b/src/sdam/description/topology/server_selection.rs @@ -198,29 +198,31 @@ impl TopologyDescription { &self, read_preference: &ReadPreference, ) -> Result> { + let tag_sets = read_preference.tag_sets(); + let max_staleness = read_preference.max_staleness(); + let servers = match read_preference { ReadPreference::Primary => self.servers_with_type(&[ServerType::RsPrimary]).collect(), - ReadPreference::Secondary { ref options } => self - .suitable_servers_for_read_preference( - &[ServerType::RsSecondary], - options.tag_sets.as_ref(), - options.max_staleness, - )?, - ReadPreference::PrimaryPreferred { ref options } => { + ReadPreference::Secondary { .. } => self.suitable_servers_for_read_preference( + &[ServerType::RsSecondary], + tag_sets, + max_staleness, + )?, + ReadPreference::PrimaryPreferred { .. } => { match self.servers_with_type(&[ServerType::RsPrimary]).next() { Some(primary) => vec![primary], None => self.suitable_servers_for_read_preference( &[ServerType::RsSecondary], - options.tag_sets.as_ref(), - options.max_staleness, + tag_sets, + max_staleness, )?, } } - ReadPreference::SecondaryPreferred { ref options } => { + ReadPreference::SecondaryPreferred { .. } => { let suitable_servers = self.suitable_servers_for_read_preference( &[ServerType::RsSecondary], - options.tag_sets.as_ref(), - options.max_staleness, + tag_sets, + max_staleness, )?; if suitable_servers.is_empty() { @@ -229,10 +231,10 @@ impl TopologyDescription { suitable_servers } } - ReadPreference::Nearest { ref options } => self.suitable_servers_for_read_preference( + ReadPreference::Nearest { .. 
} => self.suitable_servers_for_read_preference( &[ServerType::RsPrimary, ServerType::RsSecondary], - options.tag_sets.as_ref(), - options.max_staleness, + tag_sets, + max_staleness, )?, }; @@ -291,7 +293,7 @@ impl TopologyDescription { primary: &ServerDescription, max_staleness: Duration, ) { - let max_staleness_ms = max_staleness.as_millis() as i64; + let max_staleness_ms = max_staleness.as_millis().try_into().unwrap_or(i64::MAX); servers.retain(|server| { let server_staleness = self.calculate_secondary_staleness_with_primary(server, primary); @@ -307,7 +309,7 @@ impl TopologyDescription { servers: &mut Vec<&ServerDescription>, max_staleness: Duration, ) { - let max_staleness = max_staleness.as_millis() as i64; + let max_staleness = max_staleness.as_millis().try_into().unwrap_or(i64::MAX); let max_write_date = self .servers .values() @@ -347,7 +349,11 @@ impl TopologyDescription { let secondary_last_update = secondary.last_update_time?.timestamp_millis(); let secondary_last_write = secondary.last_write_date().ok()??.timestamp_millis(); - let heartbeat_frequency = self.heartbeat_frequency().as_millis() as i64; + let heartbeat_frequency = self + .heartbeat_frequency() + .as_millis() + .try_into() + .unwrap_or(i64::MAX); let staleness = (secondary_last_update - secondary_last_write) - (primary_last_update - primary_last_write) @@ -362,7 +368,11 @@ impl TopologyDescription { max_last_write_date: i64, ) -> Option { let secondary_last_write = secondary.last_write_date().ok()??.timestamp_millis(); - let heartbeat_frequency = self.heartbeat_frequency().as_millis() as i64; + let heartbeat_frequency = self + .heartbeat_frequency() + .as_millis() + .try_into() + .unwrap_or(i64::MAX); let staleness = max_last_write_date - secondary_last_write + heartbeat_frequency; Some(staleness) diff --git a/src/sdam/description/topology/server_selection/test/in_window.rs b/src/sdam/description/topology/server_selection/test/in_window.rs index 4cc651f13..0179a17d9 100644 --- a/src/sdam/description/topology/server_selection/test/in_window.rs +++ b/src/sdam/description/topology/server_selection/test/in_window.rs @@ -7,7 +7,6 @@ use serde::Deserialize; use crate::{ cmap::DEFAULT_MAX_POOL_SIZE, - coll::options::FindOptions, error::Result, event::cmap::CmapEvent, options::ServerAddress, @@ -19,12 +18,13 @@ use crate::{ get_client_options, log_uncaptured, run_spec_test, + util::event_buffer::EventBuffer, Event, - EventHandler, FailPoint, FailPointMode, TestClient, }, + Client, ServerInfo, }; @@ -155,7 +155,7 @@ async fn load_balancing_test() { setup_client .database("load_balancing_test") .collection("load_balancing_test") - .insert_one(doc! {}, None) + .insert_one(doc! {}) .await .unwrap(); @@ -163,7 +163,7 @@ async fn load_balancing_test() { /// was selected. max_share is the upper bound. async fn do_test( client: &TestClient, - handler: &mut EventHandler, + handler: &mut EventBuffer, min_share: f64, max_share: f64, iterations: usize, @@ -177,7 +177,7 @@ async fn load_balancing_test() { .collection::("load_balancing_test"); handles.push(runtime::spawn(async move { for _ in 0..iterations { - collection.find_one(None, None).await?; + collection.find_one(doc! 
{}).await?; } Ok(()) })) @@ -197,28 +197,36 @@ async fn load_balancing_test() { counts.sort(); let share_of_selections = (*counts[0] as f64) / ((*counts[0] + *counts[1]) as f64); - assert!( - share_of_selections <= max_share, - "expected no more than {}% of selections, instead got {}%", - (max_share * 100.0) as u32, - (share_of_selections * 100.0) as u32 - ); - assert!( - share_of_selections >= min_share, - "expected at least {}% of selections, instead got {}%", - (min_share * 100.0) as u32, - (share_of_selections * 100.0) as u32 - ); + #[allow(clippy::cast_possible_truncation)] + { + assert!( + share_of_selections <= max_share, + "expected no more than {}% of selections, instead got {}%", + (max_share * 100.0) as u32, + (share_of_selections * 100.0) as u32 + ); + assert!( + share_of_selections >= min_share, + "expected at least {}% of selections, instead got {}%", + (min_share * 100.0) as u32, + (share_of_selections * 100.0) as u32 + ); + } } - let mut handler = EventHandler::new(); - let mut subscriber = handler.subscribe(); + let mut buffer = EventBuffer::new(); + #[allow(deprecated)] + let mut subscriber = buffer.subscribe(); let mut options = get_client_options().await.clone(); let max_pool_size = DEFAULT_MAX_POOL_SIZE; let hosts = options.hosts.clone(); options.local_threshold = Duration::from_secs(30).into(); options.min_pool_size = Some(max_pool_size); - let client = TestClient::with_handler(Some(Arc::new(handler.clone())), options).await; + let client = Client::test_builder() + .options(options) + .event_buffer(buffer.clone()) + .build() + .await; // wait for both servers pools to be saturated. for address in hosts { @@ -227,13 +235,11 @@ async fn load_balancing_test() { let client = client.clone(); let selector = selector.clone(); runtime::spawn(async move { - let options = FindOptions::builder() - .selection_criteria(SelectionCriteria::Predicate(selector)) - .build(); client .database("load_balancing_test") .collection::("load_balancing_test") - .find(doc! { "$where": "sleep(500) && true" }, options) + .find(doc! { "$where": "sleep(500) && true" }) + .selection_criteria(SelectionCriteria::Predicate(selector)) .await .unwrap(); }); @@ -249,7 +255,6 @@ async fn load_balancing_test() { .expect("timed out waiting for both pools to be saturated"); conns += 1; } - drop(subscriber); // enable a failpoint on one of the mongoses to slow it down let slow_host = get_client_options().await.hosts[0].clone(); @@ -261,9 +266,9 @@ async fn load_balancing_test() { let guard = setup_client.configure_fail_point(fail_point).await.unwrap(); // verify that the lesser picked server (slower one) was picked less than 25% of the time. 
- do_test(&client, &mut handler, 0.05, 0.25, 10).await; + do_test(&client, &mut buffer, 0.05, 0.25, 10).await; // disable failpoint and rerun, should be back to even split drop(guard); - do_test(&client, &mut handler, 0.40, 0.50, 100).await; + do_test(&client, &mut buffer, 0.40, 0.50, 100).await; } diff --git a/src/sdam/description/topology/server_selection/test/logic.rs b/src/sdam/description/topology/server_selection/test/logic.rs index a6f5dd9d7..448d43e68 100644 --- a/src/sdam/description/topology/server_selection/test/logic.rs +++ b/src/sdam/description/topology/server_selection/test/logic.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize}; use crate::{ error::{Error, Result}, - selection_criteria::{ReadPreference, ReadPreferenceOptions, TagSet}, + options::{ReadPreference, ReadPreferenceOptions, TagSet}, test::run_spec_test, }; @@ -28,12 +28,13 @@ struct TestFile { _operation: Option, } +// Deserialize into a helper struct to avoid deserialization errors for invalid read preferences. #[derive(Debug, Deserialize, Serialize)] -pub struct TestReadPreference { - pub mode: Option, - pub tag_sets: Option>, +struct TestReadPreference { + mode: Option, + tag_sets: Option>, #[serde(rename = "maxStalenessSeconds")] - pub max_staleness_seconds: Option, + max_staleness_seconds: Option, } impl TryFrom for ReadPreference { @@ -57,10 +58,18 @@ impl TryFrom for ReadPreference { } ReadPreference::Primary } - Some("Secondary") => ReadPreference::Secondary { options }, - Some("PrimaryPreferred") => ReadPreference::PrimaryPreferred { options }, - Some("SecondaryPreferred") => ReadPreference::SecondaryPreferred { options }, - Some("Nearest") => ReadPreference::Nearest { options }, + Some("Secondary") => ReadPreference::Secondary { + options: Some(options), + }, + Some("PrimaryPreferred") => ReadPreference::PrimaryPreferred { + options: Some(options), + }, + Some("SecondaryPreferred") => ReadPreference::SecondaryPreferred { + options: Some(options), + }, + Some("Nearest") => ReadPreference::Nearest { + options: Some(options), + }, Some(m) => { return Err(Error::invalid_argument( format!("invalid read preference mode: {}", m).as_str(), @@ -109,18 +118,18 @@ async fn run_test(test_file: TestFile) { Client, }; - let mut options = Vec::new(); + let mut uri_options = Vec::new(); if let Some(ref mode) = test_file.read_preference.mode { - options.push(format!("readPreference={}", mode)); + uri_options.push(format!("readPreference={}", mode)); } if let Some(max_staleness_seconds) = test_file.read_preference.max_staleness_seconds { - options.push(format!("maxStalenessSeconds={}", max_staleness_seconds)); + uri_options.push(format!("maxStalenessSeconds={}", max_staleness_seconds)); } if let Some(heartbeat_freq) = test_file.heartbeat_frequency_ms { - options.push(format!("heartbeatFrequencyMS={}", heartbeat_freq)); + uri_options.push(format!("heartbeatFrequencyMS={}", heartbeat_freq)); } - let uri_str = format!("mongodb://localhost:27017/?{}", options.join("&")); + let uri_str = format!("mongodb://localhost:27017/?{}", uri_options.join("&")); ClientOptions::parse(uri_str) .await .err() diff --git a/src/sdam/description/topology/test.rs b/src/sdam/description/topology/test.rs index 235b79841..9a4109177 100644 --- a/src/sdam/description/topology/test.rs +++ b/src/sdam/description/topology/test.rs @@ -6,6 +6,7 @@ use std::time::Duration; pub use event::TestSdamEvent; +#[allow(clippy::cast_possible_truncation)] pub(crate) fn f64_ms_as_duration(f: f64) -> Duration { Duration::from_micros((f * 1000.0) as u64) } diff 
--git a/src/sdam/description/topology/test/sdam.rs b/src/sdam/description/topology/test/sdam.rs index 2488c8c6e..8b6903e6f 100644 --- a/src/sdam/description/topology/test/sdam.rs +++ b/src/sdam/description/topology/test/sdam.rs @@ -5,6 +5,8 @@ use serde::Deserialize; use super::TestSdamEvent; +#[allow(deprecated)] +use crate::test::EventClient; use crate::{ bson::{doc, oid::ObjectId}, client::Client, @@ -28,9 +30,8 @@ use crate::{ get_client_options, log_uncaptured, run_spec_test, + util::event_buffer::EventBuffer, Event, - EventClient, - EventHandler, FailPoint, FailPointMode, TestClient, @@ -273,11 +274,12 @@ async fn run_test(test_file: TestFile) { .await .expect(test_description); - let handler = Arc::new(EventHandler::new()); - options.sdam_event_handler = Some(handler.clone().into()); + let buffer = EventBuffer::new(); + options.sdam_event_handler = Some(buffer.handler()); options.test_options_mut().disable_monitoring_threads = true; - let mut event_subscriber = handler.subscribe(); + #[allow(deprecated)] + let mut event_subscriber = buffer.subscribe(); let mut topology = Topology::new(options.clone()).unwrap(); for (i, phase) in test_file.phases.into_iter().enumerate() { @@ -589,20 +591,22 @@ async fn load_balanced() { #[tokio::test] #[function_name::named] async fn topology_closed_event_last() { - let event_handler = EventHandler::new(); - let mut subscriber = event_handler.subscribe(); + let event_buffer = EventBuffer::new(); + #[allow(deprecated)] let client = EventClient::with_additional_options( None, Some(Duration::from_millis(50)), None, - event_handler.clone(), + event_buffer.clone(), ) .await; + #[allow(deprecated)] + let mut subscriber = event_buffer.subscribe_all(); client .database(function_name!()) .collection(function_name!()) - .insert_one(doc! { "x": 1 }, None) + .insert_one(doc! { "x": 1 }) .await .unwrap(); drop(client); @@ -634,17 +638,19 @@ async fn heartbeat_events() { options.heartbeat_freq = Some(Duration::from_millis(50)); options.app_name = "heartbeat_events".to_string().into(); - let event_handler = EventHandler::new(); - let mut subscriber = event_handler.subscribe(); - + let event_buffer = EventBuffer::new(); + #[allow(deprecated)] let client = EventClient::with_additional_options( Some(options.clone()), Some(Duration::from_millis(50)), None, - event_handler.clone(), + event_buffer.clone(), ) .await; + #[allow(deprecated)] + let mut subscriber = event_buffer.subscribe_all(); + if client.is_load_balanced() { log_uncaptured("skipping heartbeat_events tests due to load-balanced topology"); return; @@ -715,7 +721,7 @@ async fn direct_connection() { direct_false_client .database(function_name!()) .collection(function_name!()) - .insert_one(doc! {}, None) + .insert_one(doc! {}) .await .expect("write should succeed with directConnection=false on secondary"); @@ -726,7 +732,7 @@ async fn direct_connection() { let error = direct_true_client .database(function_name!()) .collection(function_name!()) - .insert_one(doc! {}, None) + .insert_one(doc! {}) .await .expect_err("write should fail with directConnection=true on secondary"); assert!(error.is_notwritableprimary()); @@ -736,7 +742,7 @@ async fn direct_connection() { client .database(function_name!()) .collection(function_name!()) - .insert_one(doc! {}, None) + .insert_one(doc! 
{}) .await .expect("write should succeed with directConnection unspecified"); } diff --git a/src/sdam/test.rs b/src/sdam/test.rs index 7b293fea4..f8baded81 100644 --- a/src/sdam/test.rs +++ b/src/sdam/test.rs @@ -1,12 +1,13 @@ use std::{ collections::HashSet, - sync::Arc, time::{Duration, Instant}, }; use bson::doc; use semver::VersionReq; +#[allow(deprecated)] +use crate::test::EventClient; use crate::{ client::options::{ClientOptions, ServerAddress}, cmap::RawCommandResponse, @@ -17,9 +18,8 @@ use crate::{ test::{ get_client_options, log_uncaptured, + util::event_buffer::EventBuffer, Event, - EventClient, - EventHandler, FailPoint, FailPointMode, TestClient, @@ -96,16 +96,18 @@ async fn sdam_pool_management() { options.app_name = Some("SDAMPoolManagementTest".to_string()); options.heartbeat_freq = Some(Duration::from_millis(50)); - let event_handler = EventHandler::new(); - let mut subscriber = event_handler.subscribe(); + let event_buffer = EventBuffer::new(); + #[allow(deprecated)] let client = EventClient::with_additional_options( Some(options), Some(Duration::from_millis(50)), None, - event_handler.clone(), + event_buffer.clone(), ) .await; + #[allow(deprecated)] + let mut subscriber = event_buffer.subscribe_all(); if !VersionReq::parse(">= 4.2.9") .unwrap() @@ -186,11 +188,12 @@ async fn hello_ok_true() { return; } - let handler = Arc::new(EventHandler::new()); - let mut subscriber = handler.subscribe(); + let buffer = EventBuffer::new(); + #[allow(deprecated)] + let mut subscriber = buffer.subscribe(); let mut options = setup_client_options.clone(); - options.sdam_event_handler = Some(handler.clone().into()); + options.sdam_event_handler = Some(buffer.handler()); options.direct_connection = Some(true); options.heartbeat_freq = Some(Duration::from_millis(500)); let _client = Client::with_options(options).expect("client creation should succeed"); @@ -256,7 +259,7 @@ async fn repl_set_name_mismatch() -> crate::error::Result<()> { /// topology. #[tokio::test(flavor = "multi_thread")] async fn removed_server_monitor_stops() -> crate::error::Result<()> { - let handler = Arc::new(EventHandler::new()); + let buffer = EventBuffer::new(); let options = ClientOptions::builder() .hosts(vec![ ServerAddress::parse("localhost:49152")?, @@ -264,14 +267,15 @@ async fn removed_server_monitor_stops() -> crate::error::Result<()> { ServerAddress::parse("localhost:49154")?, ]) .heartbeat_freq(Duration::from_millis(50)) - .sdam_event_handler(handler.clone()) + .sdam_event_handler(buffer.handler()) .repl_set_name("foo".to_string()) .build(); let hosts = options.hosts.clone(); let set_name = options.repl_set_name.clone().unwrap(); - let mut subscriber = handler.subscribe(); + #[allow(deprecated)] + let mut subscriber = buffer.subscribe(); let topology = Topology::new(options)?; // Wait until all three monitors have started. diff --git a/src/search_index.rs b/src/search_index.rs index 1124f2015..8c121b32d 100644 --- a/src/search_index.rs +++ b/src/search_index.rs @@ -1,89 +1,9 @@ -use self::options::*; -use crate::{ - bson::Document, - coll::options::AggregateOptions, - error::{Error, Result}, - operation::{CreateSearchIndexes, DropSearchIndex, UpdateSearchIndex}, - Collection, - Cursor, -}; +use crate::bson::Document; use bson::doc; use serde::{Deserialize, Serialize}; use typed_builder::TypedBuilder; -impl Collection { - /// Convenience method for creating a single search index. 
- pub async fn create_search_index( - &self, - model: SearchIndexModel, - options: impl Into>, - ) -> Result { - let mut names = self.create_search_indexes(Some(model), options).await?; - match names.len() { - 1 => Ok(names.pop().unwrap()), - n => Err(Error::internal(format!("expected 1 index name, got {}", n))), - } - } - - /// Creates multiple search indexes on the collection. - pub async fn create_search_indexes( - &self, - models: impl IntoIterator, - _options: impl Into>, - ) -> Result> { - let op = CreateSearchIndexes::new(self.namespace(), models.into_iter().collect()); - self.client().execute_operation(op, None).await - } - - /// Updates the search index with the given name to use the provided definition. - pub async fn update_search_index( - &self, - name: impl AsRef, - definition: Document, - _options: impl Into>, - ) -> Result<()> { - let op = UpdateSearchIndex::new( - self.namespace(), - name.as_ref().to_string(), - definition.clone(), - ); - self.client().execute_operation(op, None).await - } - - /// Drops the search index with the given name. - pub async fn drop_search_index( - &self, - name: impl AsRef, - _options: impl Into>, - ) -> Result<()> { - let op = DropSearchIndex::new(self.namespace(), name.as_ref().to_string()); - self.client().execute_operation(op, None).await - } - - /// Gets index information for one or more search indexes in the collection. - /// - /// If name is not specified, information for all indexes on the specified collection will be - /// returned. - pub async fn list_search_indexes( - &self, - name: impl Into>, - aggregation_options: impl Into>, - _list_index_options: impl Into>, - ) -> Result> { - let mut inner = doc! {}; - if let Some(name) = name.into() { - inner.insert("name", name.to_string()); - } - self.clone_unconcerned() - .aggregate(vec![doc! { - "$listSearchIndexes": inner, - }]) - .with_options(aggregation_options) - .await - } -} - /// Specifies the options for a search index. #[derive(Debug, Clone, Default, TypedBuilder, Serialize, Deserialize)] #[builder(field_defaults(default, setter(into)))] diff --git a/src/selection_criteria.rs b/src/selection_criteria.rs index 640475a6d..250dec83c 100644 --- a/src/selection_criteria.rs +++ b/src/selection_criteria.rs @@ -66,8 +66,8 @@ impl SelectionCriteria { S: serde::Serializer, { match selection_criteria { - Some(SelectionCriteria::ReadPreference(pref)) => { - ReadPreference::serialize_for_client_options(pref, serializer) + Some(SelectionCriteria::ReadPreference(read_preference)) => { + read_preference.serialize(serializer) } _ => serializer.serialize_none(), } @@ -98,64 +98,53 @@ pub type Predicate = Arc bool>; /// See the [MongoDB docs](https://www.mongodb.com/docs/manual/core/read-preference) for more details. #[allow(missing_docs)] #[derive(Clone, Debug, PartialEq)] +#[non_exhaustive] pub enum ReadPreference { /// Only route this operation to the primary. Primary, /// Only route this operation to a secondary. - Secondary { options: ReadPreferenceOptions }, + Secondary { + options: Option, + }, /// Route this operation to the primary if it's available, but fall back to the secondaries if /// not. - PrimaryPreferred { options: ReadPreferenceOptions }, + PrimaryPreferred { + options: Option, + }, /// Route this operation to a secondary if one is available, but fall back to the primary if /// not. 
- SecondaryPreferred { options: ReadPreferenceOptions }, + SecondaryPreferred { + options: Option, + }, /// Route this operation to the node with the least network latency regardless of whether it's /// the primary or a secondary. - Nearest { options: ReadPreferenceOptions }, + Nearest { + options: Option, + }, } impl std::fmt::Display for ReadPreference { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{{ Mode: ")?; - let opts_ref = match self { - ReadPreference::Primary => { - write!(f, "Primary")?; - None - } - ReadPreference::Secondary { options } => { - write!(f, "Secondary")?; - Some(options) - } - ReadPreference::PrimaryPreferred { options } => { - write!(f, "PrimaryPreferred")?; - Some(options) - } - ReadPreference::SecondaryPreferred { options } => { - write!(f, "SecondaryPreferred")?; - Some(options) + let mut mode = self.mode().to_string(); + mode[0..1].make_ascii_uppercase(); + write!(f, "{{ Mode: {}", mode)?; + + if let Some(options) = self.options() { + if let Some(ref tag_sets) = options.tag_sets { + write!(f, ", Tag Sets: {:?}", tag_sets)?; } - ReadPreference::Nearest { options } => { - write!(f, "Nearest")?; - Some(options) + if let Some(ref max_staleness) = options.max_staleness { + write!(f, ", Max Staleness: {:?}", max_staleness)?; } - }; - if let Some(opts) = opts_ref { - if !opts.is_default() { - if let Some(ref tag_sets) = opts.tag_sets { - write!(f, ", Tag Sets: {:?}", tag_sets)?; - } - if let Some(ref max_staleness) = opts.max_staleness { - write!(f, ", Max Staleness: {:?}", max_staleness)?; - } - if let Some(ref hedge) = opts.hedge { - write!(f, ", Hedge: {}", hedge.enabled)?; - } + if let Some(ref hedge) = options.hedge { + write!(f, ", Hedge: {}", hedge.enabled)?; } } + write!(f, " }}") } } @@ -172,29 +161,28 @@ impl<'de> Deserialize<'de> for ReadPreference { #[serde(flatten)] options: ReadPreferenceOptions, } - let preference = ReadPreferenceHelper::deserialize(deserializer)?; - match preference.mode.to_ascii_lowercase().as_str() { + let helper = ReadPreferenceHelper::deserialize(deserializer)?; + match helper.mode.to_ascii_lowercase().as_str() { "primary" => { - if !preference.options.is_default() { - return Err(D::Error::custom(&format!( - "no options can be specified with read preference mode = primary, but got \ - {:?}", - preference.options + if !helper.options.is_default() { + return Err(D::Error::custom(format!( + "cannot specify options for primary read preference, got {:?}", + helper.options ))); } Ok(ReadPreference::Primary) } "secondary" => Ok(ReadPreference::Secondary { - options: preference.options, + options: Some(helper.options), }), "primarypreferred" => Ok(ReadPreference::PrimaryPreferred { - options: preference.options, + options: Some(helper.options), }), "secondarypreferred" => Ok(ReadPreference::SecondaryPreferred { - options: preference.options, + options: Some(helper.options), }), "nearest" => Ok(ReadPreference::Nearest { - options: preference.options, + options: Some(helper.options), }), other => Err(D::Error::custom(format!( "Unknown read preference mode: {}", @@ -211,35 +199,17 @@ impl Serialize for ReadPreference { { #[serde_with::skip_serializing_none] #[derive(Serialize)] - #[serde(rename_all = "camelCase", deny_unknown_fields)] + #[serde(rename_all = "camelCase")] struct ReadPreferenceHelper<'a> { mode: &'static str, #[serde(flatten)] options: Option<&'a ReadPreferenceOptions>, } - let helper = match self { - ReadPreference::Primary => ReadPreferenceHelper { - mode: "primary", - options: None, - }, 
- ReadPreference::PrimaryPreferred { options } => ReadPreferenceHelper { - mode: "primaryPreferred", - options: Some(options), - }, - ReadPreference::Secondary { options } => ReadPreferenceHelper { - mode: "secondary", - options: Some(options), - }, - ReadPreference::SecondaryPreferred { options } => ReadPreferenceHelper { - mode: "secondaryPreferred", - options: Some(options), - }, - ReadPreference::Nearest { options } => ReadPreferenceHelper { - mode: "nearest", - options: Some(options), - }, - }; + let helper = ReadPreferenceHelper { + mode: self.mode(), + options: self.options(), + }; helper.serialize(serializer) } } @@ -253,6 +223,7 @@ impl Serialize for ReadPreference { pub struct ReadPreferenceOptions { /// Specifies which replica set members should be considered for operations. Each tag set will /// be checked in order until one or more servers is found with each tag in the set. + #[serde(alias = "tag_sets")] pub tag_sets: Option>, /// Specifies the maximum amount of lag behind the primary that a secondary can be to be @@ -308,19 +279,37 @@ impl HedgedReadOptions { } impl ReadPreference { - pub(crate) fn max_staleness(&self) -> Option { + pub(crate) fn mode(&self) -> &'static str { match self { - ReadPreference::Primary => None, - ReadPreference::Secondary { ref options } - | ReadPreference::PrimaryPreferred { ref options } - | ReadPreference::SecondaryPreferred { ref options } - | ReadPreference::Nearest { ref options } => options.max_staleness, + Self::Primary => "primary", + Self::Secondary { .. } => "secondary", + Self::PrimaryPreferred { .. } => "primaryPreferred", + Self::SecondaryPreferred { .. } => "secondaryPreferred", + Self::Nearest { .. } => "nearest", } } + pub(crate) fn options(&self) -> Option<&ReadPreferenceOptions> { + match self { + Self::Primary => None, + Self::Secondary { options } + | Self::PrimaryPreferred { options } + | Self::SecondaryPreferred { options } + | Self::Nearest { options } => options.as_ref(), + } + } + + pub(crate) fn max_staleness(&self) -> Option { + self.options().and_then(|options| options.max_staleness) + } + + pub(crate) fn tag_sets(&self) -> Option<&Vec> { + self.options().and_then(|options| options.tag_sets.as_ref()) + } + pub(crate) fn with_tags(mut self, tag_sets: Vec) -> Result { let options = match self { - ReadPreference::Primary => { + Self::Primary => { return Err(ErrorKind::InvalidArgument { message: "read preference tags can only be specified when a non-primary mode \ is specified" @@ -328,13 +317,13 @@ impl ReadPreference { } .into()); } - ReadPreference::Secondary { ref mut options } => options, - ReadPreference::PrimaryPreferred { ref mut options } => options, - ReadPreference::SecondaryPreferred { ref mut options } => options, - ReadPreference::Nearest { ref mut options } => options, + Self::Secondary { ref mut options } => options, + Self::PrimaryPreferred { ref mut options } => options, + Self::SecondaryPreferred { ref mut options } => options, + Self::Nearest { ref mut options } => options, }; - options.tag_sets = Some(tag_sets); + options.get_or_insert_with(Default::default).tag_sets = Some(tag_sets); Ok(self) } @@ -355,59 +344,10 @@ impl ReadPreference { ReadPreference::Nearest { ref mut options } => options, }; - options.max_staleness = Some(max_staleness); + options.get_or_insert_with(Default::default).max_staleness = Some(max_staleness); Ok(self) } - - #[cfg(test)] - pub(crate) fn serialize_for_client_options( - read_preference: &ReadPreference, - serializer: S, - ) -> std::result::Result - where - S: 
serde::Serializer, - { - #[derive(serde::Serialize)] - struct ReadPreferenceHelper<'a> { - readpreference: &'a str, - - readpreferencetags: Option<&'a Vec>>, - - #[serde(serialize_with = "serde_util::duration_option_as_int_seconds::serialize")] - maxstalenessseconds: Option, - } - - let state = match read_preference { - ReadPreference::Primary => ReadPreferenceHelper { - readpreference: "primary", - readpreferencetags: None, - maxstalenessseconds: None, - }, - ReadPreference::PrimaryPreferred { options } => ReadPreferenceHelper { - readpreference: "primaryPreferred", - readpreferencetags: options.tag_sets.as_ref(), - maxstalenessseconds: options.max_staleness, - }, - ReadPreference::Secondary { options } => ReadPreferenceHelper { - readpreference: "secondary", - readpreferencetags: options.tag_sets.as_ref(), - maxstalenessseconds: options.max_staleness, - }, - ReadPreference::SecondaryPreferred { options } => ReadPreferenceHelper { - readpreference: "secondaryPreferred", - readpreferencetags: options.tag_sets.as_ref(), - maxstalenessseconds: options.max_staleness, - }, - ReadPreference::Nearest { options } => ReadPreferenceHelper { - readpreference: "nearest", - readpreferencetags: options.tag_sets.as_ref(), - maxstalenessseconds: options.max_staleness, - }, - }; - - state.serialize(serializer) - } } /// A read preference tag set. See the documentation [here](https://www.mongodb.com/docs/manual/tutorial/configure-replica-set-tag-sets/) for more details. @@ -420,9 +360,11 @@ mod test { #[test] fn hedged_read_included_in_document() { - let options = ReadPreferenceOptions::builder() - .hedge(HedgedReadOptions { enabled: true }) - .build(); + let options = Some( + ReadPreferenceOptions::builder() + .hedge(HedgedReadOptions { enabled: true }) + .build(), + ); let read_pref = ReadPreference::Secondary { options }; let doc = bson::to_document(&read_pref).unwrap(); diff --git a/src/serde_util.rs b/src/serde_util.rs index fc1f5b721..ee3f01f55 100644 --- a/src/serde_util.rs +++ b/src/serde_util.rs @@ -21,9 +21,13 @@ pub(crate) mod duration_option_as_int_seconds { serializer: S, ) -> std::result::Result { match val { - Some(duration) if duration.as_secs() > i32::MAX as u64 => { - serializer.serialize_i64(duration.as_secs() as i64) - } + Some(duration) if duration.as_secs() > i32::MAX as u64 => serializer.serialize_i64( + duration + .as_secs() + .try_into() + .map_err(serde::ser::Error::custom)?, + ), + #[allow(clippy::cast_possible_truncation)] Some(duration) => serializer.serialize_i32(duration.as_secs() as i32), None => serializer.serialize_none(), } @@ -45,9 +49,13 @@ pub(crate) fn serialize_duration_option_as_int_millis( serializer: S, ) -> std::result::Result { match val { - Some(duration) if duration.as_millis() > i32::MAX as u128 => { - serializer.serialize_i64(duration.as_millis() as i64) - } + Some(duration) if duration.as_millis() > i32::MAX as u128 => serializer.serialize_i64( + duration + .as_millis() + .try_into() + .map_err(serde::ser::Error::custom)?, + ), + #[allow(clippy::cast_possible_truncation)] Some(duration) => serializer.serialize_i32(duration.as_millis() as i32), None => serializer.serialize_none(), } @@ -80,6 +88,7 @@ pub(crate) fn serialize_u32_option_as_batch_size( serializer: S, ) -> std::result::Result { match val { + #[allow(clippy::cast_possible_wrap)] Some(val) if *val <= std::i32::MAX as u32 => (doc! { "batchSize": (*val as i32) }) @@ -144,6 +153,7 @@ where } let date_time = match AwsDateTime::deserialize(deserializer)? 
{ + #[allow(clippy::cast_possible_truncation)] AwsDateTime::Double(seconds) => { let millis = seconds * 1000.0; bson::DateTime::from_millis(millis as i64) diff --git a/src/sync/change_stream.rs b/src/sync/change_stream.rs index cac5f42ce..516900c5d 100644 --- a/src/sync/change_stream.rs +++ b/src/sync/change_stream.rs @@ -36,7 +36,7 @@ use super::ClientSession; /// # let client = Client::with_uri_str("mongodb://example.com")?; /// # let coll = client.database("foo").collection("bar"); /// let mut change_stream = coll.watch().run()?; -/// coll.insert_one(doc! { "x": 1 }, None)?; +/// coll.insert_one(doc! { "x": 1 }).run()?; /// for event in change_stream { /// let event = event?; /// println!("operation performed: {:?}, document: {:?}", event.operation_type, event.full_document); @@ -192,7 +192,7 @@ where /// let mut cs = coll.watch().session(&mut session).run()?; /// while let Some(event) = cs.next(&mut session)? { /// let id = bson::to_bson(&event.id)?; - /// other_coll.insert_one_with_session(doc! { "id": id }, None, &mut session)?; + /// other_coll.insert_one(doc! { "id": id }).session(&mut session).run()?; /// } /// # Ok::<(), mongodb::error::Error>(()) /// # }; diff --git a/src/sync/client/session.rs b/src/sync/client/session.rs index 8272f5fef..18070afec 100644 --- a/src/sync/client/session.rs +++ b/src/sync/client/session.rs @@ -1,11 +1,5 @@ use super::Client; -use crate::{ - bson::Document, - client::session::ClusterTime, - error::Result, - options::TransactionOptions, - ClientSession as AsyncClientSession, -}; +use crate::{bson::Document, client::session::ClusterTime, ClientSession as AsyncClientSession}; /// A MongoDB client session. This struct represents a logical session used for ordering sequential /// operations. To create a `ClientSession`, call `start_session` on a @@ -32,6 +26,12 @@ impl<'a> From<&'a mut ClientSession> for &'a mut AsyncClientSession { } impl ClientSession { + pub(crate) fn new(async_client_session: AsyncClientSession) -> Self { + Self { + async_client_session, + } + } + /// The client used to create this session. pub fn client(&self) -> Client { self.async_client_session.client().into() @@ -53,162 +53,4 @@ impl ClientSession { pub fn advance_cluster_time(&mut self, to: &ClusterTime) { self.async_client_session.advance_cluster_time(to) } - - /// Starts a new transaction on this session with the given `TransactionOptions`. If no options - /// are provided, the session's `defaultTransactionOptions` will be used. This session must - /// be passed into each operation within the transaction; otherwise, the operation will be - /// executed outside of the transaction. - /// - /// ```rust - /// # use mongodb::{bson::{doc, Document}, error::Result, sync::{Client, ClientSession}}; - /// # - /// # async fn do_stuff() -> Result<()> { - /// # let client = Client::with_uri_str("mongodb://example.com")?; - /// # let coll = client.database("foo").collection::("bar"); - /// # let mut session = client.start_session().run()?; - /// session.start_transaction(None)?; - /// let result = coll.insert_one_with_session(doc! { "x": 1 }, None, &mut session)?; - /// session.commit_transaction()?; - /// # Ok(()) - /// # } - /// ``` - pub fn start_transaction( - &mut self, - options: impl Into>, - ) -> Result<()> { - crate::sync::TOKIO_RUNTIME.block_on(self.async_client_session.start_transaction(options)) - } - - /// Commits the transaction that is currently active on this session. 
- /// - /// ```rust - /// # use mongodb::{bson::{doc, Document}, error::Result, sync::{Client, ClientSession}}; - /// # - /// # async fn do_stuff() -> Result<()> { - /// # let client = Client::with_uri_str("mongodb://example.com")?; - /// # let coll = client.database("foo").collection::("bar"); - /// # let mut session = client.start_session().run()?; - /// session.start_transaction(None)?; - /// let result = coll.insert_one_with_session(doc! { "x": 1 }, None, &mut session)?; - /// session.commit_transaction()?; - /// # Ok(()) - /// # } - /// ``` - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub fn commit_transaction(&mut self) -> Result<()> { - crate::sync::TOKIO_RUNTIME.block_on(self.async_client_session.commit_transaction()) - } - - /// Aborts the transaction that is currently active on this session. Any open transaction will - /// be aborted automatically in the `Drop` implementation of `ClientSession`. - /// - /// ```rust - /// # use mongodb::{bson::{doc, Document}, error::Result, sync::{Client, ClientSession, Collection}}; - /// # - /// # async fn do_stuff() -> Result<()> { - /// # let client = Client::with_uri_str("mongodb://example.com")?; - /// # let coll = client.database("foo").collection::("bar"); - /// # let mut session = client.start_session().run()?; - /// session.start_transaction(None)?; - /// match execute_transaction(coll, &mut session) { - /// Ok(_) => session.commit_transaction()?, - /// Err(_) => session.abort_transaction()?, - /// } - /// # Ok(()) - /// # } - /// - /// fn execute_transaction(coll: Collection, session: &mut ClientSession) -> Result<()> { - /// coll.insert_one_with_session(doc! { "x": 1 }, None, session)?; - /// coll.delete_one(doc! { "y": 2 }).session(session).run()?; - /// Ok(()) - /// } - /// ``` - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub fn abort_transaction(&mut self) -> Result<()> { - crate::sync::TOKIO_RUNTIME.block_on(self.async_client_session.abort_transaction()) - } - - /// Starts a transaction, runs the given callback, and commits or aborts the transaction. - /// Transient transaction errors will cause the callback or the commit to be retried; - /// other errors will cause the transaction to be aborted and the error returned to the - /// caller. If the callback needs to provide its own error information, the - /// [`Error::custom`](crate::error::Error::custom) method can accept an arbitrary payload that - /// can be retrieved via [`Error::get_custom`](crate::error::Error::get_custom). - /// - /// If a command inside the callback fails, it may cause the transaction on the server to be - /// aborted. This situation is normally handled transparently by the driver. However, if the - /// application does not return that error from the callback, the driver will not be able to - /// determine whether the transaction was aborted or not. The driver will then retry the - /// callback indefinitely. To avoid this situation, the application MUST NOT silently handle - /// errors within the callback. If the application needs to handle errors within the - /// callback, it MUST return them after doing so. 
- pub fn with_transaction( - &mut self, - mut callback: F, - options: impl Into>, - ) -> Result - where - F: for<'a> FnMut(&'a mut ClientSession) -> Result, - { - let options = options.into(); - let timeout = std::time::Duration::from_secs(120); - let start = std::time::Instant::now(); - - use crate::{ - client::session::TransactionState, - error::{TRANSIENT_TRANSACTION_ERROR, UNKNOWN_TRANSACTION_COMMIT_RESULT}, - }; - - 'transaction: loop { - self.start_transaction(options.clone())?; - let ret = match callback(self) { - Ok(v) => v, - Err(e) => { - if matches!( - self.async_client_session.transaction.state, - TransactionState::Starting | TransactionState::InProgress - ) { - self.abort_transaction()?; - } - if e.contains_label(TRANSIENT_TRANSACTION_ERROR) && start.elapsed() < timeout { - continue 'transaction; - } - return Err(e); - } - }; - if matches!( - self.async_client_session.transaction.state, - TransactionState::None - | TransactionState::Aborted - | TransactionState::Committed { .. } - ) { - return Ok(ret); - } - 'commit: loop { - match self.commit_transaction() { - Ok(()) => return Ok(ret), - Err(e) => { - if e.is_max_time_ms_expired_error() || start.elapsed() >= timeout { - return Err(e); - } - if e.contains_label(UNKNOWN_TRANSACTION_COMMIT_RESULT) { - continue 'commit; - } - if e.contains_label(TRANSIENT_TRANSACTION_ERROR) { - continue 'transaction; - } - return Err(e); - } - } - } - } - } } diff --git a/src/sync/coll.rs b/src/sync/coll.rs index ddbba1bed..74c7bcc7c 100644 --- a/src/sync/coll.rs +++ b/src/sync/coll.rs @@ -1,26 +1,5 @@ -use std::{borrow::Borrow, fmt::Debug}; - -use serde::{de::DeserializeOwned, Serialize}; - -use super::{ClientSession, Cursor, SessionCursor}; use crate::{ - bson::Document, - error::Result, - options::{ - FindOneAndDeleteOptions, - FindOneAndReplaceOptions, - FindOneAndUpdateOptions, - FindOneOptions, - FindOptions, - InsertManyOptions, - InsertOneOptions, - ReadConcern, - ReplaceOptions, - SelectionCriteria, - UpdateModifications, - WriteConcern, - }, - results::{InsertManyResult, InsertOneResult, UpdateResult}, + options::{ReadConcern, SelectionCriteria, WriteConcern}, Collection as AsyncCollection, Namespace, }; @@ -50,7 +29,7 @@ use crate::{ /// /// std::thread::spawn(move || { /// // Perform operations with `coll_ref`. For example: -/// coll_ref.insert_one(doc! { "x": i }, None); +/// coll_ref.insert_one(doc! { "x": i }); /// }); /// } /// # @@ -62,17 +41,23 @@ use crate::{ /// ``` #[derive(Clone, Debug)] -pub struct Collection { +pub struct Collection +where + T: Send + Sync, +{ pub(crate) async_collection: AsyncCollection, } -impl Collection { +impl Collection +where + T: Send + Sync, +{ pub(crate) fn new(async_collection: AsyncCollection) -> Self { Self { async_collection } } /// Gets a clone of the `Collection` with a different type `U`. - pub fn clone_with_type(&self) -> Collection { + pub fn clone_with_type(&self) -> Collection { Collection::new(self.async_collection.clone_with_type()) } @@ -105,315 +90,4 @@ impl Collection { pub fn write_concern(&self) -> Option<&WriteConcern> { self.async_collection.write_concern() } - - /// Finds the documents in the collection matching `filter`. - pub fn find( - &self, - filter: impl Into>, - options: impl Into>, - ) -> Result> { - crate::sync::TOKIO_RUNTIME - .block_on(self.async_collection.find(filter.into(), options.into())) - .map(Cursor::new) - } - - /// Finds the documents in the collection matching `filter` using the provided `ClientSession`. 
- pub fn find_with_session( - &self, - filter: impl Into>, - options: impl Into>, - session: &mut ClientSession, - ) -> Result> { - crate::sync::TOKIO_RUNTIME - .block_on(self.async_collection.find_with_session( - filter.into(), - options.into(), - &mut session.async_client_session, - )) - .map(SessionCursor::new) - } -} - -impl Collection -where - T: DeserializeOwned + Unpin + Send + Sync, -{ - /// Finds a single document in the collection matching `filter`. - pub fn find_one( - &self, - filter: impl Into>, - options: impl Into>, - ) -> Result> { - crate::sync::TOKIO_RUNTIME.block_on( - self.async_collection - .find_one(filter.into(), options.into()), - ) - } - - /// Finds a single document in the collection matching `filter` using the provided - /// `ClientSession`. - pub fn find_one_with_session( - &self, - filter: impl Into>, - options: impl Into>, - session: &mut ClientSession, - ) -> Result> { - crate::sync::TOKIO_RUNTIME.block_on(self.async_collection.find_one_with_session( - filter.into(), - options.into(), - &mut session.async_client_session, - )) - } -} - -impl Collection -where - T: DeserializeOwned, -{ - /// Atomically finds up to one document in the collection matching `filter` and deletes it. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub fn find_one_and_delete( - &self, - filter: Document, - options: impl Into>, - ) -> Result> { - crate::sync::TOKIO_RUNTIME.block_on( - self.async_collection - .find_one_and_delete(filter, options.into()), - ) - } - - /// Atomically finds up to one document in the collection matching `filter` and deletes it using - /// the provided `ClientSession`. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub fn find_one_and_delete_with_session( - &self, - filter: Document, - options: impl Into>, - session: &mut ClientSession, - ) -> Result> { - crate::sync::TOKIO_RUNTIME.block_on(self.async_collection.find_one_and_delete_with_session( - filter, - options.into(), - &mut session.async_client_session, - )) - } - - /// Atomically finds up to one document in the collection matching `filter` and updates it. - /// Both `Document` and `Vec` implement `Into`, so either can be - /// passed in place of constructing the enum case. Note: pipeline updates are only supported - /// in MongoDB 4.2+. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub fn find_one_and_update( - &self, - filter: Document, - update: impl Into, - options: impl Into>, - ) -> Result> { - crate::sync::TOKIO_RUNTIME.block_on(self.async_collection.find_one_and_update( - filter, - update.into(), - options.into(), - )) - } - - /// Atomically finds up to one document in the collection matching `filter` and updates it using - /// the provided `ClientSession`. Both `Document` and `Vec` implement - /// `Into`, so either can be passed in place of constructing the enum - /// case. Note: pipeline updates are only supported in MongoDB 4.2+. 
- /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub fn find_one_and_update_with_session( - &self, - filter: Document, - update: impl Into, - options: impl Into>, - session: &mut ClientSession, - ) -> Result> { - crate::sync::TOKIO_RUNTIME.block_on(self.async_collection.find_one_and_update_with_session( - filter, - update.into(), - options.into(), - &mut session.async_client_session, - )) - } -} - -impl Collection -where - T: Serialize + DeserializeOwned, -{ - /// Atomically finds up to one document in the collection matching `filter` and replaces it with - /// `replacement`. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub fn find_one_and_replace( - &self, - filter: Document, - replacement: T, - options: impl Into>, - ) -> Result> { - crate::sync::TOKIO_RUNTIME.block_on(self.async_collection.find_one_and_replace( - filter, - replacement, - options.into(), - )) - } - - /// Atomically finds up to one document in the collection matching `filter` and replaces it with - /// `replacement` using the provided `ClientSession`. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub fn find_one_and_replace_with_session( - &self, - filter: Document, - replacement: T, - options: impl Into>, - session: &mut ClientSession, - ) -> Result> { - crate::sync::TOKIO_RUNTIME.block_on( - self.async_collection.find_one_and_replace_with_session( - filter, - replacement, - options.into(), - &mut session.async_client_session, - ), - ) - } -} - -impl Collection -where - T: Serialize, -{ - /// Inserts the documents in `docs` into the collection. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub fn insert_many( - &self, - docs: impl IntoIterator>, - options: impl Into>, - ) -> Result { - crate::sync::TOKIO_RUNTIME.block_on(self.async_collection.insert_many(docs, options.into())) - } - - /// Inserts the documents in `docs` into the collection using the provided `ClientSession`. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub fn insert_many_with_session( - &self, - docs: impl IntoIterator>, - options: impl Into>, - session: &mut ClientSession, - ) -> Result { - crate::sync::TOKIO_RUNTIME.block_on(self.async_collection.insert_many_with_session( - docs, - options.into(), - &mut session.async_client_session, - )) - } - - /// Inserts `doc` into the collection. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. 
See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub fn insert_one( - &self, - doc: impl Borrow, - options: impl Into>, - ) -> Result { - crate::sync::TOKIO_RUNTIME.block_on( - self.async_collection - .insert_one(doc.borrow(), options.into()), - ) - } - - /// Inserts `doc` into the collection using the provided `ClientSession`. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub fn insert_one_with_session( - &self, - doc: impl Borrow, - options: impl Into>, - session: &mut ClientSession, - ) -> Result { - crate::sync::TOKIO_RUNTIME.block_on(self.async_collection.insert_one_with_session( - doc.borrow(), - options.into(), - &mut session.async_client_session, - )) - } - - /// Replaces up to one document matching `query` in the collection with `replacement`. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub fn replace_one( - &self, - query: Document, - replacement: impl Borrow, - options: impl Into>, - ) -> Result { - crate::sync::TOKIO_RUNTIME.block_on(self.async_collection.replace_one( - query, - replacement.borrow(), - options.into(), - )) - } - - /// Replaces up to one document matching `query` in the collection with `replacement` using the - /// provided `ClientSession`. - /// - /// This operation will retry once upon failure if the connection and encountered error support - /// retryability. See the documentation - /// [here](https://www.mongodb.com/docs/manual/core/retryable-writes/) for more information on - /// retryable writes. - pub fn replace_one_with_session( - &self, - query: Document, - replacement: impl Borrow, - options: impl Into>, - session: &mut ClientSession, - ) -> Result { - crate::sync::TOKIO_RUNTIME.block_on(self.async_collection.replace_one_with_session( - query, - replacement.borrow(), - options.into(), - &mut session.async_client_session, - )) - } } diff --git a/src/sync/cursor.rs b/src/sync/cursor.rs index 779848117..5c1d7724a 100644 --- a/src/sync/cursor.rs +++ b/src/sync/cursor.rs @@ -31,12 +31,12 @@ use crate::{ /// documents it yields using a for loop: /// /// ```rust -/// # use mongodb::{bson::Document, sync::Client, error::Result}; +/// # use mongodb::{bson::{doc, Document}, sync::Client, error::Result}; /// # /// # fn do_stuff() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://example.com")?; /// # let coll = client.database("foo").collection::("bar"); -/// # let mut cursor = coll.find(None, None)?; +/// # let mut cursor = coll.find(doc! {}).run()?; /// # /// for doc in cursor { /// println!("{}", doc?) @@ -60,7 +60,7 @@ use crate::{ /// # fn do_stuff() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://example.com")?; /// # let coll = client.database("foo").collection("bar"); -/// # let cursor = coll.find(Some(doc! { "x": 1 }), None)?; +/// # let cursor = coll.find(doc! { "x": 1 }).run()?; /// # /// let results: Vec> = cursor.collect(); /// # Ok(()) @@ -92,11 +92,11 @@ impl Cursor { /// calling [`Cursor::advance`] first or after [`Cursor::advance`] returns an error / false. 
/// /// ``` - /// # use mongodb::{sync::Client, bson::Document, error::Result}; + /// # use mongodb::{sync::Client, bson::{Document, doc}, error::Result}; /// # fn foo() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://localhost:27017")?; /// # let coll = client.database("stuff").collection::("stuff"); - /// let mut cursor = coll.find(None, None)?; + /// let mut cursor = coll.find(doc! {}).run()?; /// while cursor.advance()? { /// println!("{:?}", cursor.deserialize_current()?); /// } @@ -115,11 +115,11 @@ impl Cursor { /// or without calling [`Cursor::advance`] at all may result in a panic. /// /// ``` - /// # use mongodb::{sync::Client, bson::Document, error::Result}; + /// # use mongodb::{sync::Client, bson::{doc, Document}, error::Result}; /// # fn foo() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://localhost:27017")?; /// # let coll = client.database("stuff").collection::("stuff"); - /// let mut cursor = coll.find(None, None)?; + /// let mut cursor = coll.find(doc! {}).run()?; /// while cursor.advance()? { /// println!("{:?}", cursor.current()); /// } @@ -138,7 +138,7 @@ impl Cursor { /// true or without calling [`Cursor::advance`] at all may result in a panic. /// /// ``` - /// # use mongodb::{sync::Client, error::Result}; + /// # use mongodb::{sync::Client, error::Result, bson::doc}; /// # fn foo() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://localhost:27017")?; /// # let db = client.database("foo"); @@ -151,7 +151,7 @@ impl Cursor { /// } /// /// let coll = db.collection::("cat"); - /// let mut cursor = coll.find(None, None)?; + /// let mut cursor = coll.find(doc! {}).run()?; /// while cursor.advance()? { /// println!("{:?}", cursor.deserialize_current()?); /// } @@ -181,13 +181,13 @@ where /// one. To iterate, retrieve a [`SessionCursorIter]` using [`SessionCursor::iter`]: /// /// ```rust -/// # use mongodb::{bson::Document, sync::Client, error::Result}; +/// # use mongodb::{bson::{doc, Document}, sync::Client, error::Result}; /// # /// # fn do_stuff() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://example.com")?; /// # let mut session = client.start_session().run()?; /// # let coll = client.database("foo").collection::("bar"); -/// # let mut cursor = coll.find_with_session(None, None, &mut session)?; +/// # let mut cursor = coll.find(doc! {}).session(&mut session).run()?; /// # /// for doc in cursor.iter(&mut session) { /// println!("{}", doc?) @@ -220,12 +220,12 @@ impl SessionCursor { /// calling [`Cursor::advance`] first or after [`Cursor::advance`] returns an error / false. /// /// ``` - /// # use mongodb::{sync::Client, bson::Document, error::Result}; + /// # use mongodb::{sync::Client, bson::{doc, Document}, error::Result}; /// # fn foo() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://localhost:27017")?; /// # let mut session = client.start_session().run()?; /// # let coll = client.database("stuff").collection::("stuff"); - /// let mut cursor = coll.find_with_session(None, None, &mut session)?; + /// let mut cursor = coll.find(doc! {}).session(&mut session).run()?; /// while cursor.advance(&mut session)? { /// println!("{:?}", cursor.deserialize_current()?); /// } @@ -245,12 +245,12 @@ impl SessionCursor { /// or without calling [`Cursor::advance`] at all may result in a panic. 
/// /// ``` - /// # use mongodb::{sync::Client, bson::Document, error::Result}; + /// # use mongodb::{sync::Client, bson::{doc, Document}, error::Result}; /// # fn foo() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://localhost:27017")?; /// # let mut session = client.start_session().run()?; /// # let coll = client.database("stuff").collection::("stuff"); - /// let mut cursor = coll.find_with_session(None, None, &mut session)?; + /// let mut cursor = coll.find(doc! {}).session(&mut session).run()?; /// while cursor.advance(&mut session)? { /// println!("{:?}", cursor.current()); /// } @@ -269,7 +269,7 @@ impl SessionCursor { /// true or without calling [`Cursor::advance`] at all may result in a panic. /// /// ``` - /// # use mongodb::{sync::Client, error::Result}; + /// # use mongodb::{sync::Client, error::Result, bson::doc}; /// # fn foo() -> Result<()> { /// # let client = Client::with_uri_str("mongodb://localhost:27017")?; /// # let mut session = client.start_session().run()?; @@ -283,7 +283,7 @@ impl SessionCursor { /// } /// /// let coll = db.collection::("cat"); - /// let mut cursor = coll.find_with_session(None, None, &mut session)?; + /// let mut cursor = coll.find(doc! {}).session(&mut session).run()?; /// while cursor.advance(&mut session)? { /// println!("{:?}", cursor.deserialize_current()?); /// } @@ -327,9 +327,9 @@ where /// # let coll = client.database("foo").collection::("bar"); /// # let other_coll = coll.clone(); /// # let mut session = client.start_session().run()?; - /// let mut cursor = coll.find_with_session(doc! { "x": 1 }, None, &mut session)?; + /// let mut cursor = coll.find(doc! { "x": 1 }).session(&mut session).run()?; /// while let Some(doc) = cursor.next(&mut session).transpose()? { - /// other_coll.insert_one_with_session(doc, None, &mut session)?; + /// other_coll.insert_one(doc).session(&mut session).run()?; /// } /// # Ok::<(), mongodb::error::Error>(()) /// # } diff --git a/src/sync/db.rs b/src/sync/db.rs index e10e2fbdf..970470a61 100644 --- a/src/sync/db.rs +++ b/src/sync/db.rs @@ -80,7 +80,7 @@ impl Database { /// /// This method does not send or receive anything across the wire to the database, so it can be /// used repeatedly without incurring any costs from I/O. - pub fn collection(&self, name: &str) -> Collection { + pub fn collection(&self, name: &str) -> Collection { Collection::new(self.async_database.collection(name)) } @@ -90,7 +90,7 @@ impl Database { /// /// This method does not send or receive anything across the wire to the database, so it can be /// used repeatedly without incurring any costs from I/O. 
- pub fn collection_with_options( + pub fn collection_with_options( &self, name: &str, options: CollectionOptions, diff --git a/src/sync/gridfs.rs b/src/sync/gridfs.rs index 844e9fdef..88389dddc 100644 --- a/src/sync/gridfs.rs +++ b/src/sync/gridfs.rs @@ -4,23 +4,15 @@ use std::io::{Read, Write}; use futures_util::{AsyncReadExt, AsyncWriteExt}; -use super::Cursor; use crate::{ - bson::{Bson, Document}, + bson::Bson, error::Result, gridfs::{ GridFsBucket as AsyncGridFsBucket, GridFsDownloadStream as AsyncGridFsDownloadStream, GridFsUploadStream as AsyncGridFsUploadStream, }, - options::{ - GridFsDownloadByNameOptions, - GridFsFindOptions, - GridFsUploadOptions, - ReadConcern, - SelectionCriteria, - WriteConcern, - }, + options::{ReadConcern, SelectionCriteria, WriteConcern}, }; pub use crate::gridfs::FilesCollectionDocument; @@ -35,7 +27,7 @@ pub use crate::gridfs::FilesCollectionDocument; /// `GridFsBucket` uses [`std::sync::Arc`] internally, so it can be shared safely across threads or /// async tasks. pub struct GridFsBucket { - async_bucket: AsyncGridFsBucket, + pub(crate) async_bucket: AsyncGridFsBucket, } impl GridFsBucket { @@ -57,35 +49,6 @@ impl GridFsBucket { pub fn selection_criteria(&self) -> Option<&SelectionCriteria> { self.async_bucket.selection_criteria() } - - /// Deletes the [`FilesCollectionDocument`] with the given `id` and its associated chunks from - /// this bucket. This method returns an error if the `id` does not match any files in the - /// bucket. - pub fn delete(&self, id: Bson) -> Result<()> { - crate::sync::TOKIO_RUNTIME.block_on(self.async_bucket.delete(id)) - } - - /// Finds the [`FilesCollectionDocument`]s in the bucket matching the given `filter`. - pub fn find( - &self, - filter: Document, - options: impl Into>, - ) -> Result> { - crate::sync::TOKIO_RUNTIME - .block_on(self.async_bucket.find(filter, options)) - .map(Cursor::new) - } - - /// Renames the file with the given `id` to `new_filename`. This method returns an error if the - /// `id` does not match any files in the bucket. - pub fn rename(&self, id: Bson, new_filename: impl AsRef) -> Result<()> { - crate::sync::TOKIO_RUNTIME.block_on(self.async_bucket.rename(id, new_filename)) - } - - /// Removes all of the files and their associated chunks from this bucket. - pub fn drop(&self) -> Result<()> { - crate::sync::TOKIO_RUNTIME.block_on(self.async_bucket.drop()) - } } /// A stream from which a file stored in a GridFS bucket can be downloaded. @@ -99,11 +62,14 @@ impl GridFsBucket { /// use std::io::Read; /// /// let mut buf = Vec::new(); -/// let mut download_stream = bucket.open_download_stream(id)?; +/// let mut download_stream = bucket.open_download_stream(id).run()?; /// download_stream.read_to_end(&mut buf)?; /// # Ok(()) /// # } /// ``` +/// +/// If the destination is a local file (or other `Write` byte sink), the contents of the stream +/// can be efficiently written to it with [`std::io::copy`]. pub struct GridFsDownloadStream { async_stream: AsyncGridFsDownloadStream, } @@ -115,42 +81,11 @@ impl Read for GridFsDownloadStream { } impl GridFsDownloadStream { - fn new(async_stream: AsyncGridFsDownloadStream) -> Self { + pub(crate) fn new(async_stream: AsyncGridFsDownloadStream) -> Self { Self { async_stream } } } -// Download API -impl GridFsBucket { - /// Opens and returns a [`GridFsDownloadStream`] from which the application can read - /// the contents of the stored file specified by `id`. 
- pub fn open_download_stream(&self, id: Bson) -> Result { - crate::sync::TOKIO_RUNTIME - .block_on(self.async_bucket.open_download_stream(id)) - .map(GridFsDownloadStream::new) - } - - /// Opens and returns a [`GridFsDownloadStream`] from which the application can read - /// the contents of the stored file specified by `filename`. - /// - /// If there are multiple files in the bucket with the given filename, the `revision` in the - /// options provided is used to determine which one to download. See the documentation for - /// [`GridFsDownloadByNameOptions`] for details on how to specify a revision. If no revision is - /// provided, the file with `filename` most recently uploaded will be downloaded. - pub fn open_download_stream_by_name( - &self, - filename: impl AsRef, - options: impl Into>, - ) -> Result { - crate::sync::TOKIO_RUNTIME - .block_on( - self.async_bucket - .open_download_stream_by_name(filename, options), - ) - .map(GridFsDownloadStream::new) - } -} - /// A stream to which bytes can be written to be uploaded to a GridFS bucket. /// /// # Uploading to the Stream @@ -167,13 +102,16 @@ impl GridFsBucket { /// use std::io::Write; /// /// let bytes = vec![0u8; 100]; -/// let mut upload_stream = bucket.open_upload_stream("example_file", None); +/// let mut upload_stream = bucket.open_upload_stream("example_file").run()?; /// upload_stream.write_all(&bytes[..])?; /// upload_stream.close()?; /// # Ok(()) /// # } /// ``` /// +/// If the data is a local file (or other `Read` byte source), its contents can be efficiently +/// written to the stream with [`std::io::copy`]. +/// /// # Aborting the Stream /// A stream can be aborted by calling the `abort` method. This will remove any chunks associated /// with the stream from the chunks collection. It is an error to write to, abort, or close the @@ -185,7 +123,7 @@ impl GridFsBucket { /// use std::io::Write; /// /// let bytes = vec![0u8; 100]; -/// let mut upload_stream = bucket.open_upload_stream("example_file", None); +/// let mut upload_stream = bucket.open_upload_stream("example_file").run()?; /// upload_stream.write_all(&bytes[..])?; /// upload_stream.abort()?; /// # Ok(()) @@ -210,6 +148,10 @@ pub struct GridFsUploadStream { } impl GridFsUploadStream { + pub(crate) fn new(async_stream: AsyncGridFsUploadStream) -> Self { + Self { async_stream } + } + /// Gets the stream's unique [`Bson`] identifier. This value will be the `id` field for the /// [`FilesCollectionDocument`] uploaded to the files collection when the stream is closed. pub fn id(&self) -> &Bson { @@ -241,34 +183,3 @@ impl Write for GridFsUploadStream { crate::sync::TOKIO_RUNTIME.block_on(self.async_stream.flush()) } } - -// Upload API -impl GridFsBucket { - /// Creates and returns a [`GridFsUploadStream`] that the application can write the contents of - /// the file to. This method generates a unique [`ObjectId`](crate::bson::oid::ObjectId) for the - /// corresponding [`FilesCollectionDocument`]'s `id` field that can be accessed via the - /// stream's `id` method. - pub fn open_upload_stream( - &self, - filename: impl AsRef, - options: impl Into>, - ) -> GridFsUploadStream { - let async_stream = self.async_bucket.open_upload_stream(filename, options); - GridFsUploadStream { async_stream } - } - - /// Opens a [`GridFsUploadStream`] that the application can write the contents of the file to. - /// The provided `id` will be used for the corresponding [`FilesCollectionDocument`]'s `id` - /// field. 
- pub fn open_upload_stream_with_id( - &self, - id: Bson, - filename: impl AsRef, - options: impl Into>, - ) -> GridFsUploadStream { - let async_stream = self - .async_bucket - .open_upload_stream_with_id(id, filename, options); - GridFsUploadStream { async_stream } - } -} diff --git a/src/sync/test.rs b/src/sync/test.rs index 20ef8421b..cf95e1087 100644 --- a/src/sync/test.rs +++ b/src/sync/test.rs @@ -29,7 +29,11 @@ fn init_db_and_coll(client: &Client, db_name: &str, coll_name: &str) -> Collecti coll } -fn init_db_and_typed_coll(client: &Client, db_name: &str, coll_name: &str) -> Collection { +fn init_db_and_typed_coll( + client: &Client, + db_name: &str, + coll_name: &str, +) -> Collection { let coll = client.database(db_name).collection(coll_name); coll.drop().run().unwrap(); coll @@ -65,7 +69,8 @@ fn client() { client .database(function_name!()) .collection(function_name!()) - .insert_one(Document::new(), None) + .insert_one(Document::new()) + .run() .expect("insert should succeed"); let db_names = client @@ -112,7 +117,8 @@ fn database() { let coll = init_db_and_coll(&client, function_name!(), function_name!()); - coll.insert_one(doc! { "x": 1 }, None) + coll.insert_one(doc! { "x": 1 }) + .run() .expect("insert should succeed"); let coll_names = db @@ -155,12 +161,14 @@ fn collection() { let client = Client::with_options(options).expect("client creation should succeed"); let coll = init_db_and_coll(&client, function_name!(), function_name!()); - coll.insert_one(doc! { "x": 1 }, None) + coll.insert_one(doc! { "x": 1 }) + .run() .expect("insert should succeed"); - let find_options = FindOptions::builder().projection(doc! { "_id": 0 }).build(); let cursor = coll - .find(doc! { "x": 1 }, find_options) + .find(doc! { "x": 1 }) + .projection(doc! { "_id": 0 }) + .run() .expect("find should succeed"); let results = cursor .collect::>>() @@ -217,7 +225,7 @@ fn typed_collection() { str: "hello".into(), }; - assert!(coll.insert_one(my_type, None).is_ok()); + assert!(coll.insert_one(my_type).run().is_ok()); } #[test] @@ -244,7 +252,7 @@ fn transactions() { if error.contains_label(TRANSIENT_TRANSACTION_ERROR) { continue; } else { - session.abort_transaction()?; + session.abort_transaction().run()?; return Err(error); } } @@ -267,17 +275,18 @@ fn transactions() { .expect("create collection should succeed"); session - .start_transaction(None) + .start_transaction() + .run() .expect("start transaction should succeed"); run_transaction_with_retry(&mut session, |s| { - coll.insert_one_with_session(doc! { "x": 1 }, None, s)?; + coll.insert_one(doc! { "x": 1 }).session(s).run()?; Ok(()) }) .unwrap(); loop { - match session.commit_transaction() { + match session.commit_transaction().run() { Ok(()) => { break; } @@ -292,15 +301,17 @@ fn transactions() { } session - .start_transaction(None) + .start_transaction() + .run() .expect("start transaction should succeed"); run_transaction_with_retry(&mut session, |s| { - coll.insert_one_with_session(doc! { "x": 1 }, None, s)?; + coll.insert_one(doc! { "x": 1 }).session(s).run()?; Ok(()) }) .unwrap(); session .abort_transaction() + .run() .expect("abort transaction should succeed"); } @@ -320,7 +331,7 @@ fn collection_generic_bounds() { let coll: Collection = client .database(function_name!()) .collection(function_name!()); - let _result: Result> = coll.find_one(None, None); + let _result: Result> = coll.find_one(doc! 
{}).run(); #[derive(Serialize)] struct Bar; @@ -329,7 +340,7 @@ fn collection_generic_bounds() { let coll: Collection = client .database(function_name!()) .collection(function_name!()); - let _result = coll.insert_one(Bar {}, None); + let _result = coll.insert_one(Bar {}); } #[test] @@ -373,13 +384,17 @@ fn borrowed_deserialization() { Doc { id: 5, foo: "1" }, ]; - coll.insert_many(&docs, None).unwrap(); + coll.insert_many(&docs).run().unwrap(); let options = FindOptions::builder() .batch_size(2) .sort(doc! { "_id": 1 }) .build(); - let mut cursor = coll.find(None, options.clone()).unwrap(); + let mut cursor = coll + .find(doc! {}) + .with_options(options.clone()) + .run() + .unwrap(); let mut i = 0; while cursor.advance().unwrap() { @@ -389,7 +404,12 @@ fn borrowed_deserialization() { } let mut session = client.start_session().run().unwrap(); - let mut cursor = coll.find_with_session(None, options, &mut session).unwrap(); + let mut cursor = coll + .find(doc! {}) + .with_options(options) + .session(&mut session) + .run() + .unwrap(); let mut i = 0; while cursor.advance(&mut session).unwrap() { @@ -410,13 +430,14 @@ fn mixed_sync_and_async() -> Result<()> { sync_db.drop().run()?; sync_db .collection::(COLL_NAME) - .insert_one(doc! { "a": 1 }, None)?; + .insert_one(doc! { "a": 1 }) + .run()?; let mut found = crate::sync::TOKIO_RUNTIME .block_on(async { async_client .database(DB_NAME) .collection::(COLL_NAME) - .find_one(doc! {}, None) + .find_one(doc! {}) .await })? .unwrap(); @@ -434,12 +455,13 @@ fn gridfs() { let upload = vec![0u8; 100]; let mut download = vec![]; - let mut upload_stream = bucket.open_upload_stream("sync gridfs", None); + let mut upload_stream = bucket.open_upload_stream("sync gridfs").run().unwrap(); upload_stream.write_all(&upload[..]).unwrap(); upload_stream.close().unwrap(); let mut download_stream = bucket .open_download_stream(upload_stream.id().clone()) + .run() .unwrap(); download_stream.read_to_end(&mut download).unwrap(); diff --git a/src/test.rs b/src/test.rs index 945617534..ad112504b 100644 --- a/src/test.rs +++ b/src/test.rs @@ -1,3 +1,6 @@ +#![allow(clippy::cast_possible_truncation)] +#![allow(clippy::cast_possible_wrap)] + mod atlas_connectivity; mod atlas_planned_maintenance_testing; #[cfg(feature = "aws-auth")] @@ -6,6 +9,12 @@ mod bulk_write; mod change_stream; mod client; mod coll; +#[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" +))] +mod compression; #[cfg(feature = "in-use-encryption-unstable")] mod csfle; mod cursor; @@ -19,6 +28,8 @@ pub(crate) mod util; #[cfg(feature = "in-use-encryption-unstable")] pub(crate) use self::csfle::{KmsProviderList, KMS_PROVIDERS_MAP}; +#[allow(deprecated)] +pub(crate) use self::util::EventClient; pub(crate) use self::{ spec::{run_spec_test, RunOn, Serverless, Topology}, util::{ @@ -27,8 +38,6 @@ pub(crate) use self::{ file_level_log, log_uncaptured, Event, - EventClient, - EventHandler, FailPoint, FailPointMode, MatchErrExt, @@ -48,7 +57,7 @@ use crate::{ auth::Credential, options::{ServerApi, ServerApiVersion}, }, - options::{ClientOptions, Compressor}, + options::ClientOptions, }; use std::{fs::read_to_string, str::FromStr}; @@ -79,12 +88,6 @@ pub(crate) static LOAD_BALANCED_SINGLE_URI: Lazy> = Lazy::new(|| std::env::var("SINGLE_MONGOS_LB_URI").ok()); pub(crate) static LOAD_BALANCED_MULTIPLE_URI: Lazy> = Lazy::new(|| std::env::var("MULTI_MONGOS_LB_URI").ok()); -pub(crate) static ZSTD_COMPRESSION_ENABLED: Lazy = - Lazy::new(|| 
matches!(std::env::var("ZSTD_COMPRESSION_ENABLED"), Ok(s) if s == "true")); -pub(crate) static ZLIB_COMPRESSION_ENABLED: Lazy = - Lazy::new(|| matches!(std::env::var("ZLIB_COMPRESSION_ENABLED"), Ok(s) if s == "true")); -pub(crate) static SNAPPY_COMPRESSION_ENABLED: Lazy = - Lazy::new(|| matches!(std::env::var("SNAPPY_COMPRESSION_ENABLED"), Ok(s) if s == "true")); pub(crate) static SERVERLESS_ATLAS_USER: Lazy> = Lazy::new(|| std::env::var("SERVERLESS_ATLAS_USER").ok()); pub(crate) static SERVERLESS_ATLAS_PASSWORD: Lazy> = @@ -112,9 +115,14 @@ pub(crate) fn update_options_for_testing(options: &mut ClientOptions) { if options.server_api.is_none() { options.server_api = SERVER_API.clone(); } - if options.compressors.is_none() { - options.compressors = get_compressors(); - } + + #[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" + ))] + set_compressor(options); + if options.credential.is_none() && SERVERLESS_ATLAS_USER.is_some() { options.credential = Some( Credential::builder() @@ -125,35 +133,6 @@ pub(crate) fn update_options_for_testing(options: &mut ClientOptions) { } } -fn get_compressors() -> Option> { - #[allow(unused_mut)] - let mut compressors = vec![]; - - if *SNAPPY_COMPRESSION_ENABLED { - #[cfg(feature = "snappy-compression")] - compressors.push(Compressor::Snappy); - #[cfg(not(feature = "snappy-compression"))] - panic!("To use snappy compression, the \"snappy-compression\" feature flag must be set."); - } - if *ZLIB_COMPRESSION_ENABLED { - #[cfg(feature = "zlib-compression")] - compressors.push(Compressor::Zlib { level: None }); - #[cfg(not(feature = "zlib-compression"))] - panic!("To use zlib compression, the \"zlib-compression\" feature flag must be set."); - } - if *ZSTD_COMPRESSION_ENABLED { - #[cfg(feature = "zstd-compression")] - compressors.push(Compressor::Zstd { level: None }); - #[cfg(not(feature = "zstd-compression"))] - panic!("To use zstd compression, the \"zstd-compression\" feature flag must be set."); - } - if compressors.is_empty() { - None - } else { - Some(compressors) - } -} - fn get_default_uri() -> String { if let Some(uri) = LOAD_BALANCED_SINGLE_URI.clone() { if !uri.is_empty() { @@ -171,3 +150,25 @@ fn get_default_uri() -> String { } "mongodb://localhost:27017".to_string() } + +#[cfg(any( + feature = "zstd-compression", + feature = "zlib-compression", + feature = "snappy-compression" +))] +fn set_compressor(options: &mut ClientOptions) { + use crate::options::Compressor; + + #[cfg(feature = "zstd-compression")] + { + options.compressors = Some(vec![Compressor::Zstd { level: None }]); + } + #[cfg(feature = "zlib-compression")] + { + options.compressors = Some(vec![Compressor::Zlib { level: None }]); + } + #[cfg(feature = "snappy-compression")] + { + options.compressors = Some(vec![Compressor::Snappy]); + } +} diff --git a/src/test/atlas_connectivity.rs b/src/test/atlas_connectivity.rs index 01cbca813..6da943ffa 100644 --- a/src/test/atlas_connectivity.rs +++ b/src/test/atlas_connectivity.rs @@ -34,7 +34,7 @@ async fn run_test(uri_env_var: &str, resolver_config: Option) { .expect("hello should succeed"); let coll = db.collection::("test"); - coll.find_one(None, None) + coll.find_one(doc! 
{}) .await .expect("findOne should succeed"); } diff --git a/src/test/auth_aws.rs b/src/test/auth_aws.rs index 5363718a8..0bd75b82e 100644 --- a/src/test/auth_aws.rs +++ b/src/test/auth_aws.rs @@ -1,5 +1,7 @@ use std::env::{remove_var, set_var, var}; +use bson::doc; + use crate::{bson::Document, client::auth::aws::test_utils::*, test::DEFAULT_URI, Client}; use super::TestClient; @@ -9,7 +11,7 @@ async fn auth_aws() { let client = TestClient::new().await; let coll = client.database("aws").collection::("somecoll"); - coll.find_one(None, None).await.unwrap(); + coll.find_one(doc! {}).await.unwrap(); } // The TestClient performs operations upon creation that trigger authentication, so the credential @@ -29,7 +31,7 @@ async fn credential_caching() { let client = get_client().await; let coll = client.database("aws").collection::("somecoll"); - coll.find_one(None, None).await.unwrap(); + coll.find_one(doc! {}).await.unwrap(); assert!(cached_credential().await.is_some()); let now = bson::DateTime::now(); @@ -37,7 +39,7 @@ async fn credential_caching() { let client = get_client().await; let coll = client.database("aws").collection::("somecoll"); - coll.find_one(None, None).await.unwrap(); + coll.find_one(doc! {}).await.unwrap(); assert!(cached_credential().await.is_some()); assert!(cached_expiration().await > now); @@ -45,7 +47,7 @@ async fn credential_caching() { let client = get_client().await; let coll = client.database("aws").collection::("somecoll"); - match coll.find_one(None, None).await { + match coll.find_one(doc! {}).await { Ok(_) => panic!( "find one should have failed with authentication error due to poisoned cached \ credential" @@ -54,7 +56,7 @@ async fn credential_caching() { } assert!(cached_credential().await.is_none()); - coll.find_one(None, None).await.unwrap(); + coll.find_one(doc! {}).await.unwrap(); assert!(cached_credential().await.is_some()); } @@ -69,7 +71,7 @@ async fn credential_caching_environment_vars() { let client = get_client().await; let coll = client.database("aws").collection::("somecoll"); - coll.find_one(None, None).await.unwrap(); + coll.find_one(doc! {}).await.unwrap(); assert!(cached_credential().await.is_some()); set_var("AWS_ACCESS_KEY_ID", cached_access_key_id().await); @@ -81,7 +83,7 @@ async fn credential_caching_environment_vars() { let client = get_client().await; let coll = client.database("aws").collection::("somecoll"); - coll.find_one(None, None).await.unwrap(); + coll.find_one(doc! {}).await.unwrap(); assert!(cached_credential().await.is_none()); set_var("AWS_ACCESS_KEY_ID", "bad"); @@ -90,7 +92,7 @@ async fn credential_caching_environment_vars() { let client = get_client().await; let coll = client.database("aws").collection::("somecoll"); - match coll.find_one(None, None).await { + match coll.find_one(doc! {}).await { Ok(_) => panic!( "find one should have failed with authentication error due to poisoned environment \ variables" @@ -105,7 +107,7 @@ async fn credential_caching_environment_vars() { let client = get_client().await; let coll = client.database("aws").collection::("somecoll"); - coll.find_one(None, None).await.unwrap(); + coll.find_one(doc! {}).await.unwrap(); assert!(cached_credential().await.is_some()); set_var("AWS_ACCESS_KEY_ID", "bad"); @@ -114,7 +116,7 @@ async fn credential_caching_environment_vars() { let client = get_client().await; let coll = client.database("aws").collection::("somecoll"); - coll.find_one(None, None).await.unwrap(); + coll.find_one(doc! 
{}).await.unwrap(); remove_var("AWS_ACCESS_KEY_ID"); remove_var("AWS_SECRET_ACCESS_KEY"); diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 6e36c15d2..518dd1f1e 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -1,19 +1,17 @@ -use std::{sync::Arc, time::Duration}; - use rand::{ distributions::{Alphanumeric, DistString}, thread_rng, }; use crate::{ - bson::{doc, Document}, + bson::doc, error::ErrorKind, options::WriteModel, test::{ get_client_options, log_uncaptured, spec::unified_runner::run_unified_tests, - EventHandler, + util::event_buffer::EventBuffer, FailPoint, FailPointMode, TestClient, @@ -29,12 +27,11 @@ async fn run_unified() { #[tokio::test] async fn max_write_batch_size_batching() { - let handler = Arc::new(EventHandler::new()); + let event_buffer = EventBuffer::new(); let client = Client::test_builder() - .event_handler(handler.clone()) + .event_buffer(event_buffer.clone()) .build() .await; - let mut subscriber = handler.subscribe(); if client.server_version_lt(8, 0) { log_uncaptured("skipping max_write_batch_size_batching: bulkWrite requires 8.0+"); @@ -52,29 +49,28 @@ async fn max_write_batch_size_batching() { let result = client.bulk_write(models).await.unwrap(); assert_eq!(result.inserted_count as usize, max_write_batch_size + 1); - let (first_started, _) = subscriber - .wait_for_successful_command_execution(Duration::from_millis(500), "bulkWrite") - .await - .expect("no events observed"); - let first_len = first_started.command.get_array("ops").unwrap().len(); + let command_started_events = event_buffer.get_command_started_events(&["bulkWrite"]); + + let first_event = command_started_events + .get(0) + .expect("no first event observed"); + let first_len = first_event.command.get_array("ops").unwrap().len(); assert_eq!(first_len, max_write_batch_size); - let (second_started, _) = subscriber - .wait_for_successful_command_execution(Duration::from_millis(500), "bulkWrite") - .await - .expect("no events observed"); - let second_len = second_started.command.get_array("ops").unwrap().len(); + let second_event = command_started_events + .get(1) + .expect("no second event observed"); + let second_len = second_event.command.get_array("ops").unwrap().len(); assert_eq!(second_len, 1); } #[tokio::test] async fn max_message_size_bytes_batching() { - let handler = Arc::new(EventHandler::new()); + let event_buffer = EventBuffer::new(); let client = Client::test_builder() - .event_handler(handler.clone()) + .event_buffer(event_buffer.clone()) .build() .await; - let mut subscriber = handler.subscribe(); if client.server_version_lt(8, 0) { log_uncaptured("skipping max_message_size_bytes_batching: bulkWrite requires 8.0+"); @@ -95,18 +91,18 @@ async fn max_message_size_bytes_batching() { let result = client.bulk_write(models).await.unwrap(); assert_eq!(result.inserted_count as usize, num_models); - let (first_started, _) = subscriber - .wait_for_successful_command_execution(Duration::from_millis(500), "bulkWrite") - .await - .expect("no events observed"); - let first_len = first_started.command.get_array("ops").unwrap().len(); + let command_started_events = event_buffer.get_command_started_events(&["bulkWrite"]); + + let first_event = command_started_events + .get(0) + .expect("no first event observed"); + let first_len = first_event.command.get_array("ops").unwrap().len(); assert_eq!(first_len, num_models - 1); - let (second_started, _) = subscriber - .wait_for_successful_command_execution(Duration::from_millis(500), "bulkWrite") - .await - .expect("no 
events observed"); - let second_len = second_started.command.get_array("ops").unwrap().len(); + let second_event = command_started_events + .get(1) + .expect("no second event observed"); + let second_len = second_event.command.get_array("ops").unwrap().len(); assert_eq!(second_len, 1); } @@ -115,13 +111,12 @@ async fn write_concern_error_batches() { let mut options = get_client_options().await.clone(); options.retry_writes = Some(false); - let handler = Arc::new(EventHandler::new()); + let event_buffer = EventBuffer::new(); let client = Client::test_builder() .options(options) - .event_handler(handler.clone()) + .event_buffer(event_buffer.clone()) .build() .await; - let mut subscriber = handler.subscribe(); if client.server_version_lt(8, 0) { log_uncaptured("skipping write_concern_error_batches: bulkWrite requires 8.0+"); @@ -158,7 +153,11 @@ async fn write_concern_error_batches() { #[tokio::test] async fn write_error_batches() { - let client = TestClient::new().await; + let mut event_buffer = EventBuffer::new(); + let client = Client::test_builder() + .event_buffer(event_buffer.clone()) + .build() + .await; if client.server_version_lt(8, 0) { log_uncaptured("skipping write_error_batches: bulkWrite requires 8.0+"); @@ -170,7 +169,7 @@ async fn write_error_batches() { let document = doc! { "_id": 1 }; let collection = client.database("db").collection("coll"); collection.drop().await.unwrap(); - collection.insert_one(document.clone(), None).await.unwrap(); + collection.insert_one(document.clone()).await.unwrap(); let models = vec![ WriteModel::InsertOne { @@ -195,23 +194,30 @@ async fn write_error_batches() { max_write_batch_size + 1 ); - let error = client.bulk_write(models).await.unwrap_err(); + let command_started_events = event_buffer.get_command_started_events(&["bulkWrite"]); + assert_eq!(command_started_events.len(), 2); + + event_buffer.clear_cached_events(); + + let error = client.bulk_write(models).ordered(true).await.unwrap_err(); let ErrorKind::ClientBulkWrite(bulk_write_error) = *error.kind else { panic!("Expected bulk write error, got {:?}", error); }; assert_eq!(bulk_write_error.write_errors.len(), 1); + + let command_started_events = event_buffer.get_command_started_events(&["bulkWrite"]); + assert_eq!(command_started_events.len(), 1); } #[tokio::test] async fn cursor_iteration() { - let handler = Arc::new(EventHandler::new()); + let event_buffer = EventBuffer::new(); let client = Client::test_builder() - .event_handler(handler.clone()) + .event_buffer(event_buffer.clone()) .build() .await; - let mut subscriber = handler.subscribe(); let max_bson_object_size = client.server_info.max_bson_object_size as usize; let max_write_batch_size = client.server_info.max_write_batch_size.unwrap() as usize; @@ -220,8 +226,8 @@ async fn cursor_iteration() { let document = doc! 
{ "_id": Alphanumeric.sample_string(&mut thread_rng(), id_size) }; client .database("bulk") - .collection::("write") - .insert_one(&document, None) + .collection("write") + .insert_one(document.clone()) .await .unwrap(); @@ -234,19 +240,13 @@ async fn cursor_iteration() { ]; let error = client.bulk_write(models).ordered(false).await.unwrap_err(); - assert!(error.source.is_none()); - let ErrorKind::ClientBulkWrite(bulk_write_error) = *error.kind else { panic!("Expected bulk write error, got {:?}", error); }; - assert!(bulk_write_error.write_concern_errors.is_empty()); - let write_errors = bulk_write_error.write_errors; assert_eq!(write_errors.len(), max_write_batch_size); - subscriber - .wait_for_successful_command_execution(Duration::from_millis(500), "getMore") - .await - .expect("no getMore observed"); + let command_started_events = event_buffer.get_command_started_events(&["getMore"]); + assert!(!command_started_events.is_empty()); } diff --git a/src/test/change_stream.rs b/src/test/change_stream.rs index 377020fea..a5df31b4f 100644 --- a/src/test/change_stream.rs +++ b/src/test/change_stream.rs @@ -17,10 +17,13 @@ use crate::{ Collection, }; -use super::{get_client_options, log_uncaptured, EventClient, TestClient}; +#[allow(deprecated)] +use super::EventClient; +use super::{get_client_options, log_uncaptured, TestClient}; type Result = std::result::Result>; +#[allow(deprecated)] async fn init_stream( coll_name: &str, direct_connection: bool, @@ -73,19 +76,23 @@ async fn tracks_resume_token() -> Result<()> { tokens.push(token.parsed()?); } for _ in 0..3 { - coll.insert_one(doc! {}, None).await?; + coll.insert_one(doc! {}).await?; stream.next().await.transpose()?; tokens.push(stream.resume_token().unwrap().parsed()?); } - let events: Vec<_> = client - .get_command_events(&["aggregate", "getMore"]) - .into_iter() - .filter_map(|ev| match ev { - CommandEvent::Succeeded(s) => Some(s), - _ => None, - }) - .collect(); + #[allow(deprecated)] + let events: Vec<_> = { + let mut events = client.events.clone(); + events + .get_command_events(&["aggregate", "getMore"]) + .into_iter() + .filter_map(|ev| match ev { + CommandEvent::Succeeded(s) => Some(s), + _ => None, + }) + .collect() + }; let mut expected = vec![]; // Token from `aggregate` if let Some(initial) = events[0] @@ -140,7 +147,7 @@ async fn errors_on_missing_token() -> Result<()> { .watch() .pipeline(vec![doc! { "$project": { "_id": 0 } }]) .await?; - coll.insert_one(doc! {}, None).await?; + coll.insert_one(doc! {}).await?; assert!(stream.next().await.transpose().is_err()); Ok(()) @@ -155,7 +162,7 @@ async fn resumes_on_error() -> Result<()> { None => return Ok(()), }; - coll.insert_one(doc! { "_id": 1 }, None).await?; + coll.insert_one(doc! { "_id": 1 }).await?; assert!(matches!(stream.next().await.transpose()?, Some(ChangeStreamEvent { operation_type: OperationType::Insert, @@ -167,7 +174,7 @@ async fn resumes_on_error() -> Result<()> { let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(43); let _guard = client.configure_fail_point(fail_point).await?; - coll.insert_one(doc! { "_id": 2 }, None).await?; + coll.insert_one(doc! { "_id": 2 }).await?; assert!(matches!(stream.next().await.transpose()?, Some(ChangeStreamEvent { operation_type: OperationType::Insert, @@ -177,7 +184,8 @@ async fn resumes_on_error() -> Result<()> { )); // Assert that two `aggregate`s were issued, i.e. that a resume happened. 
- let events = client.get_command_started_events(&["aggregate"]); + #[allow(deprecated)] + let events = client.events.get_command_started_events(&["aggregate"]); assert_eq!(events.len(), 2); Ok(()) @@ -217,10 +225,14 @@ async fn empty_batch_not_closed() -> Result<()> { assert!(stream.next_if_any().await?.is_none()); - coll.insert_one(doc! {}, None).await?; + coll.insert_one(doc! {}).await?; stream.next().await.transpose()?; - let events = client.get_command_events(&["aggregate", "getMore"]); + #[allow(deprecated)] + let events = { + let mut events = client.events.clone(); + events.get_command_events(&["aggregate", "getMore"]) + }; let cursor_id = match &events[1] { CommandEvent::Succeeded(CommandSucceededEvent { reply, .. }) => { reply.get_document("cursor")?.get_i64("id")? @@ -246,7 +258,7 @@ async fn resume_kill_cursor_error_suppressed() -> Result<()> { None => return Ok(()), }; - coll.insert_one(doc! { "_id": 1 }, None).await?; + coll.insert_one(doc! { "_id": 1 }).await?; assert!(matches!(stream.next().await.transpose()?, Some(ChangeStreamEvent { operation_type: OperationType::Insert, @@ -259,7 +271,7 @@ async fn resume_kill_cursor_error_suppressed() -> Result<()> { FailPoint::new(&["getMore", "killCursors"], FailPointMode::Times(1)).error_code(43); let _guard = client.configure_fail_point(fail_point).await?; - coll.insert_one(doc! { "_id": 2 }, None).await?; + coll.insert_one(doc! { "_id": 2 }).await?; assert!(matches!(stream.next().await.transpose()?, Some(ChangeStreamEvent { operation_type: OperationType::Insert, @@ -269,7 +281,8 @@ async fn resume_kill_cursor_error_suppressed() -> Result<()> { )); // Assert that two `aggregate`s were issued, i.e. that a resume happened. - let events = client.get_command_started_events(&["aggregate"]); + #[allow(deprecated)] + let events = client.events.get_command_started_events(&["aggregate"]); assert_eq!(events.len(), 2); Ok(()) @@ -299,10 +312,14 @@ async fn resume_start_at_operation_time() -> Result<()> { let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(43); let _guard = client.configure_fail_point(fail_point).await?; - coll.insert_one(doc! { "_id": 2 }, None).await?; + coll.insert_one(doc! { "_id": 2 }).await?; stream.next().await.transpose()?; - let events = client.get_command_events(&["aggregate"]); + #[allow(deprecated)] + let events = { + let mut events = client.events.clone(); + events.get_command_events(&["aggregate"]) + }; assert_eq!(events.len(), 4); fn has_saot(command: &Document) -> Result { @@ -345,7 +362,11 @@ async fn batch_end_resume_token() -> Result<()> { assert_eq!(stream.next_if_any().await?, None); let token = stream.resume_token().unwrap().parsed()?; - let commands = client.get_command_events(&["aggregate", "getMore"]); + #[allow(deprecated)] + let commands = { + let mut events = client.events.clone(); + events.get_command_events(&["aggregate", "getMore"]) + }; assert!(matches!(commands.last(), Some( CommandEvent::Succeeded(CommandSucceededEvent { reply, @@ -380,7 +401,7 @@ async fn batch_end_resume_token_legacy() -> Result<()> { assert_eq!(stream.resume_token(), None); // Case: end of batch - coll.insert_one(doc! {}, None).await?; + coll.insert_one(doc! {}).await?; let expected_id = stream.next_if_any().await?.unwrap().id; assert_eq!(stream.next_if_any().await?, None); assert_eq!(stream.resume_token().as_ref(), Some(&expected_id)); @@ -411,7 +432,7 @@ async fn batch_mid_resume_token() -> Result<()> { } // If we're out of events, make some more. 
None => { - coll.insert_many((0..3).map(|_| doc! {}), None).await?; + coll.insert_many((0..3).map(|_| doc! {})).await?; } }; @@ -451,13 +472,13 @@ async fn aggregate_batch() -> Result<()> { } // Synthesize a resume token for the new stream to start at. - coll.insert_one(doc! {}, None).await?; + coll.insert_one(doc! {}).await?; stream.next().await; let token = stream.resume_token().unwrap(); // Populate the initial batch of the new stream. - coll.insert_one(doc! {}, None).await?; - coll.insert_one(doc! {}, None).await?; + coll.insert_one(doc! {}).await?; + coll.insert_one(doc! {}).await?; // Case: `start_after` is given let stream = coll.watch().start_after(token.clone()).await?; @@ -491,19 +512,20 @@ async fn resume_uses_start_after() -> Result<()> { return Ok(()); } - coll.insert_one(doc! {}, None).await?; + coll.insert_one(doc! {}).await?; stream.next().await.transpose()?; let token = stream.resume_token().unwrap(); let mut stream = coll.watch().start_after(token.clone()).await?; // Create an event, and synthesize a resumable error when calling `getMore` for that event. - coll.insert_one(doc! {}, None).await?; + coll.insert_one(doc! {}).await?; let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(43); let _guard = client.configure_fail_point(fail_point).await?; stream.next().await.transpose()?; - let commands = client.get_command_started_events(&["aggregate"]); + #[allow(deprecated)] + let commands = client.events.get_command_started_events(&["aggregate"]); fn has_start_after(command: &Document) -> Result { let stage = command.get_array("pipeline")?[0] .as_document() @@ -542,23 +564,24 @@ async fn resume_uses_resume_after() -> Result<()> { return Ok(()); } - coll.insert_one(doc! {}, None).await?; + coll.insert_one(doc! {}).await?; stream.next().await.transpose()?; let token = stream.resume_token().unwrap(); let mut stream = coll.watch().start_after(token.clone()).await?; // Create an event and read it. - coll.insert_one(doc! {}, None).await?; + coll.insert_one(doc! {}).await?; stream.next().await.transpose()?; // Create an event, and synthesize a resumable error when calling `getMore` for that event. - coll.insert_one(doc! {}, None).await?; + coll.insert_one(doc! {}).await?; let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(43); let _guard = client.configure_fail_point(fail_point).await?; stream.next().await.transpose()?; - let commands = client.get_command_started_events(&["aggregate"]); + #[allow(deprecated)] + let commands = client.events.get_command_started_events(&["aggregate"]); fn has_resume_after(command: &Document) -> Result { let stage = command.get_array("pipeline")?[0] .as_document() @@ -629,7 +652,7 @@ async fn split_large_event() -> Result<()> { .await?; let coll = db.collection::("split_large_event"); - coll.insert_one(doc! { "value": "q".repeat(10 * 1024 * 1024) }, None) + coll.insert_one(doc! 
{ "value": "q".repeat(10 * 1024 * 1024) }) .await?; let stream = coll .watch() diff --git a/src/test/client.rs b/src/test/client.rs index aa7e9b288..46635348c 100644 --- a/src/test/client.rs +++ b/src/test/client.rs @@ -1,11 +1,10 @@ -use std::{borrow::Cow, collections::HashMap, future::IntoFuture, sync::Arc, time::Duration}; +use std::{borrow::Cow, collections::HashMap, future::IntoFuture, time::Duration}; use bson::Document; use serde::{Deserialize, Serialize}; use crate::{ bson::{doc, Bson}, - coll::options::FindOptions, error::{CommandError, Error, ErrorKind}, event::{cmap::CmapEvent, sdam::SdamEvent}, hello::LEGACY_HELLO_COMMAND_NAME, @@ -15,9 +14,8 @@ use crate::{ test::{ get_client_options, log_uncaptured, - util::TestClient, + util::{event_buffer::EventBuffer, TestClient}, Event, - EventHandler, FailPoint, FailPointMode, SERVER_API, @@ -96,7 +94,7 @@ async fn connection_drop_during_read() { let db = client.database("test"); db.collection(function_name!()) - .insert_one(doc! { "x": 1 }, None) + .insert_one(doc! { "x": 1 }) .await .unwrap(); @@ -131,9 +129,11 @@ async fn server_selection_timeout_message() { tag_set.insert("asdfasdf".to_string(), "asdfadsf".to_string()); let unsatisfiable_read_preference = ReadPreference::Secondary { - options: ReadPreferenceOptions::builder() - .tag_sets(vec![tag_set]) - .build(), + options: Some( + ReadPreferenceOptions::builder() + .tag_sets(vec![tag_set]) + .build(), + ), }; let mut options = get_client_options().await.clone(); @@ -178,7 +178,7 @@ async fn list_databases() { let db = client.database(name); db.collection("foo") - .insert_one(doc! { "x": 1 }, None) + .insert_one(doc! { "x": 1 }) .await .unwrap(); } @@ -223,7 +223,7 @@ async fn list_database_names() { let db = client.database(name); db.collection("foo") - .insert_one(doc! { "x": 1 }, None) + .insert_one(doc! { "x": 1 }) .await .unwrap(); } @@ -603,7 +603,7 @@ async fn x509_auth() { client .database(function_name!()) .collection::(function_name!()) - .find_one(None, None) + .find_one(doc! {}) .await .unwrap(); } @@ -632,7 +632,7 @@ async fn plain_auth() { let client = Client::with_options(options).unwrap(); let coll = client.database("ldap").collection("test"); - let doc = coll.find_one(None, None).await.unwrap().unwrap(); + let doc = coll.find_one(doc! {}).await.unwrap().unwrap(); #[derive(Debug, Deserialize, PartialEq)] struct TestDocument { @@ -682,26 +682,27 @@ async fn retry_commit_txn_check_out() { setup_client .database("retry_commit_txn_check_out") .collection("retry_commit_txn_check_out") - .insert_one(doc! {}, None) + .insert_one(doc! {}) .await .unwrap(); let mut options = get_client_options().await.clone(); - let handler = Arc::new(EventHandler::new()); - options.cmap_event_handler = Some(handler.clone().into()); - options.sdam_event_handler = Some(handler.clone().into()); + let buffer = EventBuffer::new(); + options.cmap_event_handler = Some(buffer.handler()); + options.sdam_event_handler = Some(buffer.handler()); options.heartbeat_freq = Some(Duration::from_secs(120)); options.app_name = Some("retry_commit_txn_check_out".to_string()); let client = Client::with_options(options).unwrap(); let mut session = client.start_session().await.unwrap(); - session.start_transaction(None).await.unwrap(); + session.start_transaction().await.unwrap(); // transition transaction to "in progress" so that the commit // actually executes an operation. client .database("retry_commit_txn_check_out") .collection("retry_commit_txn_check_out") - .insert_one_with_session(doc! 
{}, None, &mut session) + .insert_one(doc! {}) + .session(&mut session) .await .unwrap(); @@ -710,7 +711,8 @@ async fn retry_commit_txn_check_out() { let fail_point = FailPoint::new(&["ping"], FailPointMode::Times(1)).error_code(11600); let _guard = setup_client.configure_fail_point(fail_point).await.unwrap(); - let mut subscriber = handler.subscribe(); + #[allow(deprecated)] + let mut subscriber = buffer.subscribe(); client .database("foo") .run_command(doc! { "ping": 1 }) @@ -790,9 +792,9 @@ async fn manual_shutdown_with_nothing() { /// Verifies that `Client::shutdown` succeeds when resources have been dropped. #[tokio::test] async fn manual_shutdown_with_resources() { - let events = Arc::new(EventHandler::new()); + let events = EventBuffer::new(); let client = Client::test_builder() - .event_handler(Arc::clone(&events)) + .event_buffer(events.clone()) .build() .await; if !client.supports_transactions() { @@ -802,24 +804,22 @@ async fn manual_shutdown_with_resources() { let db = client.database("shutdown_test"); db.drop().await.unwrap(); let coll = db.collection::("test"); - coll.insert_many([doc! {}, doc! {}], None).await.unwrap(); + coll.insert_many([doc! {}, doc! {}]).await.unwrap(); let bucket = db.gridfs_bucket(None); // Scope to force drop of resources { // Exhausted cursors don't need cleanup, so make sure there's more than one batch to fetch - let _cursor = coll - .find(None, FindOptions::builder().batch_size(1).build()) - .await - .unwrap(); + let _cursor = coll.find(doc! {}).batch_size(1).await.unwrap(); // Similarly, sessions need an in-progress transaction to have cleanup. let mut session = client.start_session().await.unwrap(); - if session.start_transaction(None).await.is_err() { + if session.start_transaction().await.is_err() { // Transaction start can transiently fail; if so, just bail out of the test. log_uncaptured("Skipping manual_shutdown_with_resources: transaction start failed"); return; } if coll - .insert_one_with_session(doc! {}, None, &mut session) + .insert_one(doc! {}) + .session(&mut session) .await .is_err() { @@ -827,7 +827,7 @@ async fn manual_shutdown_with_resources() { log_uncaptured("Skipping manual_shutdown_with_resources: transaction operation failed"); return; } - let _stream = bucket.open_upload_stream("test", None); + let _stream = bucket.open_upload_stream("test").await.unwrap(); } let is_sharded = client.is_sharded(); client.into_client().shutdown().await; @@ -853,9 +853,9 @@ async fn manual_shutdown_immediate_with_nothing() { /// Verifies that `Client::shutdown_immediate` succeeds without waiting for resources. #[tokio::test] async fn manual_shutdown_immediate_with_resources() { - let events = Arc::new(EventHandler::new()); + let events = EventBuffer::new(); let client = Client::test_builder() - .event_handler(Arc::clone(&events)) + .event_buffer(events.clone()) .build() .await; if !client.supports_transactions() { @@ -865,23 +865,21 @@ async fn manual_shutdown_immediate_with_resources() { let db = client.database("shutdown_test"); db.drop().await.unwrap(); let coll = db.collection::("test"); - coll.insert_many([doc! {}, doc! {}], None).await.unwrap(); + coll.insert_many([doc! {}, doc! {}]).await.unwrap(); let bucket = db.gridfs_bucket(None); // Resources are scoped to past the `shutdown_immediate`. // Exhausted cursors don't need cleanup, so make sure there's more than one batch to fetch - let _cursor = coll - .find(None, FindOptions::builder().batch_size(1).build()) - .await - .unwrap(); + let _cursor = coll.find(doc! 
{}).batch_size(1).await.unwrap(); // Similarly, sessions need an in-progress transaction to have cleanup. let mut session = client.start_session().await.unwrap(); - session.start_transaction(None).await.unwrap(); - coll.insert_one_with_session(doc! {}, None, &mut session) + session.start_transaction().await.unwrap(); + coll.insert_one(doc! {}) + .session(&mut session) .await .unwrap(); - let _stream = bucket.open_upload_stream("test", None); + let _stream = bucket.open_upload_stream("test").await.unwrap(); client.into_client().shutdown().immediate(true).await; @@ -912,17 +910,13 @@ async fn find_one_and_delete_serde_consistency() { problematic: vec![0, 1, 2, 3, 4, 5, 6, 7], }; - coll.insert_one(&doc, None).await.unwrap(); - let rec: Foo = coll.find_one(doc! {}, None).await.unwrap().unwrap(); + coll.insert_one(&doc).await.unwrap(); + let rec: Foo = coll.find_one(doc! {}).await.unwrap().unwrap(); assert_eq!(doc.problematic, rec.problematic); - let rec: Foo = coll - .find_one_and_delete(doc! {}, None) - .await - .unwrap() - .unwrap(); + let rec: Foo = coll.find_one_and_delete(doc! {}).await.unwrap().unwrap(); assert_eq!(doc.problematic, rec.problematic); - let nothing = coll.find_one_and_delete(doc! {}, None).await.unwrap(); + let nothing = coll.find_one_and_delete(doc! {}).await.unwrap(); assert!(nothing.is_none()); } diff --git a/src/test/coll.rs b/src/test/coll.rs index 34571a503..73730722a 100644 --- a/src/test/coll.rs +++ b/src/test/coll.rs @@ -1,12 +1,13 @@ -use std::{fmt::Debug, sync::Arc, time::Duration}; +use std::{fmt::Debug, time::Duration}; -use crate::{test::EventHandler, Client, Namespace}; use bson::{rawdoc, RawDocumentBuf}; use futures::stream::{StreamExt, TryStreamExt}; use once_cell::sync::Lazy; use semver::VersionReq; use serde::{de::DeserializeOwned, Deserialize, Serialize}; +#[allow(deprecated)] +use crate::test::EventClient; use crate::{ bson::{doc, to_document, Bson, Document}, error::{ErrorKind, Result, WriteFailure}, @@ -16,11 +17,9 @@ use crate::{ DeleteOptions, DropCollectionOptions, FindOneAndDeleteOptions, - FindOneOptions, FindOptions, Hint, IndexOptions, - InsertManyOptions, ReadConcern, ReadPreference, SelectionCriteria, @@ -30,10 +29,12 @@ use crate::{ test::{ get_client_options, log_uncaptured, - util::{drop_collection, EventClient, TestClient}, + util::{event_buffer::EventBuffer, TestClient}, }, + Client, Collection, IndexModel, + Namespace, }; #[tokio::test] @@ -71,7 +72,7 @@ async fn insert_err_details() { .await .unwrap(); - let wc_error_result = coll.insert_one(doc! { "test": 1 }, None).await; + let wc_error_result = coll.insert_one(doc! { "test": 1 }).await; match *wc_error_result.unwrap_err().kind { ErrorKind::Write(WriteFailure::WriteConcernError(ref wc_error)) => { match &wc_error.details { @@ -101,11 +102,11 @@ async fn count() { assert_eq!(coll.estimated_document_count().await.unwrap(), 0); - let _ = coll.insert_one(doc! { "x": 1 }, None).await.unwrap(); + let _ = coll.insert_one(doc! { "x": 1 }).await.unwrap(); assert_eq!(coll.estimated_document_count().await.unwrap(), 1); let result = coll - .insert_many((1..4).map(|i| doc! { "x": i }).collect::>(), None) + .insert_many((1..4).map(|i| doc! { "x": i }).collect::>()) .await .unwrap(); assert_eq!(result.inserted_ids.len(), 3); @@ -121,12 +122,12 @@ async fn find() { .await; let result = coll - .insert_many((0i32..5).map(|i| doc! { "x": i }).collect::>(), None) + .insert_many((0i32..5).map(|i| doc! 
{ "x": i }).collect::>()) .await .unwrap(); assert_eq!(result.inserted_ids.len(), 5); - let mut cursor = coll.find(None, None).await.unwrap().enumerate(); + let mut cursor = coll.find(doc! {}).await.unwrap().enumerate(); while let Some((i, result)) = cursor.next().await { let doc = result.unwrap(); @@ -146,7 +147,7 @@ async fn update() { .await; let result = coll - .insert_many((0i32..5).map(|_| doc! { "x": 3 }).collect::>(), None) + .insert_many((0i32..5).map(|_| doc! { "x": 3 }).collect::>()) .await .unwrap(); assert_eq!(result.inserted_ids.len(), 5); @@ -183,7 +184,7 @@ async fn delete() { .await; let result = coll - .insert_many((0i32..5).map(|_| doc! { "x": 3 }).collect::>(), None) + .insert_many((0i32..5).map(|_| doc! { "x": 3 }).collect::>()) .await .unwrap(); assert_eq!(result.inserted_ids.len(), 5); @@ -204,10 +205,10 @@ async fn aggregate_out() { let db = client.database(function_name!()); let coll = db.collection(function_name!()); - drop_collection(&coll).await; + coll.drop().await.unwrap(); let result = coll - .insert_many((0i32..5).map(|n| doc! { "x": n }).collect::>(), None) + .insert_many((0i32..5).map(|n| doc! { "x": n }).collect::>()) .await .unwrap(); assert_eq!(result.inserted_ids.len(), 5); @@ -221,7 +222,7 @@ async fn aggregate_out() { }, doc! {"$out": out_coll.name()}, ]; - drop_collection(&out_coll).await; + out_coll.drop().await.unwrap(); coll.aggregate(pipeline.clone()).await.unwrap(); assert!(db @@ -230,7 +231,7 @@ async fn aggregate_out() { .unwrap() .into_iter() .any(|name| name.as_str() == out_coll.name())); - drop_collection(&out_coll).await; + out_coll.drop().await.unwrap(); // check that even with a batch size of 0, a new collection is created. coll.aggregate(pipeline).batch_size(0).await.unwrap(); @@ -242,8 +243,10 @@ async fn aggregate_out() { .any(|name| name.as_str() == out_coll.name())); } +#[allow(deprecated)] fn kill_cursors_sent(client: &EventClient) -> bool { !client + .events .get_command_started_events(&["killCursors"]) .is_empty() } @@ -255,21 +258,19 @@ async fn kill_cursors_on_drop() { let db = client.database(function_name!()); let coll = db.collection(function_name!()); - drop_collection(&coll).await; + coll.drop().await.unwrap(); - coll.insert_many(vec![doc! { "x": 1 }, doc! { "x": 2 }], None) + coll.insert_many(vec![doc! { "x": 1 }, doc! { "x": 2 }]) .await .unwrap(); + #[allow(deprecated)] let event_client = EventClient::new().await; let coll = event_client .database(function_name!()) .collection::(function_name!()); - let cursor = coll - .find(None, FindOptions::builder().batch_size(1).build()) - .await - .unwrap(); + let cursor = coll.find(doc! {}).batch_size(1).await.unwrap(); assert!(!kill_cursors_sent(&event_client)); @@ -290,21 +291,19 @@ async fn no_kill_cursors_on_exhausted() { let db = client.database(function_name!()); let coll = db.collection(function_name!()); - drop_collection(&coll).await; + coll.drop().await.unwrap(); - coll.insert_many(vec![doc! { "x": 1 }, doc! { "x": 2 }], None) + coll.insert_many(vec![doc! { "x": 1 }, doc! { "x": 2 }]) .await .unwrap(); + #[allow(deprecated)] let event_client = EventClient::new().await; let coll = event_client .database(function_name!()) .collection::(function_name!()); - let cursor = coll - .find(None, FindOptions::builder().build()) - .await - .unwrap(); + let cursor = coll.find(doc! 
{}).await.unwrap(); assert!(!kill_cursors_sent(&event_client)); @@ -383,11 +382,7 @@ async fn large_insert() { .init_db_and_coll(function_name!(), function_name!()) .await; assert_eq!( - coll.insert_many(docs, None) - .await - .unwrap() - .inserted_ids - .len(), + coll.insert_many(docs).await.unwrap().inserted_ids.len(), 35000 ); } @@ -429,10 +424,10 @@ async fn large_insert_unordered_with_errors() { let coll = client .init_db_and_coll(function_name!(), function_name!()) .await; - let options = InsertManyOptions::builder().ordered(false).build(); match *coll - .insert_many(docs, options) + .insert_many(docs) + .ordered(false) .await .expect_err("should get error") .kind @@ -467,10 +462,10 @@ async fn large_insert_ordered_with_errors() { let coll = client .init_db_and_coll(function_name!(), function_name!()) .await; - let options = InsertManyOptions::builder().ordered(true).build(); match *coll - .insert_many(docs, options) + .insert_many(docs) + .ordered(true) .await .expect_err("should get error") .kind @@ -501,7 +496,7 @@ async fn empty_insert() { .database(function_name!()) .collection::(function_name!()); match *coll - .insert_many(Vec::::new(), None) + .insert_many(Vec::::new()) .await .expect_err("should get error") .kind @@ -531,6 +526,7 @@ async fn find_allow_disk_use_not_specified() { #[function_name::named] async fn allow_disk_use_test(options: FindOptions, expected_value: Option) { + #[allow(deprecated)] let event_client = EventClient::new().await; if event_client.server_version_lt(4, 3) { log_uncaptured("skipping allow_disk_use_test due to server version < 4.3"); @@ -539,9 +535,10 @@ async fn allow_disk_use_test(options: FindOptions, expected_value: Option) let coll = event_client .database(function_name!()) .collection::(function_name!()); - coll.find(None, options).await.unwrap(); + coll.find(doc! {}).with_options(options).await.unwrap(); - let events = event_client.get_command_started_events(&["find"]); + #[allow(deprecated)] + let events = event_client.events.get_command_started_events(&["find"]); assert_eq!(events.len(), 1); let allow_disk_use = events[0].command.get_bool("allowDiskUse").ok(); @@ -558,6 +555,7 @@ async fn ns_not_found_suppression() { } async fn delete_hint_test(options: Option, name: &str) { + #[allow(deprecated)] let client = EventClient::new().await; let coll = client.database(name).collection::(name); let _: Result = coll @@ -565,7 +563,8 @@ async fn delete_hint_test(options: Option, name: &str) { .with_options(options.clone()) .await; - let events = client.get_command_started_events(&["delete"]); + #[allow(deprecated)] + let events = client.events.get_command_started_events(&["delete"]); assert_eq!(events.len(), 1); let event_hint = events[0].command.get_array("deletes").unwrap()[0] @@ -601,6 +600,7 @@ async fn delete_hint_not_specified() { } async fn find_one_and_delete_hint_test(options: Option, name: &str) { + #[allow(deprecated)] let client = EventClient::new().await; let req = VersionReq::parse(">= 4.2").unwrap(); @@ -610,9 +610,13 @@ async fn find_one_and_delete_hint_test(options: Option, } let coll = client.database(name).collection(name); - let _: Result> = coll.find_one_and_delete(doc! {}, options.clone()).await; + let _: Result> = coll + .find_one_and_delete(doc! 
{}) + .with_options(options.clone()) + .await; - let events = client.get_command_started_events(&["findAndModify"]); + #[allow(deprecated)] + let events = client.events.get_command_started_events(&["findAndModify"]); assert_eq!(events.len(), 1); let event_hint = events[0] @@ -651,15 +655,16 @@ async fn find_one_and_delete_hint_not_specified() { #[tokio::test] #[function_name::named] async fn find_one_and_delete_hint_server_version() { + #[allow(deprecated)] let client = EventClient::new().await; let coll = client .database(function_name!()) .collection::("coll"); - let options = FindOneAndDeleteOptions::builder() + let res = coll + .find_one_and_delete(doc! {}) .hint(Hint::Name(String::new())) - .build(); - let res = coll.find_one_and_delete(doc! {}, options).await; + .await; let req1 = VersionReq::parse("< 4.2").unwrap(); let req2 = VersionReq::parse("4.2.*").unwrap(); @@ -677,6 +682,7 @@ async fn find_one_and_delete_hint_server_version() { #[tokio::test] #[function_name::named] async fn no_read_preference_to_standalone() { + #[allow(deprecated)] let client = EventClient::new().await; if !client.is_standalone() { @@ -684,22 +690,22 @@ async fn no_read_preference_to_standalone() { return; } - let options = FindOneOptions::builder() + client + .database(function_name!()) + .collection::(function_name!()) + .find_one(doc! {}) .selection_criteria(SelectionCriteria::ReadPreference( ReadPreference::SecondaryPreferred { options: Default::default(), }, )) - .build(); - - client - .database(function_name!()) - .collection::(function_name!()) - .find_one(None, options) .await .unwrap(); - let command_started = client.get_successful_command_execution("find").0; + #[allow(deprecated)] + let mut events = client.events.clone(); + #[allow(deprecated)] + let command_started = events.get_successful_command_execution("find").0; assert!(!command_started.command.contains_key("$readPreference")); } @@ -742,9 +748,9 @@ async fn insert_one_and_find(coll: &Collection, insert_data: T) where T: Serialize + DeserializeOwned + Clone + PartialEq + Debug + Unpin + Send + Sync, { - coll.insert_one(insert_data.clone(), None).await.unwrap(); + coll.insert_one(insert_data.clone()).await.unwrap(); let result = coll - .find_one(to_document(&insert_data).unwrap(), None) + .find_one(to_document(&insert_data).unwrap()) .await .unwrap(); match result { @@ -773,11 +779,11 @@ async fn typed_insert_many() { str: "b".into(), }, ]; - coll.insert_many(insert_data.clone(), None).await.unwrap(); + coll.insert_many(insert_data.clone()).await.unwrap(); - let options = FindOptions::builder().sort(doc! { "x": 1 }).build(); let actual: Vec = coll - .find(doc! { "x": 2 }, options) + .find(doc! { "x": 2 }) + .sort(doc! { "x": 1 }) .await .unwrap() .try_collect() @@ -798,20 +804,20 @@ async fn typed_find_one_and_replace() { x: 1, str: "a".into(), }; - coll.insert_one(insert_data.clone(), None).await.unwrap(); + coll.insert_one(insert_data.clone()).await.unwrap(); let replacement = UserType { x: 2, str: "b".into(), }; let result = coll - .find_one_and_replace(doc! { "x": 1 }, replacement.clone(), None) + .find_one_and_replace(doc! { "x": 1 }, replacement.clone()) .await .unwrap() .unwrap(); assert_eq!(result, insert_data); - let result = coll.find_one(doc! { "x": 2 }, None).await.unwrap().unwrap(); + let result = coll.find_one(doc! 
{ "x": 2 }).await.unwrap().unwrap(); assert_eq!(result, replacement); } @@ -831,12 +837,12 @@ async fn typed_replace_one() { x: 2, str: "b".into(), }; - coll.insert_one(insert_data, None).await.unwrap(); - coll.replace_one(doc! { "x": 1 }, replacement.clone(), None) + coll.insert_one(insert_data).await.unwrap(); + coll.replace_one(doc! { "x": 1 }, replacement.clone()) .await .unwrap(); - let result = coll.find_one(doc! { "x": 2 }, None).await.unwrap().unwrap(); + let result = coll.find_one(doc! { "x": 2 }).await.unwrap().unwrap(); assert_eq!(result, replacement); } @@ -852,17 +858,17 @@ async fn typed_returns() { x: 1, str: "a".into(), }; - coll.insert_one(insert_data.clone(), None).await.unwrap(); + coll.insert_one(insert_data.clone()).await.unwrap(); let result = coll - .find_one_and_update(doc! { "x": 1 }, doc! { "$inc": { "x": 1 } }, None) + .find_one_and_update(doc! { "x": 1 }, doc! { "$inc": { "x": 1 } }) .await .unwrap() .unwrap(); assert_eq!(result, insert_data); let result = coll - .find_one_and_delete(doc! { "x": 2 }, None) + .find_one_and_delete(doc! { "x": 2 }) .await .unwrap() .unwrap(); @@ -890,7 +896,7 @@ async fn count_documents_with_wc() { .database(function_name!()) .collection(function_name!()); - coll.insert_one(doc! {}, None).await.unwrap(); + coll.insert_one(doc! {}).await.unwrap(); coll.count_documents(doc! {}) .await @@ -900,6 +906,7 @@ async fn count_documents_with_wc() { #[tokio::test] #[function_name::named] async fn collection_options_inherited() { + #[allow(deprecated)] let client = EventClient::new().await; let read_concern = ReadConcern::majority(); @@ -915,18 +922,19 @@ async fn collection_options_inherited() { .database(function_name!()) .collection_with_options::(function_name!(), options); - coll.find(None, None).await.unwrap(); + coll.find(doc! {}).await.unwrap(); assert_options_inherited(&client, "find").await; - coll.find_one(None, None).await.unwrap(); + coll.find_one(doc! {}).await.unwrap(); assert_options_inherited(&client, "find").await; coll.count_documents(doc! {}).await.unwrap(); assert_options_inherited(&client, "aggregate").await; } +#[allow(deprecated)] async fn assert_options_inherited(client: &EventClient, command_name: &str) { - let events = client.get_command_started_events(&[command_name]); + let events = client.events.get_command_started_events(&[command_name]); let event = events.iter().last().unwrap(); assert!(event.command.contains_key("readConcern")); assert_eq!( @@ -958,7 +966,7 @@ async fn collection_generic_bounds() { let coll: Collection = client .database(function_name!()) .collection(function_name!()); - let _result: Result> = coll.find_one(None, None).await; + let _result: Result> = coll.find_one(doc! {}).await; #[derive(Serialize)] struct Bar; @@ -967,7 +975,7 @@ async fn collection_generic_bounds() { let coll: Collection = client .database(function_name!()) .collection(function_name!()); - let _result = coll.insert_one(Bar {}, None).await; + let _result = coll.insert_one(Bar {}).await; } /// Verify that a cursor with multiple batches whose last batch isn't full @@ -980,10 +988,10 @@ async fn cursor_batch_size() { .await; let doc = Document::new(); - coll.insert_many(vec![&doc; 10], None).await.unwrap(); + coll.insert_many(vec![&doc; 10]).await.unwrap(); let opts = FindOptions::builder().batch_size(3).build(); - let cursor_no_session = coll.find(doc! {}, opts.clone()).await.unwrap(); + let cursor_no_session = coll.find(doc! 
{}).with_options(opts.clone()).await.unwrap(); let docs: Vec<_> = cursor_no_session.try_collect().await.unwrap(); assert_eq!(docs.len(), 10); @@ -994,7 +1002,9 @@ async fn cursor_batch_size() { } let mut session = client.start_session().await.unwrap(); let mut cursor = coll - .find_with_session(doc! {}, opts.clone(), &mut session) + .find(doc! {}) + .with_options(opts.clone()) + .session(&mut session) .await .unwrap(); let mut docs = Vec::new(); @@ -1004,7 +1014,9 @@ async fn cursor_batch_size() { assert_eq!(docs.len(), 10); let mut cursor = coll - .find_with_session(doc! {}, opts, &mut session) + .find(doc! {}) + .with_options(opts) + .session(&mut session) .await .unwrap(); let docs: Vec<_> = cursor.stream(&mut session).try_collect().await.unwrap(); @@ -1031,13 +1043,13 @@ async fn invalid_utf8_response() { // a document containing a long string with multi-byte unicode characters. taken from a user // repro in RUBY-2560. let long_unicode_str_doc = doc! {"name": "(╯°□°)╯︵ ┻━┻(╯°□°)╯︵ ┻━┻(╯°□°)╯︵ ┻━┻(╯°□°)╯︵ ┻━┻(╯°□°)╯︵ ┻━┻(╯°□°)╯︵ ┻━┻"}; - coll.insert_one(&long_unicode_str_doc, None) + coll.insert_one(&long_unicode_str_doc) .await .expect("first insert of document should succeed"); // test triggering an invalid error message via an insert_one. let insert_err = coll - .insert_one(&long_unicode_str_doc, None) + .insert_one(&long_unicode_str_doc) .await .expect_err("second insert of document should fail") .kind; @@ -1045,14 +1057,14 @@ async fn invalid_utf8_response() { // test triggering an invalid error message via an insert_many. let insert_err = coll - .insert_many([&long_unicode_str_doc], None) + .insert_many([&long_unicode_str_doc]) .await .expect_err("second insert of document should fail") .kind; assert_duplicate_key_error_with_utf8_replacement(&insert_err); // test triggering an invalid error message via an update_one. - coll.insert_one(doc! {"x": 1}, None) + coll.insert_one(doc! {"x": 1}) .await .expect("inserting new document should succeed"); @@ -1073,7 +1085,7 @@ async fn invalid_utf8_response() { // test triggering an invalid error message via a replace_one. let replace_err = coll - .replace_one(doc! {"x": 1}, &long_unicode_str_doc, None) + .replace_one(doc! {"x": 1}, &long_unicode_str_doc) .await .expect_err("replacement with duplicate key should fail") .kind; @@ -1157,13 +1169,10 @@ async fn configure_human_readable_serialization() { non_human_readable_collection.drop().await.unwrap(); non_human_readable_collection - .insert_one( - Data { - id: 0, - s: StringOrBytes("non human readable!".into()), - }, - None, - ) + .insert_one(Data { + id: 0, + s: StringOrBytes("non human readable!".into()), + }) .await .unwrap(); @@ -1171,7 +1180,7 @@ async fn configure_human_readable_serialization() { // instead. let document_collection = non_human_readable_collection.clone_with_type::(); let doc = document_collection - .find_one(doc! { "id": 0 }, None) + .find_one(doc! { "id": 0 }) .await .unwrap() .unwrap(); @@ -1184,13 +1193,12 @@ async fn configure_human_readable_serialization() { id: 1, s: StringOrBytes("non human readable!".into()), }, - None, ) .await .unwrap(); let doc = document_collection - .find_one(doc! { "id": 1 }, None) + .find_one(doc! 
{ "id": 1 }) .await .unwrap() .unwrap(); @@ -1206,20 +1214,17 @@ async fn configure_human_readable_serialization() { human_readable_collection.drop().await.unwrap(); human_readable_collection - .insert_one( - Data { - id: 0, - s: StringOrBytes("human readable!".into()), - }, - None, - ) + .insert_one(Data { + id: 0, + s: StringOrBytes("human readable!".into()), + }) .await .unwrap(); // Proper deserialization to a string demonstrates that the data was correctly serialized as a // string. human_readable_collection - .find_one(doc! { "id": 0 }, None) + .find_one(doc! { "id": 0 }) .await .unwrap(); @@ -1230,13 +1235,12 @@ async fn configure_human_readable_serialization() { id: 1, s: StringOrBytes("human readable!".into()), }, - None, ) .await .unwrap(); human_readable_collection - .find_one(doc! { "id": 1 }, None) + .find_one(doc! { "id": 1 }) .await .unwrap(); } @@ -1251,12 +1255,13 @@ async fn insert_many_document_sequences() { return; } - let handler = Arc::new(EventHandler::new()); + let buffer = EventBuffer::new(); let client = Client::test_builder() - .event_handler(handler.clone()) + .event_buffer(buffer.clone()) .build() .await; - let mut subscriber = handler.subscribe(); + #[allow(deprecated)] + let mut subscriber = buffer.subscribe(); let max_object_size = client.server_info.max_bson_object_size; let max_message_size = client.server_info.max_message_size_bytes; @@ -1272,7 +1277,7 @@ async fn insert_many_document_sequences() { rawdoc! { "s": "a".repeat((max_object_size / 2) as usize) }, rawdoc! { "s": "b".repeat((max_object_size / 2) as usize) }, ]; - collection.insert_many(docs, None).await.unwrap(); + collection.insert_many(docs).await.unwrap(); let (started, _) = subscriber .wait_for_successful_command_execution(Duration::from_millis(500), "insert") @@ -1292,7 +1297,7 @@ async fn insert_many_document_sequences() { docs.push(doc); } let total_docs = docs.len(); - collection.insert_many(docs, None).await.unwrap(); + collection.insert_many(docs).await.unwrap(); let (first_started, _) = subscriber .wait_for_successful_command_execution(Duration::from_millis(500), "insert") diff --git a/src/test/compression.rs b/src/test/compression.rs new file mode 100644 index 000000000..c81300abe --- /dev/null +++ b/src/test/compression.rs @@ -0,0 +1,28 @@ +use crate::{options::Compressor, test::get_client_options}; + +// Verifies that a compressor is properly set when a compression feature flag is enabled. Actual +// compression behavior is tested by running the driver test suite with a compressor configured; +// this test just makes sure our setup is correct. +#[tokio::test] +async fn test_compression_enabled() { + let options = get_client_options().await; + let compressors = options + .compressors + .as_ref() + .expect("compressors client option should be set when compression is enabled"); + + #[cfg(feature = "zstd-compression")] + assert!(compressors + .iter() + .any(|compressor| matches!(compressor, Compressor::Zstd { .. }))); + + #[cfg(feature = "zlib-compression")] + assert!(compressors + .iter() + .any(|compressor| matches!(compressor, Compressor::Zlib { .. 
}))); + + #[cfg(feature = "snappy-compression")] + assert!(compressors + .iter() + .any(|compressor| matches!(compressor, Compressor::Snappy))); +} diff --git a/src/test/csfle.rs b/src/test/csfle.rs index b37c25d27..5435f57f7 100644 --- a/src/test/csfle.rs +++ b/src/test/csfle.rs @@ -39,23 +39,23 @@ use crate::{ Credential, FindOptions, IndexOptions, - InsertOneOptions, ReadConcern, TlsOptions, WriteConcern, }, runtime, - test::{Event, EventHandler}, + test::{util::event_buffer::EventBuffer, Event}, Client, Collection, IndexModel, Namespace, }; +#[allow(deprecated)] +use super::EventClient; use super::{ get_client_options, log_uncaptured, - EventClient, FailCommandOptions, FailPoint, FailPointMode, @@ -64,6 +64,7 @@ use super::{ type Result = anyhow::Result; +#[allow(deprecated)] async fn init_client() -> Result<(EventClient, Collection)> { let client = EventClient::new().await; let datakeys = client @@ -212,13 +213,13 @@ async fn custom_key_material() -> Result<()> { .key_material(key) .await?; let mut key_doc = datakeys - .find_one(doc! { "_id": id.clone() }, None) + .find_one(doc! { "_id": id.clone() }) .await? .unwrap(); datakeys.delete_one(doc! { "_id": id}).await?; let new_key_id = bson::Binary::from_uuid(bson::Uuid::from_bytes([0; 16])); key_doc.insert("_id", new_key_id.clone()); - datakeys.insert_one(key_doc, None).await?; + datakeys.insert_one(key_doc).await?; let encrypted = enc .encrypt( @@ -282,7 +283,8 @@ async fn data_key_double_encryption() -> Result<()> { )?; // Testing each provider: - let mut events = client.subscribe_to_events(); + #[allow(deprecated)] + let mut events = client.events.subscribe(); let provider_keys = [ ( KmsProvider::Aws, @@ -331,7 +333,7 @@ async fn data_key_double_encryption() -> Result<()> { let docs: Vec<_> = client .database("keyvault") .collection::("datakeys") - .find(doc! { "_id": datakey_id.clone() }, None) + .find(doc! { "_id": datakey_id.clone() }) .await? .try_collect() .await?; @@ -364,7 +366,7 @@ async fn data_key_double_encryption() -> Result<()> { }), ) .await; - assert!(found.is_some(), "no valid event found in {:?}", events); + assert!(found.is_some(), "no valid event found"); // Manually encrypt a value and automatically decrypt it. let encrypted = client_encryption @@ -378,12 +380,9 @@ async fn data_key_double_encryption() -> Result<()> { let coll = client_encrypted .database("db") .collection::("coll"); - coll.insert_one( - doc! { "_id": provider.name(), "value": encrypted.clone() }, - None, - ) - .await?; - let found = coll.find_one(doc! { "_id": provider.name() }, None).await?; + coll.insert_one(doc! { "_id": provider.name(), "value": encrypted.clone() }) + .await?; + let found = coll.find_one(doc! { "_id": provider.name() }).await?; assert_eq!( found.as_ref().and_then(|doc| doc.get("value")), Some(&Bson::String(format!("hello {}", provider.name()))), @@ -402,7 +401,7 @@ async fn data_key_double_encryption() -> Result<()> { // Attempt to auto-encrypt an already encrypted field. let result = coll - .insert_one(doc! { "encrypted_placeholder": encrypted }, None) + .insert_one(doc! { "encrypted_placeholder": encrypted }) .await; let err = result.unwrap_err(); assert!( @@ -438,7 +437,7 @@ async fn external_key_vault() -> Result<()> { // Setup: initialize db. let (client, datakeys) = init_client().await?; datakeys - .insert_one(load_testdata("external/external-key.json")?, None) + .insert_one(load_testdata("external/external-key.json")?) .await?; // Setup: test options. 
@@ -478,7 +477,7 @@ async fn external_key_vault() -> Result<()> { let result = client_encrypted .database("db") .collection::("coll") - .insert_one(doc! { "encrypted": "test" }, None) + .insert_one(doc! { "encrypted": "test" }) .await; if with_external_key_vault { let err = result.unwrap_err(); @@ -543,14 +542,15 @@ async fn bson_size_limits() -> Result<()> { .validator(doc! { "$jsonSchema": load_testdata("limits/limits-schema.json")? }) .await?; datakeys - .insert_one(load_testdata("limits/limits-key.json")?, None) + .insert_one(load_testdata("limits/limits-key.json")?) .await?; // Setup: encrypted client. let mut opts = get_client_options().await.clone(); - let handler = Arc::new(EventHandler::new()); - let mut events = handler.subscribe(); - opts.command_event_handler = Some(handler.clone().into()); + let buffer = EventBuffer::::new(); + #[allow(deprecated)] + let mut events = buffer.subscribe(); + opts.command_event_handler = Some(buffer.handler()); let client_encrypted = Client::encrypted_builder(opts, KV_NAMESPACE.clone(), LOCAL_KMS.clone())? .extra_options(EXTRA_OPTIONS.clone()) @@ -563,37 +563,31 @@ async fn bson_size_limits() -> Result<()> { // Tests // Test operation 1 - coll.insert_one( - doc! { - "_id": "over_2mib_under_16mib", - "unencrypted": "a".repeat(2097152), - }, - None, - ) + coll.insert_one(doc! { + "_id": "over_2mib_under_16mib", + "unencrypted": "a".repeat(2097152), + }) .await?; // Test operation 2 let mut doc: Document = load_testdata("limits/limits-doc.json")?; doc.insert("_id", "encryption_exceeds_2mib"); doc.insert("unencrypted", "a".repeat(2_097_152 - 2_000)); - coll.insert_one(doc, None).await?; + coll.insert_one(doc).await?; // Test operation 3 let value = "a".repeat(2_097_152); events.clear_events(Duration::from_millis(500)).await; - coll.insert_many( - vec![ - doc! { - "_id": "over_2mib_1", - "unencrypted": value.clone(), - }, - doc! { - "_id": "over_2mib_2", - "unencrypted": value, - }, - ], - None, - ) + coll.insert_many(vec![ + doc! { + "_id": "over_2mib_1", + "unencrypted": value.clone(), + }, + doc! { + "_id": "over_2mib_2", + "unencrypted": value, + }, + ]) .await?; let inserts = events .collect_events(Duration::from_millis(500), |ev| { @@ -613,7 +607,7 @@ async fn bson_size_limits() -> Result<()> { let mut doc2 = doc.clone(); doc2.insert("_id", "encryption_exceeds_2mib_2"); events.clear_events(Duration::from_millis(500)).await; - coll.insert_many(vec![doc, doc2], None).await?; + coll.insert_many(vec![doc, doc2]).await?; let inserts = events .collect_events(Duration::from_millis(500), |ev| { let ev = match ev.as_command_started_event() { @@ -630,13 +624,13 @@ async fn bson_size_limits() -> Result<()> { "_id": "under_16mib", "unencrypted": "a".repeat(16_777_216 - 2_000), }; - coll.insert_one(doc, None).await?; + coll.insert_one(doc).await?; // Test operation 6 let mut doc: Document = load_testdata("limits/limits-doc.json")?; doc.insert("_id", "encryption_exceeds_16mib"); doc.insert("unencrypted", "a".repeat(16_777_216 - 2_000)); - let result = coll.insert_one(doc, None).await; + let result = coll.insert_one(doc).await; let err = result.unwrap_err(); assert!( matches!(*err.kind, ErrorKind::Write(_)), @@ -682,7 +676,7 @@ async fn views_prohibited() -> Result<()> { let result = client_encrypted .database("db") .collection::("view") - .insert_one(doc! {}, None) + .insert_one(doc! 
{}) .await; let err = result.unwrap_err(); assert!( @@ -759,7 +753,7 @@ async fn run_corpus_test(local_schema: bool) -> Result<()> { "corpus/corpus-key-gcp.json", "corpus/corpus-key-kmip.json", ] { - datakeys.insert_one(load_testdata(f)?, None).await?; + datakeys.insert_one(load_testdata(f)?).await?; } // Setup: encrypted client and manual encryption. @@ -856,9 +850,9 @@ async fn run_corpus_test(local_schema: bool) -> Result<()> { let coll = client_encrypted .database("db") .collection::("coll"); - let id = coll.insert_one(corpus_copied, None).await?.inserted_id; + let id = coll.insert_one(corpus_copied).await?.inserted_id; let corpus_decrypted = coll - .find_one(doc! { "_id": id.clone() }, None) + .find_one(doc! { "_id": id.clone() }) .await? .expect("document lookup failed"); assert_eq!(corpus, corpus_decrypted); @@ -868,7 +862,7 @@ async fn run_corpus_test(local_schema: bool) -> Result<()> { let corpus_encrypted_actual = client .database("db") .collection::("coll") - .find_one(doc! { "_id": id }, None) + .find_one(doc! { "_id": id }) .await? .expect("encrypted document lookup failed"); for (name, field) in &corpus_encrypted_expected { @@ -1281,7 +1275,7 @@ async fn bypass_mongocryptd_via_shared_library() -> Result<()> { client_encrypted .database("db") .collection::("coll") - .insert_one(doc! { "unencrypted": "test" }, None) + .insert_one(doc! { "unencrypted": "test" }) .await?; // Test: mongocryptd not spawned. assert!(!client_encrypted.mongocryptd_spawned().await); @@ -1322,7 +1316,7 @@ async fn bypass_mongocryptd_via_bypass_spawn() -> Result<()> { let err = client_encrypted .database("db") .collection::("coll") - .insert_one(doc! { "encrypted": "test" }, None) + .insert_one(doc! { "encrypted": "test" }) .await .unwrap_err(); assert!(err.is_server_selection_error(), "unexpected error: {}", err); @@ -1357,7 +1351,7 @@ async fn bypass_mongocryptd_unencrypted_insert(bypass: Bypass) -> Result<()> { client_encrypted .database("db") .collection::("coll") - .insert_one(doc! { "unencrypted": "test" }, None) + .insert_one(doc! { "unencrypted": "test" }) .await?; // Test: mongocryptd not spawned. assert!(!client_encrypted.mongocryptd_spawned().await); @@ -1508,13 +1502,15 @@ impl DeadlockTestCase { async fn run(&self) -> Result<()> { // Setup let client_test = TestClient::new().await; + #[allow(deprecated)] let client_keyvault = EventClient::with_options({ let mut opts = get_client_options().await.clone(); opts.max_pool_size = Some(1); opts }) .await; - let mut keyvault_events = client_keyvault.subscribe_to_events(); + #[allow(deprecated)] + let mut keyvault_events = client_keyvault.events.subscribe(); client_test .database("keyvault") .collection::("datakeys") @@ -1528,12 +1524,8 @@ impl DeadlockTestCase { client_keyvault .database("keyvault") .collection::("datakeys") - .insert_one( - load_testdata("external/external-key.json")?, - InsertOneOptions::builder() - .write_concern(WriteConcern::majority()) - .build(), - ) + .insert_one(load_testdata("external/external-key.json")?) 
+ .write_concern(WriteConcern::majority()) .await?; client_test .database("db") @@ -1554,12 +1546,13 @@ impl DeadlockTestCase { .await?; // Run test case - let event_handler = Arc::new(EventHandler::new()); - let mut encrypted_events = event_handler.subscribe(); + let event_buffer = EventBuffer::new(); + #[allow(deprecated)] + let mut encrypted_events = event_buffer.subscribe(); let mut opts = get_client_options().await.clone(); opts.max_pool_size = Some(self.max_pool_size); - opts.command_event_handler = Some(event_handler.clone().into()); - opts.sdam_event_handler = Some(event_handler.clone().into()); + opts.command_event_handler = Some(event_buffer.handler()); + opts.sdam_event_handler = Some(event_buffer.handler()); let client_encrypted = Client::encrypted_builder(opts, KV_NAMESPACE.clone(), LOCAL_KMS.clone())? .bypass_auto_encryption(self.bypass_auto_encryption) @@ -1579,20 +1572,20 @@ impl DeadlockTestCase { client_test .database("db") .collection::("coll") - .insert_one(doc! { "_id": 0, "encrypted": ciphertext }, None) + .insert_one(doc! { "_id": 0, "encrypted": ciphertext }) .await?; } else { client_encrypted .database("db") .collection::("coll") - .insert_one(doc! { "_id": 0, "encrypted": "string0" }, None) + .insert_one(doc! { "_id": 0, "encrypted": "string0" }) .await?; } let found = client_encrypted .database("db") .collection::("coll") - .find_one(doc! { "_id": 0 }, None) + .find_one(doc! { "_id": 0 }) .await?; assert_eq!(found, Some(doc! { "_id": 0, "encrypted": "string0" })); @@ -2005,7 +1998,7 @@ async fn explicit_encryption_case_1() -> Result<()> { .contention_factor(0) .await?; enc_coll - .insert_one(doc! { "encryptedIndexed": insert_payload }, None) + .insert_one(doc! { "encryptedIndexed": insert_payload }) .await?; let find_payload = testdata @@ -2019,7 +2012,7 @@ async fn explicit_encryption_case_1() -> Result<()> { .contention_factor(0) .await?; let found: Vec<_> = enc_coll - .find(doc! { "encryptedIndexed": find_payload }, None) + .find(doc! { "encryptedIndexed": find_payload }) .await? .try_collect() .await?; @@ -2063,7 +2056,7 @@ async fn explicit_encryption_case_2() -> Result<()> { .contention_factor(10) .await?; enc_coll - .insert_one(doc! { "encryptedIndexed": insert_payload }, None) + .insert_one(doc! { "encryptedIndexed": insert_payload }) .await?; } @@ -2078,7 +2071,7 @@ async fn explicit_encryption_case_2() -> Result<()> { .contention_factor(0) .await?; let found: Vec<_> = enc_coll - .find(doc! { "encryptedIndexed": find_payload }, None) + .find(doc! { "encryptedIndexed": find_payload }) .await? .try_collect() .await?; @@ -2098,7 +2091,7 @@ async fn explicit_encryption_case_2() -> Result<()> { .contention_factor(10) .await?; let found: Vec<_> = enc_coll - .find(doc! { "encryptedIndexed": find_payload2 }, None) + .find(doc! { "encryptedIndexed": find_payload2 }) .await? .try_collect() .await?; @@ -2138,14 +2131,11 @@ async fn explicit_encryption_case_3() -> Result<()> { ) .await?; enc_coll - .insert_one( - doc! { "_id": 1, "encryptedUnindexed": insert_payload }, - None, - ) + .insert_one(doc! { "_id": 1, "encryptedUnindexed": insert_payload }) .await?; let found: Vec<_> = enc_coll - .find(doc! { "_id": 1 }, None) + .find(doc! { "_id": 1 }) .await? 
.try_collect() .await?; @@ -2262,12 +2252,8 @@ async fn explicit_encryption_setup() -> Result("datakeys") - .insert_one( - key1_document, - InsertOneOptions::builder() - .write_concern(WriteConcern::majority()) - .build(), - ) + .insert_one(key1_document) + .write_concern(WriteConcern::majority()) .await?; let client_encryption = ClientEncryption::new( @@ -2489,7 +2475,7 @@ async fn decryption_events_decrypt_error() -> Result<()> { None => return Ok(()), }; td.decryption_events - .insert_one(doc! { "encrypted": td.malformed_ciphertext }, None) + .insert_one(doc! { "encrypted": td.malformed_ciphertext }) .await?; let err = td.decryption_events.aggregate(vec![]).await.unwrap_err(); assert!(err.is_csfle_error()); @@ -2520,7 +2506,7 @@ async fn decryption_events_decrypt_success() -> Result<()> { None => return Ok(()), }; td.decryption_events - .insert_one(doc! { "encrypted": td.ciphertext }, None) + .insert_one(doc! { "encrypted": td.ciphertext }) .await?; td.decryption_events.aggregate(vec![]).await?; let guard = td.ev_handler.succeeded.lock().unwrap(); @@ -2862,7 +2848,7 @@ async fn bypass_mongocryptd_client() -> Result<()> { client_encrypted .database("db") .collection::("coll") - .insert_one(doc! { "unencrypted": "test" }, None) + .insert_one(doc! { "unencrypted": "test" }) .await?; assert!(!client_encrypted.has_mongocryptd_client().await); @@ -2929,7 +2915,7 @@ async fn auto_encryption_keys(master_key: MasterKey) -> Result<()> { .await .1?; let coll = db.collection::("case_1"); - let result = coll.insert_one(doc! { "ssn": "123-45-6789" }, None).await; + let result = coll.insert_one(doc! { "ssn": "123-45-6789" }).await; assert!( result.as_ref().unwrap_err().code() == Some(121), "Expected error 121 (failed validation), got {:?}", @@ -2988,8 +2974,7 @@ async fn auto_encryption_keys(master_key: MasterKey) -> Result<()> { }; let encrypted_payload = ce.encrypt("123-45-6789", key, Algorithm::Unindexed).await?; let coll = db.collection::("case_1"); - coll.insert_one(doc! { "ssn": encrypted_payload }, None) - .await?; + coll.insert_one(doc! { "ssn": encrypted_payload }).await?; Ok(()) } @@ -3001,7 +2986,7 @@ async fn range_explicit_encryption() -> Result<()> { return Ok(()); } let client = TestClient::new().await; - if client.server_version_lt(6, 2) || client.is_standalone() { + if client.server_version_lt(6, 2) || client.server_version_gte(8, 0) || client.is_standalone() { log_uncaptured("Skipping range_explicit_encryption due to unsupported topology"); return Ok(()); } @@ -3106,12 +3091,8 @@ async fn range_explicit_encryption_test( .await?; datakeys_collection - .insert_one( - key1_document, - InsertOneOptions::builder() - .write_concern(WriteConcern::majority()) - .build(), - ) + .insert_one(key1_document) + .write_concern(WriteConcern::majority()) .await?; let key_vault_client = TestClient::new().await; @@ -3152,13 +3133,10 @@ async fn range_explicit_encryption_test( .await?; explicit_encryption_collection - .insert_one( - doc! { - &key: encrypted_value, - "_id": id as i32, - }, - None, - ) + .insert_one(doc! { + &key: encrypted_value, + "_id": id as i32, + }) .await?; } @@ -3206,7 +3184,8 @@ async fn range_explicit_encryption_test( .await?; let docs: Vec = explicit_encryption_collection - .find(find_payload, find_options.clone()) + .find(find_payload) + .with_options(find_options.clone()) .await? 
.try_collect() .await?; @@ -3228,7 +3207,8 @@ async fn range_explicit_encryption_test( let docs: Vec = encrypted_client .database("db") .collection("explicit_encryption") - .find(find_payload, find_options.clone()) + .find(find_payload) + .with_options(find_options.clone()) .await? .try_collect() .await?; @@ -3249,7 +3229,8 @@ async fn range_explicit_encryption_test( let docs: Vec = encrypted_client .database("db") .collection("explicit_encryption") - .find(find_payload, find_options.clone()) + .find(find_payload) + .with_options(find_options.clone()) .await? .try_collect() .await?; @@ -3266,7 +3247,8 @@ async fn range_explicit_encryption_test( let docs: Vec = encrypted_client .database("db") .collection("explicit_encryption") - .find(doc! { "$expr": find_payload }, find_options.clone()) + .find(doc! { "$expr": find_payload }) + .with_options(find_options.clone()) .await? .try_collect() .await?; @@ -3426,24 +3408,18 @@ async fn fle2_example() -> Result<()> { // Encrypt an insert. encrypted_coll - .insert_one( - doc! { - "_id": 1, - "encryptedIndexed": "indexedValue", - "encryptedUnindexed": "unindexedValue", - }, - None, - ) + .insert_one(doc! { + "_id": 1, + "encryptedIndexed": "indexedValue", + "encryptedUnindexed": "unindexedValue", + }) .await?; // Encrypt a find. let found = encrypted_coll - .find_one( - doc! { - "encryptedIndexed": "indexedValue", - }, - None, - ) + .find_one(doc! { + "encryptedIndexed": "indexedValue", + }) .await? .unwrap(); assert_eq!("indexedValue", found.get_str("encryptedIndexed")?); @@ -3453,10 +3429,7 @@ async fn fle2_example() -> Result<()> { let unencrypted_coll = test_client .database("docsExamples") .collection::("encrypted"); - let found = unencrypted_coll - .find_one(doc! { "_id": 1 }, None) - .await? - .unwrap(); + let found = unencrypted_coll.find_one(doc! { "_id": 1 }).await?.unwrap(); assert_eq!( Some(ElementType::Binary), found.get("encryptedIndexed").map(Bson::element_type) diff --git a/src/test/cursor.rs b/src/test/cursor.rs index 4eda10693..c77808b39 100644 --- a/src/test/cursor.rs +++ b/src/test/cursor.rs @@ -3,11 +3,13 @@ use std::time::Duration; use futures::{future::Either, StreamExt, TryStreamExt}; use serde::{Deserialize, Serialize}; +#[allow(deprecated)] +use crate::test::util::EventClient; use crate::{ bson::doc, options::{CreateCollectionOptions, CursorType, FindOptions}, runtime, - test::{log_uncaptured, util::EventClient, TestClient, SERVERLESS}, + test::{log_uncaptured, TestClient, SERVERLESS}, }; #[tokio::test] @@ -33,20 +35,16 @@ async fn tailable_cursor() { ) .await; - coll.insert_many((0..5).map(|i| doc! { "_id": i }), None) + coll.insert_many((0..5).map(|i| doc! { "_id": i })) .await .unwrap(); let await_time = Duration::from_millis(500); let mut cursor = coll - .find( - None, - FindOptions::builder() - .cursor_type(CursorType::TailableAwait) - .max_await_time(await_time) - .build(), - ) + .find(doc! {}) + .cursor_type(CursorType::TailableAwait) + .max_await_time(await_time) .await .unwrap(); @@ -70,7 +68,7 @@ async fn tailable_cursor() { }; runtime::spawn(async move { - coll.insert_one(doc! { "_id": 5 }, None).await.unwrap(); + coll.insert_one(doc! { "_id": 5 }).await.unwrap(); }); let delay = tokio::time::sleep(await_time); @@ -93,13 +91,15 @@ async fn session_cursor_next() { .create_fresh_collection(function_name!(), function_name!(), None) .await; - coll.insert_many_with_session((0..5).map(|i| doc! { "_id": i }), None, &mut session) + coll.insert_many((0..5).map(|i| doc! 
{ "_id": i })) + .session(&mut session) .await .unwrap(); - let opts = FindOptions::builder().batch_size(1).build(); let mut cursor = coll - .find_with_session(None, opts, &mut session) + .find(doc! {}) + .batch_size(1) + .session(&mut session) .await .unwrap(); @@ -113,6 +113,7 @@ async fn session_cursor_next() { #[tokio::test] async fn batch_exhaustion() { + #[allow(deprecated)] let client = EventClient::new().await; let coll = client @@ -122,34 +123,32 @@ async fn batch_exhaustion() { None, ) .await; - coll.insert_many( - vec![ - doc! { "foo": 1 }, - doc! { "foo": 2 }, - doc! { "foo": 3 }, - doc! { "foo": 4 }, - doc! { "foo": 5 }, - doc! { "foo": 6 }, - ], - None, - ) + coll.insert_many(vec![ + doc! { "foo": 1 }, + doc! { "foo": 2 }, + doc! { "foo": 3 }, + doc! { "foo": 4 }, + doc! { "foo": 5 }, + doc! { "foo": 6 }, + ]) .await .unwrap(); // Start a find where batch size will line up with limit. - let cursor = coll - .find(None, FindOptions::builder().batch_size(2).limit(4).build()) - .await - .unwrap(); + let cursor = coll.find(doc! {}).batch_size(2).limit(4).await.unwrap(); let v: Vec<_> = cursor.try_collect().await.unwrap(); assert_eq!(4, v.len()); // Assert that the last `getMore` response always has id 0, i.e. is exhausted. - let replies: Vec<_> = client - .get_command_events(&["getMore"]) - .into_iter() - .filter_map(|e| e.as_command_succeeded().map(|e| e.reply.clone())) - .collect(); + #[allow(deprecated)] + let replies: Vec<_> = { + let mut events = client.events.clone(); + events + .get_command_events(&["getMore"]) + .into_iter() + .filter_map(|e| e.as_command_succeeded().map(|e| e.reply.clone())) + .collect() + }; let last = replies.last().unwrap(); let cursor = last.get_document("cursor").unwrap(); let id = cursor.get_i64("id").unwrap(); @@ -158,6 +157,7 @@ async fn batch_exhaustion() { #[tokio::test] async fn borrowed_deserialization() { + #[allow(deprecated)] let client = EventClient::new().await; #[derive(Serialize, Deserialize, Debug, PartialEq)] @@ -195,14 +195,18 @@ async fn borrowed_deserialization() { Doc { id: 5, foo: "1" }, ]; - coll.insert_many(&docs, None).await.unwrap(); + coll.insert_many(&docs).await.unwrap(); let options = FindOptions::builder() .batch_size(2) .sort(doc! { "_id": 1 }) .build(); - let mut cursor = coll.find(None, options.clone()).await.unwrap(); + let mut cursor = coll + .find(doc! {}) + .with_options(options.clone()) + .await + .unwrap(); let mut i = 0; while cursor.advance().await.unwrap() { @@ -213,7 +217,9 @@ async fn borrowed_deserialization() { let mut session = client.start_session().await.unwrap(); let mut cursor = coll - .find_with_session(None, options.clone(), &mut session) + .find(doc! {}) + .with_options(options.clone()) + .session(&mut session) .await .unwrap(); @@ -233,19 +239,14 @@ async fn session_cursor_with_type() { let coll = client.database("db").collection("coll"); coll.drop().session(&mut session).await.unwrap(); - coll.insert_many_with_session( - vec![doc! { "x": 1 }, doc! { "x": 2 }, doc! { "x": 3 }], - None, - &mut session, - ) - .await - .unwrap(); - - let mut cursor: crate::SessionCursor = coll - .find_with_session(doc! {}, None, &mut session) + coll.insert_many(vec![doc! { "x": 1 }, doc! { "x": 2 }, doc! { "x": 3 }]) + .session(&mut session) .await .unwrap(); + let mut cursor: crate::SessionCursor = + coll.find(doc! 
{}).session(&mut session).await.unwrap(); + let _ = cursor.next(&mut session).await.unwrap().unwrap(); let mut cursor_with_type: crate::SessionCursor = cursor.with_type(); @@ -259,23 +260,17 @@ async fn cursor_final_batch() { let coll = client .create_fresh_collection("test_cursor_final_batch", "test", None) .await; - coll.insert_many( - vec![ - doc! { "foo": 1 }, - doc! { "foo": 2 }, - doc! { "foo": 3 }, - doc! { "foo": 4 }, - doc! { "foo": 5 }, - ], - None, - ) + coll.insert_many(vec![ + doc! { "foo": 1 }, + doc! { "foo": 2 }, + doc! { "foo": 3 }, + doc! { "foo": 4 }, + doc! { "foo": 5 }, + ]) .await .unwrap(); - let mut cursor = coll - .find(None, FindOptions::builder().batch_size(3).build()) - .await - .unwrap(); + let mut cursor = coll.find(doc! {}).batch_size(3).await.unwrap(); let mut found = 0; while cursor.advance().await.unwrap() { found += 1; diff --git a/src/test/db.rs b/src/test/db.rs index b699294d3..c180d6083 100644 --- a/src/test/db.rs +++ b/src/test/db.rs @@ -2,6 +2,8 @@ use std::cmp::Ord; use futures::stream::TryStreamExt; +#[allow(deprecated)] +use crate::test::util::EventClient; use crate::{ action::Action, bson::{doc, Document}, @@ -14,7 +16,7 @@ use crate::{ ValidationLevel, }, results::{CollectionSpecification, CollectionType}, - test::util::{EventClient, TestClient}, + test::util::TestClient, Database, }; @@ -52,7 +54,7 @@ async fn list_collections() { for coll_name in coll_names { db.collection(coll_name) - .insert_one(doc! { "x": 1 }, None) + .insert_one(doc! { "x": 1 }) .await .unwrap(); } @@ -80,7 +82,7 @@ async fn list_collections_filter() { let coll_names = &["bar", "baz", "foo"]; for coll_name in coll_names { db.collection(coll_name) - .insert_one(doc! { "x": 1 }, None) + .insert_one(doc! { "x": 1 }) .await .unwrap(); } @@ -119,7 +121,7 @@ async fn list_collection_names() { for coll in expected_colls { db.collection(coll) - .insert_one(doc! { "x": 1 }, None) + .insert_one(doc! { "x": 1 }) .await .unwrap(); } @@ -316,6 +318,7 @@ async fn create_index_options_defaults_not_specified() { } async fn index_option_defaults_test(defaults: Option, name: &str) { + #[allow(deprecated)] let client = EventClient::new().await; let db = client.database(name); @@ -325,7 +328,8 @@ async fn index_option_defaults_test(defaults: Option, name: .unwrap(); db.drop().await.unwrap(); - let events = client.get_command_started_events(&["create"]); + #[allow(deprecated)] + let events = client.events.get_command_started_events(&["create"]); assert_eq!(events.len(), 1); let event_defaults = match events[0].command.get_document("indexOptionDefaults") { diff --git a/src/test/documentation_examples.rs b/src/test/documentation_examples.rs index 07a4e3bd2..18a2c28ff 100644 --- a/src/test/documentation_examples.rs +++ b/src/test/documentation_examples.rs @@ -7,7 +7,7 @@ use semver::Version; use crate::{ bson::{doc, Bson}, error::Result, - options::{ClientOptions, FindOptions, ServerApi, ServerApiVersion}, + options::{ClientOptions, ServerApi, ServerApiVersion}, test::{log_uncaptured, TestClient, DEFAULT_URI}, Client, Collection, @@ -39,26 +39,23 @@ async fn insert_examples(collection: &Collection) -> Result<()> { // Start Example 1 collection - .insert_one( - doc! { - "item": "canvas", - "qty": 100, - "tags": ["cotton"], - "size": { - "h": 28, - "w": 35.5, - "uom": "cm", - } - }, - None, - ) + .insert_one(doc! 
{ + "item": "canvas", + "qty": 100, + "tags": ["cotton"], + "size": { + "h": 28, + "w": 35.5, + "uom": "cm", + } + }) .await?; // End Example 1 assert_coll_count!(collection, 1); // Start Example 2 - let cursor = collection.find(doc! { "item": "canvas" }, None).await?; + let cursor = collection.find(doc! { "item": "canvas" }).await?; // End Example 2 assert_cursor_count!(cursor, 1); @@ -97,7 +94,7 @@ async fn insert_examples(collection: &Collection) -> Result<()> { }, ]; - collection.insert_many(docs, None).await?; + collection.insert_many(docs).await?; // End Example 3 assert_coll_count!(collection, 4); @@ -162,33 +159,30 @@ async fn query_top_level_fields_examples(collection: &Collection) -> R }, ]; - collection.insert_many(docs, None).await?; + collection.insert_many(docs).await?; // End Example 6 assert_coll_count!(collection, 5); // Start Example 7 - let cursor = collection.find(None, None).await?; + let cursor = collection.find(doc! {}).await?; // End Example 7 assert_cursor_count!(cursor, 5); // Start Example 9 - let cursor = collection.find(doc! { "status": "D" }, None).await?; + let cursor = collection.find(doc! { "status": "D" }).await?; // End Example 9 assert_cursor_count!(cursor, 2); // Start Example 10 let cursor = collection - .find( - doc! { - "status": { - "$in": ["A", "D"], - } - }, - None, - ) + .find(doc! { + "status": { + "$in": ["A", "D"], + } + }) .await?; // End Example 10 @@ -196,13 +190,10 @@ async fn query_top_level_fields_examples(collection: &Collection) -> R // Start Example 11 let cursor = collection - .find( - doc! { - "status": "A", - "qty": { "$lt": 30 }, - }, - None, - ) + .find(doc! { + "status": "A", + "qty": { "$lt": 30 }, + }) .await?; // End Example 11 @@ -210,17 +201,14 @@ async fn query_top_level_fields_examples(collection: &Collection) -> R // Start Example 12 let cursor = collection - .find( - doc! { - "$or": [ - { "status": "A" }, - { - "qty": { "$lt": 30 }, - } - ], - }, - None, - ) + .find(doc! { + "$or": [ + { "status": "A" }, + { + "qty": { "$lt": 30 }, + } + ], + }) .await?; // End Example 12 @@ -228,20 +216,17 @@ async fn query_top_level_fields_examples(collection: &Collection) -> R // Start Example 13 let cursor = collection - .find( - doc! { - "status": "A", - "$or": [ - { - "qty": { "$lt": 30 }, - }, - { - "item": { "$regex": "^p" }, - }, - ], - }, - None, - ) + .find(doc! { + "status": "A", + "$or": [ + { + "qty": { "$lt": 30 }, + }, + { + "item": { "$regex": "^p" }, + }, + ], + }) .await?; // End Example 13 @@ -307,23 +292,20 @@ async fn query_embedded_documents_examples(collection: &Collection) -> }, ]; - collection.insert_many(docs, None).await?; + collection.insert_many(docs).await?; // End Example 14 assert_coll_count!(collection, 5); // Start Example 15 let cursor = collection - .find( - doc! { - "size": { - "h": 14, - "w": 21, - "uom": "cm", - }, + .find(doc! { + "size": { + "h": 14, + "w": 21, + "uom": "cm", }, - None, - ) + }) .await?; // End Example 15 @@ -331,35 +313,29 @@ async fn query_embedded_documents_examples(collection: &Collection) -> // Start Example 16 let cursor = collection - .find( - doc! { - "size": { - "w": 21, - "h": 14, - "uom": "cm", - }, + .find(doc! { + "size": { + "w": 21, + "h": 14, + "uom": "cm", }, - None, - ) + }) .await?; // End Example 16 assert_cursor_count!(cursor, 0); // Start Example 17 - let cursor = collection.find(doc! { "size.uom": "in" }, None).await?; + let cursor = collection.find(doc! 
{ "size.uom": "in" }).await?; // End Example 17 assert_cursor_count!(cursor, 2); // Start Example 18 let cursor = collection - .find( - doc! { - "size.h": { "$lt": 15 }, - }, - None, - ) + .find(doc! { + "size.h": { "$lt": 15 }, + }) .await?; // End Example 18 @@ -367,14 +343,11 @@ async fn query_embedded_documents_examples(collection: &Collection) -> // Start Example 19 let cursor = collection - .find( - doc! { - "size.h": { "$lt": 15 }, - "size.uom": "in", - "status": "D", - }, - None, - ) + .find(doc! { + "size.h": { "$lt": 15 }, + "size.uom": "in", + "status": "D", + }) .await?; // End Example 19 @@ -420,19 +393,16 @@ async fn query_arrays_examples(collection: &Collection) -> Result<()> }, ]; - collection.insert_many(docs, None).await?; + collection.insert_many(docs).await?; // End Example 20 assert_coll_count!(collection, 5); // Start Example 21 let cursor = collection - .find( - doc! { - "tags": ["red", "blank"], - }, - None, - ) + .find(doc! { + "tags": ["red", "blank"], + }) .await?; // End Example 21 @@ -440,14 +410,11 @@ async fn query_arrays_examples(collection: &Collection) -> Result<()> // Start Example 22 let cursor = collection - .find( - doc! { - "tags": { - "$all": ["red", "blank"], - } - }, - None, - ) + .find(doc! { + "tags": { + "$all": ["red", "blank"], + } + }) .await?; // End Example 22 @@ -455,12 +422,9 @@ async fn query_arrays_examples(collection: &Collection) -> Result<()> // Start Example 23 let cursor = collection - .find( - doc! { - "tags": "red", - }, - None, - ) + .find(doc! { + "tags": "red", + }) .await?; // End Example 23 @@ -468,12 +432,9 @@ async fn query_arrays_examples(collection: &Collection) -> Result<()> // Start Example 24 let cursor = collection - .find( - doc! { - "dim_cm": { "$gt": 25 }, - }, - None, - ) + .find(doc! { + "dim_cm": { "$gt": 25 }, + }) .await?; // End Example 24 @@ -481,15 +442,12 @@ async fn query_arrays_examples(collection: &Collection) -> Result<()> // Start Example 25 let cursor = collection - .find( - doc! { - "dim_cm": { - "$gt": 15, - "$lt": 20, - }, + .find(doc! { + "dim_cm": { + "$gt": 15, + "$lt": 20, }, - None, - ) + }) .await?; // End Example 25 @@ -497,17 +455,14 @@ async fn query_arrays_examples(collection: &Collection) -> Result<()> // Start Example 26 let cursor = collection - .find( - doc! { - "dim_cm": { - "$elemMatch": { - "$gt": 22, - "$lt": 30, - } - }, + .find(doc! { + "dim_cm": { + "$elemMatch": { + "$gt": 22, + "$lt": 30, + } }, - None, - ) + }) .await?; // End Example 26 @@ -515,12 +470,9 @@ async fn query_arrays_examples(collection: &Collection) -> Result<()> // Start Example 27 let cursor = collection - .find( - doc! { - "dim_cm.1": { "$gt": 25 }, - }, - None, - ) + .find(doc! { + "dim_cm.1": { "$gt": 25 }, + }) .await?; // End Example 27 @@ -528,12 +480,9 @@ async fn query_arrays_examples(collection: &Collection) -> Result<()> // Start Example 28 let cursor = collection - .find( - doc! { - "tags": { "$size": 3 }, - }, - None, - ) + .find(doc! { + "tags": { "$size": 3 }, + }) .await?; // End Example 28 @@ -581,22 +530,19 @@ async fn query_array_embedded_documents_examples(collection: &Collection }, ]; - collection.insert_many(docs, None).await?; + collection.insert_many(docs).await?; // End Example 38 assert_coll_count!(collection, 2); // Start Example 39 let cursor = collection - .find( - doc! { - "item": Bson::Null, - }, - None, - ) + .find(doc! 
{ + "item": Bson::Null, + }) .await?; // End Example 39 @@ -749,12 +671,9 @@ async fn query_null_or_missing_fields_examples(collection: &Collection // Start Example 40 let cursor = collection - .find( - doc! { - "item": { "$type": 10 }, - }, - None, - ) + .find(doc! { + "item": { "$type": 10 }, + }) .await?; // End Example 40 @@ -762,12 +681,9 @@ async fn query_null_or_missing_fields_examples(collection: &Collection // Start Example 41 let cursor = collection - .find( - doc! { - "item": { "$exists": false }, - }, - None, - ) + .find(doc! { + "item": { "$exists": false }, + }) .await?; // End Example 41 @@ -863,39 +779,30 @@ async fn projection_examples(collection: &Collection) -> Result<()> { }, ]; - collection.insert_many(docs, None).await?; + collection.insert_many(docs).await?; // End Example 42 assert_coll_count!(collection, 5); // Start Example 43 let cursor = collection - .find( - doc! { - "status": "A", - }, - None, - ) + .find(doc! { + "status": "A", + }) .await?; // End Example 43 assert_cursor_count!(cursor, 3); // Start Example 44 - let options = FindOptions::builder() + let cursor = collection + .find(doc! { + "status": "A", + }) .projection(doc! { "item": 1, "status": 1, }) - .build(); - - let cursor = collection - .find( - doc! { - "status": "A", - }, - options, - ) .await?; // End Example 44 @@ -908,21 +815,15 @@ async fn projection_examples(collection: &Collection) -> Result<()> { }); // Start Example 45 - let options = FindOptions::builder() + let cursor = collection + .find(doc! { + "status": "A", + }) .projection(doc! { "item": 1, "status": 1, "_id": 0, }) - .build(); - - let cursor = collection - .find( - doc! { - "status": "A", - }, - options, - ) .await?; // End Example 45 @@ -935,20 +836,14 @@ async fn projection_examples(collection: &Collection) -> Result<()> { }); // Start Example 46 - let options = FindOptions::builder() + let cursor = collection + .find(doc! { + "status": "A", + }) .projection(doc! { "status": 0, "instock": 0, }) - .build(); - - let cursor = collection - .find( - doc! { - "status": "A", - }, - options, - ) .await?; // End Example 46 @@ -961,21 +856,15 @@ async fn projection_examples(collection: &Collection) -> Result<()> { }); // Start Example 47 - let options = FindOptions::builder() + let cursor = collection + .find(doc! { + "status": "A", + }) .projection(doc! { "item": 1, "status": 1, "size.uom": 1, }) - .build(); - - let cursor = collection - .find( - doc! { - "status": "A", - }, - options, - ) .await?; // End Example 47 @@ -994,19 +883,13 @@ async fn projection_examples(collection: &Collection) -> Result<()> { }); // Start Example 48 - let options = FindOptions::builder() + let cursor = collection + .find(doc! { + "status": "A", + }) .projection(doc! { "size.uom": 0, }) - .build(); - - let cursor = collection - .find( - doc! { - "status": "A", - }, - options, - ) .await?; // End Example 48 @@ -1025,21 +908,15 @@ async fn projection_examples(collection: &Collection) -> Result<()> { }); // Start Example 50 - let options = FindOptions::builder() + let cursor = collection + .find(doc! { + "status": "A", + }) .projection(doc! { "item": 1, "status": 1, "instock": { "$slice": -1 }, }) - .build(); - - let cursor = collection - .find( - doc! 
{ - "status": "A", - }, - options, - ) .await?; // End Example 50 @@ -1165,7 +1042,7 @@ async fn update_examples(collection: &Collection) -> Result<()> { }, ]; - collection.insert_many(docs, None).await?; + collection.insert_many(docs).await?; // End Example 51 assert_coll_count!(collection, 10); @@ -1186,10 +1063,7 @@ async fn update_examples(collection: &Collection) -> Result<()> { // End Example 52 run_on_each_doc!( - collection - .find(doc! { "item": "paper" }, None) - .await - .unwrap(), + collection.find(doc! { "item": "paper" }).await.unwrap(), doc, { let uom = doc.get_document("size").unwrap().get_str("uom").unwrap(); @@ -1221,12 +1095,9 @@ async fn update_examples(collection: &Collection) -> Result<()> { run_on_each_doc!( collection - .find( - doc! { - "qty": { "$lt": 50 }, - }, - None, - ) + .find(doc! { + "qty": { "$lt": 50 }, + }) .await .unwrap(), doc, @@ -1258,16 +1129,12 @@ async fn update_examples(collection: &Collection) -> Result<()> { }, ], }, - None, ) .await?; // End Example 54 run_on_each_doc!( - collection - .find(doc! { "item": "paper" }, None,) - .await - .unwrap(), + collection.find(doc! { "item": "paper" }).await.unwrap(), doc, { assert_eq!(doc.len(), 3); @@ -1340,7 +1207,7 @@ async fn delete_examples(collection: &Collection) -> Result<()> { }, ]; - collection.insert_many(docs, None).await?; + collection.insert_many(docs).await?; // End Example 55 assert_coll_count!(collection, 5); @@ -1453,7 +1320,7 @@ async fn stable_api_examples() -> GenericResult<()> { doc! { "_id" : 6, "item" : "xyz", "price" : 5, "quantity" : 5, "date" : iso_date("2021-02-15T12:05:10Z")? }, doc! { "_id" : 7, "item" : "xyz", "price" : 5, "quantity" : 10, "date" : iso_date("2021-02-15T14:12:12Z")? }, doc! { "_id" : 8, "item" : "abc", "price" : 10, "quantity" : 5, "date" : iso_date("2021-03-16T20:20:13Z")? } - ], None).await?; + ]).await?; // End Versioned API Example 5 // Start Versioned API Example 6 @@ -1635,16 +1502,13 @@ async fn run_command_examples() -> Result<()> { let db = client.database("run_command_examples"); db.drop().await?; db.collection::("restaurants") - .insert_one( - doc! { - "name": "Chez Panisse", - "city": "Oakland", - "state": "California", - "country": "United States", - "rating": 4.4, - }, - None, - ) + .insert_one(doc! { + "name": "Chez Panisse", + "city": "Oakland", + "state": "California", + "country": "United States", + "rating": 4.4, + }) .await?; #[allow(unused)] @@ -1665,46 +1529,40 @@ async fn index_examples() -> Result<()> { let db = client.database("index_examples"); db.drop().await?; db.collection::("records") - .insert_many( - vec![ - doc! { - "student": "Marty McFly", - "classYear": 1986, - "school": "Hill Valley High", - "score": 56.5, - }, - doc! { - "student": "Ferris F. Bueller", - "classYear": 1987, - "school": "Glenbrook North High", - "status": "Suspended", - "score": 76.0, - }, - ], - None, - ) + .insert_many(vec![ + doc! { + "student": "Marty McFly", + "classYear": 1986, + "school": "Hill Valley High", + "score": 56.5, + }, + doc! { + "student": "Ferris F. Bueller", + "classYear": 1987, + "school": "Glenbrook North High", + "status": "Suspended", + "score": 76.0, + }, + ]) .await?; db.collection::("restaurants") - .insert_many( - vec![ - doc! { - "name": "Chez Panisse", - "city": "Oakland", - "state": "California", - "country": "United States", - "rating": 4.4, - }, - doc! 
{ - "name": "Eleven Madison Park", - "cuisine": "French", - "city": "New York City", - "state": "New York", - "country": "United States", - "rating": 7.1, - }, - ], - None, - ) + .insert_many(vec![ + doc! { + "name": "Chez Panisse", + "city": "Oakland", + "state": "California", + "country": "United States", + "rating": 4.4, + }, + doc! { + "name": "Eleven Madison Park", + "cuisine": "French", + "city": "New York City", + "state": "New York", + "country": "United States", + "rating": 7.1, + }, + ]) .await?; use crate::IndexModel; @@ -1747,7 +1605,7 @@ async fn change_streams_examples() -> Result<()> { db.drop().await?; let inventory = db.collection::("inventory"); // Populate an item so the collection exists for the change stream to watch. - inventory.insert_one(doc! {}, None).await?; + inventory.insert_one(doc! {}).await?; // Background writer thread so that the `stream.next()` calls return something. let (tx, mut rx) = tokio::sync::oneshot::channel(); @@ -1757,7 +1615,7 @@ async fn change_streams_examples() -> Result<()> { loop { tokio::select! { _ = interval.tick() => { - writer_inventory.insert_one(doc! {}, None).await?; + writer_inventory.insert_one(doc! {}).await?; } _ = &mut rx => break, } @@ -1832,12 +1690,12 @@ async fn convenient_transaction_examples() -> Result<()> { client .database("mydb1") .collection::("foo") - .insert_one(doc! { "abc": 0}, None) + .insert_one(doc! { "abc": 0}) .await?; client .database("mydb2") .collection::("bar") - .insert_one(doc! { "xyz": 0}, None) + .insert_one(doc! { "xyz": 0}) .await?; // Step 1: Define the callback that specifies the sequence of operations to perform inside the @@ -1854,10 +1712,12 @@ async fn convenient_transaction_examples() -> Result<()> { // Important: You must pass the session to the operations. collection_one - .insert_one_with_session(doc! { "abc": 1 }, None, session) + .insert_one(doc! { "abc": 1 }) + .session(&mut *session) .await?; collection_two - .insert_one_with_session(doc! { "xyz": 999 }, None, session) + .insert_one(doc! { "xyz": 999 }) + .session(session) .await?; Ok(()) @@ -1866,10 +1726,11 @@ async fn convenient_transaction_examples() -> Result<()> { // Step 2: Start a client session. let mut session = client.start_session().await?; - // Step 3: Use with_transaction to start a transaction, execute the callback, and commit (or + // Step 3: Use and_run to start a transaction, execute the callback, and commit (or // abort on error). session - .with_transaction((), |session, _| callback(session).boxed(), None) + .start_transaction() + .and_run((), |session, _| callback(session).boxed()) .await?; // End Transactions withTxn API Example 1 diff --git a/src/test/documentation_examples/aggregation_data.rs b/src/test/documentation_examples/aggregation_data.rs index d32476c0d..b4fd5d449 100644 --- a/src/test/documentation_examples/aggregation_data.rs +++ b/src/test/documentation_examples/aggregation_data.rs @@ -13,242 +13,233 @@ pub(crate) async fn populate(db: &Database) -> GenericResult<()> { let date_20180111 = DateTime::parse_rfc3339_str("2018-01-11T07:15:00.000Z")?; db.collection("sales") - .insert_many( - vec![ - doc! { - "date": date_20180208, - "items": [ - doc! { - "fruit": "kiwi", - "quantity": 2, - "price": 0.5, - }, - doc! { - "fruit": "apple", - "quantity": 1, - "price": 1.0, - }, - ], - }, - doc! { - "date": date_20180109, - "items": [ - doc! { - "fruit": "banana", - "quantity": 8, - "price": 1.0, - }, - doc! { - "fruit": "apple", - "quantity": 1, - "price": 1.0, - }, - doc! 
{ - "fruit": "papaya", - "quantity": 1, - "price": 4.0, - }, - ], - }, - doc! { - "date": date_20180127, - "items": [ - doc! { - "fruit": "banana", - "quantity": 1, - "price": 1.0, - }, - ], - }, - doc! { - "date": date_20180203, - "items": [ - doc! { - "fruit": "banana", - "quantity": 1, - "price": 1.0, - }, - ], - }, - doc! { - "date": date_20180205, - "items": [ - doc! { - "fruit": "banana", - "quantity": 1, - "price": 1.0, - }, - doc! { - "fruit": "mango", - "quantity": 2, - "price": 2.0, - }, - doc! { - "fruit": "apple", - "quantity": 1, - "price": 1.0, - }, - ], - }, - doc! { - "date": date_20180111, - "items": [ - doc! { - "fruit": "banana", - "quantity": 1, - "price": 1.0, - }, - doc! { - "fruit": "apple", - "quantity": 1, - "price": 1.0, - }, - doc! { - "fruit": "papaya", - "quantity": 3, - "price": 4.0, - }, - ], - }, - ], - None, - ) + .insert_many(vec![ + doc! { + "date": date_20180208, + "items": [ + doc! { + "fruit": "kiwi", + "quantity": 2, + "price": 0.5, + }, + doc! { + "fruit": "apple", + "quantity": 1, + "price": 1.0, + }, + ], + }, + doc! { + "date": date_20180109, + "items": [ + doc! { + "fruit": "banana", + "quantity": 8, + "price": 1.0, + }, + doc! { + "fruit": "apple", + "quantity": 1, + "price": 1.0, + }, + doc! { + "fruit": "papaya", + "quantity": 1, + "price": 4.0, + }, + ], + }, + doc! { + "date": date_20180127, + "items": [ + doc! { + "fruit": "banana", + "quantity": 1, + "price": 1.0, + }, + ], + }, + doc! { + "date": date_20180203, + "items": [ + doc! { + "fruit": "banana", + "quantity": 1, + "price": 1.0, + }, + ], + }, + doc! { + "date": date_20180205, + "items": [ + doc! { + "fruit": "banana", + "quantity": 1, + "price": 1.0, + }, + doc! { + "fruit": "mango", + "quantity": 2, + "price": 2.0, + }, + doc! { + "fruit": "apple", + "quantity": 1, + "price": 1.0, + }, + ], + }, + doc! { + "date": date_20180111, + "items": [ + doc! { + "fruit": "banana", + "quantity": 1, + "price": 1.0, + }, + doc! { + "fruit": "apple", + "quantity": 1, + "price": 1.0, + }, + doc! { + "fruit": "papaya", + "quantity": 3, + "price": 4.0, + }, + ], + }, + ]) .await?; db.collection("airlines") - .insert_many( - vec![ - doc! { - "airline": 17, - "name": "Air Canada", - "alias": "AC", - "iata": "ACA", - "icao": "AIR CANADA", - "active": "Y", - "country": "Canada", - "base": "TAL", - }, - doc! { - "airline": 18, - "name": "Turkish Airlines", - "alias": "YK", - "iata": "TRK", - "icao": "TURKISH", - "active": "Y", - "country": "Turkey", - "base": "AET", - }, - doc! { - "airline": 22, - "name": "Saudia", - "alias": "SV", - "iata": "SVA", - "icao": "SAUDIA", - "active": "Y", - "country": "Saudi Arabia", - "base": "JSU", - }, - doc! { - "airline": 29, - "name": "Finnair", - "alias": "AY", - "iata": "FIN", - "icao": "FINNAIR", - "active": "Y", - "country": "Finland", - "base": "JMZ", - }, - doc! { - "airline": 34, - "name": "Afric'air Express", - "alias": "", - "iata": "AAX", - "icao": "AFREX", - "active": "N", - "country": "Ivory Coast", - "base": "LOK", - }, - doc! { - "airline": 37, - "name": "Artem-Avia", - "alias": "", - "iata": "ABA", - "icao": "ARTEM-AVIA", - "active": "N", - "country": "Ukraine", - "base": "JBR", - }, - doc! { - "airline": 38, - "name": "Lufthansa", - "alias": "LH", - "iata": "DLH", - "icao": "LUFTHANSA", - "active": "Y", - "country": "Germany", - "base": "CYS", - }, - ], - None, - ) + .insert_many(vec![ + doc! 
{ + "airline": 17, + "name": "Air Canada", + "alias": "AC", + "iata": "ACA", + "icao": "AIR CANADA", + "active": "Y", + "country": "Canada", + "base": "TAL", + }, + doc! { + "airline": 18, + "name": "Turkish Airlines", + "alias": "YK", + "iata": "TRK", + "icao": "TURKISH", + "active": "Y", + "country": "Turkey", + "base": "AET", + }, + doc! { + "airline": 22, + "name": "Saudia", + "alias": "SV", + "iata": "SVA", + "icao": "SAUDIA", + "active": "Y", + "country": "Saudi Arabia", + "base": "JSU", + }, + doc! { + "airline": 29, + "name": "Finnair", + "alias": "AY", + "iata": "FIN", + "icao": "FINNAIR", + "active": "Y", + "country": "Finland", + "base": "JMZ", + }, + doc! { + "airline": 34, + "name": "Afric'air Express", + "alias": "", + "iata": "AAX", + "icao": "AFREX", + "active": "N", + "country": "Ivory Coast", + "base": "LOK", + }, + doc! { + "airline": 37, + "name": "Artem-Avia", + "alias": "", + "iata": "ABA", + "icao": "ARTEM-AVIA", + "active": "N", + "country": "Ukraine", + "base": "JBR", + }, + doc! { + "airline": 38, + "name": "Lufthansa", + "alias": "LH", + "iata": "DLH", + "icao": "LUFTHANSA", + "active": "Y", + "country": "Germany", + "base": "CYS", + }, + ]) .await?; db.collection("air_alliances") - .insert_many( - vec![ - doc! { - "name": "Star Alliance", - "airlines": [ - "Air Canada", - "Avianca", - "Air China", - "Air New Zealand", - "Asiana Airlines", - "Brussels Airlines", - "Copa Airlines", - "Croatia Airlines", - "EgyptAir", - "TAP Portugal", - "United Airlines", - "Turkish Airlines", - "Swiss International Air Lines", - "Lufthansa", - ], - }, - doc! { - "name": "SkyTeam", - "airlines": [ - "Aerolinias Argentinas", - "Aeromexico", - "Air Europa", - "Air France", - "Alitalia", - "Delta Air Lines", - "Garuda Indonesia", - "Kenya Airways", - "KLM", - "Korean Air", - "Middle East Airlines", - "Saudia", - ], - }, - doc! { - "name": "OneWorld", - "airlines": [ - "Air Berlin", - "American Airlines", - "British Airways", - "Cathay Pacific", - "Finnair", - "Iberia Airlines", - "Japan Airlines", - "LATAM Chile", - "LATAM Brasil", - "Malasya Airlines", - "Canadian Airlines", - ], - }, - ], - None, - ) + .insert_many(vec![ + doc! { + "name": "Star Alliance", + "airlines": [ + "Air Canada", + "Avianca", + "Air China", + "Air New Zealand", + "Asiana Airlines", + "Brussels Airlines", + "Copa Airlines", + "Croatia Airlines", + "EgyptAir", + "TAP Portugal", + "United Airlines", + "Turkish Airlines", + "Swiss International Air Lines", + "Lufthansa", + ], + }, + doc! { + "name": "SkyTeam", + "airlines": [ + "Aerolinias Argentinas", + "Aeromexico", + "Air Europa", + "Air France", + "Alitalia", + "Delta Air Lines", + "Garuda Indonesia", + "Kenya Airways", + "KLM", + "Korean Air", + "Middle East Airlines", + "Saudia", + ], + }, + doc! 
{ + "name": "OneWorld", + "airlines": [ + "Air Berlin", + "American Airlines", + "British Airways", + "Cathay Pacific", + "Finnair", + "Iberia Airlines", + "Japan Airlines", + "LATAM Chile", + "LATAM Brasil", + "Malasya Airlines", + "Canadian Airlines", + ], + }, + ]) .await?; Ok(()) diff --git a/src/test/index_management.rs b/src/test/index_management.rs index aea421efd..98f3c87b9 100644 --- a/src/test/index_management.rs +++ b/src/test/index_management.rs @@ -1,13 +1,12 @@ use futures::stream::TryStreamExt; +#[allow(deprecated)] +use crate::test::util::EventClient; use crate::{ bson::doc, error::ErrorKind, options::{CommitQuorum, IndexOptions}, - test::{ - log_uncaptured, - util::{EventClient, TestClient}, - }, + test::{log_uncaptured, util::TestClient}, IndexModel, }; @@ -204,6 +203,7 @@ async fn index_management_drops() { // Test that index management commands execute the expected database commands. #[tokio::test] #[function_name::named] +#[allow(deprecated)] async fn index_management_executes_commands() { let client = EventClient::new().await; let coll = client @@ -212,14 +212,20 @@ async fn index_management_executes_commands() { // Collection::create_index and Collection::create_indexes execute createIndexes. assert_eq!( - client.get_command_started_events(&["createIndexes"]).len(), + client + .events + .get_command_started_events(&["createIndexes"]) + .len(), 0 ); coll.create_index(IndexModel::builder().keys(doc! { "a": 1 }).build()) .await .expect("Create Index op failed"); assert_eq!( - client.get_command_started_events(&["createIndexes"]).len(), + client + .events + .get_command_started_events(&["createIndexes"]) + .len(), 1 ); coll.create_indexes(vec![ @@ -229,23 +235,62 @@ async fn index_management_executes_commands() { .await .expect("Create Indexes op failed"); assert_eq!( - client.get_command_started_events(&["createIndexes"]).len(), + client + .events + .get_command_started_events(&["createIndexes"]) + .len(), 2 ); // Collection::list_indexes and Collection::list_index_names execute listIndexes. - assert_eq!(client.get_command_started_events(&["listIndexes"]).len(), 0); + assert_eq!( + client + .events + .get_command_started_events(&["listIndexes"]) + .len(), + 0 + ); coll.list_indexes().await.expect("List index op failed"); - assert_eq!(client.get_command_started_events(&["listIndexes"]).len(), 1); + assert_eq!( + client + .events + .get_command_started_events(&["listIndexes"]) + .len(), + 1 + ); coll.list_index_names().await.expect("List index op failed"); - assert_eq!(client.get_command_started_events(&["listIndexes"]).len(), 2); + assert_eq!( + client + .events + .get_command_started_events(&["listIndexes"]) + .len(), + 2 + ); // Collection::drop_index and Collection::drop_indexes execute dropIndexes. 
- assert_eq!(client.get_command_started_events(&["dropIndexes"]).len(), 0); + assert_eq!( + client + .events + .get_command_started_events(&["dropIndexes"]) + .len(), + 0 + ); coll.drop_index("a_1").await.expect("Drop index op failed"); - assert_eq!(client.get_command_started_events(&["dropIndexes"]).len(), 1); + assert_eq!( + client + .events + .get_command_started_events(&["dropIndexes"]) + .len(), + 1 + ); coll.drop_indexes().await.expect("Drop indexes op failed"); - assert_eq!(client.get_command_started_events(&["dropIndexes"]).len(), 2); + assert_eq!( + client + .events + .get_command_started_events(&["dropIndexes"]) + .len(), + 2 + ); } #[tokio::test] diff --git a/src/test/spec/auth.rs b/src/test/spec/auth.rs index 4c3db53f5..9f69ed599 100644 --- a/src/test/spec/auth.rs +++ b/src/test/spec/auth.rs @@ -32,7 +32,7 @@ impl From for Credential { .mechanism .and_then(|s| AuthMechanism::from_str(s.as_str()).ok()), mechanism_properties: test_credential.mechanism_properties, - oidc_callbacks: None, + oidc_callback: None, } } } diff --git a/src/test/spec/connection_stepdown.rs b/src/test/spec/connection_stepdown.rs index 2e9249e2a..6aa7eab58 100644 --- a/src/test/spec/connection_stepdown.rs +++ b/src/test/spec/connection_stepdown.rs @@ -2,16 +2,19 @@ use std::{future::Future, time::Duration}; use futures::stream::StreamExt; +#[allow(deprecated)] +use crate::test::util::EventClient; use crate::{ bson::{doc, Document}, error::{CommandError, ErrorKind}, - options::{Acknowledgment, ClientOptions, FindOptions, InsertManyOptions, WriteConcern}, + options::{Acknowledgment, ClientOptions, WriteConcern}, selection_criteria::SelectionCriteria, - test::{get_client_options, log_uncaptured, util::EventClient}, + test::{get_client_options, log_uncaptured}, Collection, Database, }; +#[allow(deprecated)] async fn run_test( name: &str, test: impl Fn(EventClient, Database, Collection) -> F, @@ -49,6 +52,7 @@ async fn run_test( #[tokio::test] async fn get_more() { + #[allow(deprecated)] async fn get_more_test(client: EventClient, _db: Database, coll: Collection) { // This test requires server version 4.2 or higher. if client.server_version_lt(4, 2) { @@ -57,22 +61,13 @@ async fn get_more() { } let docs = vec![doc! { "x": 1 }; 5]; - coll.insert_many( - docs, - Some( - InsertManyOptions::builder() - .write_concern(WriteConcern::builder().w(Acknowledgment::Majority).build()) - .build(), - ), - ) - .await - .unwrap(); - - let mut cursor = coll - .find(None, Some(FindOptions::builder().batch_size(2).build())) + coll.insert_many(docs) + .write_concern(WriteConcern::majority()) .await .unwrap(); + let mut cursor = coll.find(doc! {}).batch_size(2).await.unwrap(); + let db = client.database("admin"); db.run_command(doc! { "replSetFreeze": 0 }) @@ -99,7 +94,7 @@ async fn get_more() { } tokio::time::sleep(Duration::from_millis(250)).await; - assert_eq!(client.count_pool_cleared_events(), 0); + assert_eq!(client.events.count_pool_cleared_events(), 0); } run_test("get_more", get_more_test).await; @@ -107,6 +102,7 @@ async fn get_more() { #[tokio::test] async fn notwritableprimary_keep_pool() { + #[allow(deprecated)] async fn notwritableprimary_keep_pool_test( client: EventClient, _db: Database, @@ -131,7 +127,7 @@ async fn notwritableprimary_keep_pool() { .await .unwrap(); - let result = coll.insert_one(doc! { "test": 1 }, None).await; + let result = coll.insert_one(doc! 
{ "test": 1 }).await; assert!( matches!( result.map_err(|e| *e.kind), @@ -140,12 +136,12 @@ async fn notwritableprimary_keep_pool() { "insert should have failed" ); - coll.insert_one(doc! { "test": 1 }, None) + coll.insert_one(doc! { "test": 1 }) .await .expect("insert should have succeeded"); tokio::time::sleep(Duration::from_millis(250)).await; - assert_eq!(client.count_pool_cleared_events(), 0); + assert_eq!(client.events.count_pool_cleared_events(), 0); } run_test( @@ -157,6 +153,7 @@ async fn notwritableprimary_keep_pool() { #[tokio::test] async fn notwritableprimary_reset_pool() { + #[allow(deprecated)] async fn notwritableprimary_reset_pool_test( client: EventClient, _db: Database, @@ -183,7 +180,7 @@ async fn notwritableprimary_reset_pool() { .await .unwrap(); - let result = coll.insert_one(doc! { "test": 1 }, None).await; + let result = coll.insert_one(doc! { "test": 1 }).await; assert!( matches!( result.map_err(|e| *e.kind), @@ -193,9 +190,9 @@ async fn notwritableprimary_reset_pool() { ); tokio::time::sleep(Duration::from_millis(250)).await; - assert_eq!(client.count_pool_cleared_events(), 1); + assert_eq!(client.events.count_pool_cleared_events(), 1); - coll.insert_one(doc! { "test": 1 }, None) + coll.insert_one(doc! { "test": 1 }) .await .expect("insert should have succeeded"); } @@ -209,6 +206,7 @@ async fn notwritableprimary_reset_pool() { #[tokio::test] async fn shutdown_in_progress() { + #[allow(deprecated)] async fn shutdown_in_progress_test( client: EventClient, _db: Database, @@ -232,7 +230,7 @@ async fn shutdown_in_progress() { .await .unwrap(); - let result = coll.insert_one(doc! { "test": 1 }, None).await; + let result = coll.insert_one(doc! { "test": 1 }).await; assert!( matches!( result.map_err(|e| *e.kind), @@ -242,9 +240,9 @@ async fn shutdown_in_progress() { ); tokio::time::sleep(Duration::from_millis(250)).await; - assert_eq!(client.count_pool_cleared_events(), 1); + assert_eq!(client.events.count_pool_cleared_events(), 1); - coll.insert_one(doc! { "test": 1 }, None) + coll.insert_one(doc! { "test": 1 }) .await .expect("insert should have succeeded"); } @@ -254,6 +252,7 @@ async fn shutdown_in_progress() { #[tokio::test] async fn interrupted_at_shutdown() { + #[allow(deprecated)] async fn interrupted_at_shutdown_test( client: EventClient, _db: Database, @@ -277,7 +276,7 @@ async fn interrupted_at_shutdown() { .await .unwrap(); - let result = coll.insert_one(doc! { "test": 1 }, None).await; + let result = coll.insert_one(doc! { "test": 1 }).await; assert!( matches!( result.map_err(|e| *e.kind), @@ -287,9 +286,9 @@ async fn interrupted_at_shutdown() { ); tokio::time::sleep(Duration::from_millis(250)).await; - assert_eq!(client.count_pool_cleared_events(), 1); + assert_eq!(client.events.count_pool_cleared_events(), 1); - coll.insert_one(doc! { "test": 1 }, None) + coll.insert_one(doc! { "test": 1 }) .await .expect("insert should have succeeded"); diff --git a/src/test/spec/crud_v1.rs b/src/test/spec/crud_v1.rs index 1b955d50d..e817b361b 100644 --- a/src/test/spec/crud_v1.rs +++ b/src/test/spec/crud_v1.rs @@ -20,7 +20,6 @@ use serde::Deserialize; use crate::{ bson::{doc, Document}, - coll::options::FindOptions, test::log_uncaptured, Collection, }; @@ -63,8 +62,8 @@ pub struct CollectionOutcome { } pub async fn find_all(coll: &Collection) -> Vec { - let options = FindOptions::builder().sort(doc! { "_id": 1 }).build(); - coll.find(None, options) + coll.find(doc! {}) + .sort(doc! 
{ "_id": 1 }) .await .unwrap() .try_collect() diff --git a/src/test/spec/crud_v1/aggregate.rs b/src/test/spec/crud_v1/aggregate.rs index c6ed4acfb..b126b895e 100644 --- a/src/test/spec/crud_v1/aggregate.rs +++ b/src/test/spec/crud_v1/aggregate.rs @@ -34,7 +34,7 @@ async fn run_aggregate_test(test_file: TestFile) { &test_case.description.replace('$', "%").replace(' ', "_"), ) .await; - coll.insert_many(data.clone(), None) + coll.insert_many(data.clone()) .await .expect(&test_case.description); diff --git a/src/test/spec/crud_v1/count.rs b/src/test/spec/crud_v1/count.rs index 5fca5a8d3..9ea6c9790 100644 --- a/src/test/spec/crud_v1/count.rs +++ b/src/test/spec/crud_v1/count.rs @@ -35,7 +35,7 @@ async fn run_count_test(test_file: TestFile) { .await; if !data.is_empty() { - coll.insert_many(data.clone(), None) + coll.insert_many(data.clone()) .await .expect(&test_case.description); } diff --git a/src/test/spec/crud_v1/delete_many.rs b/src/test/spec/crud_v1/delete_many.rs index adf8a372d..aade4c8c0 100644 --- a/src/test/spec/crud_v1/delete_many.rs +++ b/src/test/spec/crud_v1/delete_many.rs @@ -36,7 +36,7 @@ async fn run_delete_many_test(test_file: TestFile) { let coll = client .init_db_and_coll(function_name!(), &test_case.description) .await; - coll.insert_many(data.clone(), None) + coll.insert_many(data.clone()) .await .expect(&test_case.description); diff --git a/src/test/spec/crud_v1/delete_one.rs b/src/test/spec/crud_v1/delete_one.rs index 48e1b6f7a..84fb209a8 100644 --- a/src/test/spec/crud_v1/delete_one.rs +++ b/src/test/spec/crud_v1/delete_one.rs @@ -36,7 +36,7 @@ async fn run_delete_one_test(test_file: TestFile) { let coll = client .init_db_and_coll(function_name!(), &test_case.description) .await; - coll.insert_many(data.clone(), None) + coll.insert_many(data.clone()) .await .expect(&test_case.description); diff --git a/src/test/spec/crud_v1/distinct.rs b/src/test/spec/crud_v1/distinct.rs index 28dc84fe4..112f527d6 100644 --- a/src/test/spec/crud_v1/distinct.rs +++ b/src/test/spec/crud_v1/distinct.rs @@ -30,7 +30,7 @@ async fn run_distinct_test(test_file: TestFile) { let coll = client .init_db_and_coll(function_name!(), &test_case.description) .await; - coll.insert_many(data.clone(), None) + coll.insert_many(data.clone()) .await .expect(&test_case.description); diff --git a/src/test/spec/crud_v1/find.rs b/src/test/spec/crud_v1/find.rs index dcff05e5b..2db5a522b 100644 --- a/src/test/spec/crud_v1/find.rs +++ b/src/test/spec/crud_v1/find.rs @@ -33,7 +33,7 @@ async fn run_find_test(test_file: TestFile) { let coll = client .init_db_and_coll(function_name!(), &test_case.description) .await; - coll.insert_many(data.clone(), None) + coll.insert_many(data.clone()) .await .expect(&test_case.description); @@ -57,7 +57,8 @@ async fn run_find_test(test_file: TestFile) { }; let cursor = coll - .find(arguments.filter, options) + .find(arguments.filter) + .with_options(options) .await .expect(&test_case.description); assert_eq!( diff --git a/src/test/spec/crud_v1/find_one_and_delete.rs b/src/test/spec/crud_v1/find_one_and_delete.rs index eba24a45b..23aca2c7e 100644 --- a/src/test/spec/crud_v1/find_one_and_delete.rs +++ b/src/test/spec/crud_v1/find_one_and_delete.rs @@ -31,7 +31,7 @@ async fn run_find_one_and_delete_test(test_file: TestFile) { let coll = client .init_db_and_coll(function_name!(), &test_case.description) .await; - coll.insert_many(data.clone(), None) + coll.insert_many(data.clone()) .await .expect(&test_case.description); @@ -54,7 +54,8 @@ async fn 
run_find_one_and_delete_test(test_file: TestFile) { }; let result = coll - .find_one_and_delete(arguments.filter, options) + .find_one_and_delete(arguments.filter) + .with_options(options) .await .expect(&test_case.description); assert_eq!(result, outcome.result, "{}", test_case.description); diff --git a/src/test/spec/crud_v1/find_one_and_replace.rs b/src/test/spec/crud_v1/find_one_and_replace.rs index b2501d4a9..fd0728bfb 100644 --- a/src/test/spec/crud_v1/find_one_and_replace.rs +++ b/src/test/spec/crud_v1/find_one_and_replace.rs @@ -37,7 +37,7 @@ async fn run_find_one_and_replace_test(test_file: TestFile) { let coll = client .init_db_and_coll(function_name!(), &test_case.description[..sub]) .await; - coll.insert_many(data.clone(), None) + coll.insert_many(data.clone()) .await .expect(&test_case.description); @@ -68,7 +68,8 @@ async fn run_find_one_and_replace_test(test_file: TestFile) { }; let result = coll - .find_one_and_replace(arguments.filter, arguments.replacement, options) + .find_one_and_replace(arguments.filter, arguments.replacement) + .with_options(options) .await .expect(&test_case.description); assert_eq!( diff --git a/src/test/spec/crud_v1/find_one_and_update.rs b/src/test/spec/crud_v1/find_one_and_update.rs index 66c48f265..db91c4c87 100644 --- a/src/test/spec/crud_v1/find_one_and_update.rs +++ b/src/test/spec/crud_v1/find_one_and_update.rs @@ -38,7 +38,7 @@ async fn run_find_one_and_update_test(test_file: TestFile) { let coll = client .init_db_and_coll(function_name!(), &test_case.description[..sub]) .await; - coll.insert_many(data.clone(), None) + coll.insert_many(data.clone()) .await .expect(&test_case.description); @@ -71,7 +71,8 @@ async fn run_find_one_and_update_test(test_file: TestFile) { }; let result = coll - .find_one_and_update(arguments.filter, arguments.update, options) + .find_one_and_update(arguments.filter, arguments.update) + .with_options(options) .await .expect(&test_case.description); assert_eq!( diff --git a/src/test/spec/crud_v1/insert_many.rs b/src/test/spec/crud_v1/insert_many.rs index 7a59c4725..5a3b46ae7 100644 --- a/src/test/spec/crud_v1/insert_many.rs +++ b/src/test/spec/crud_v1/insert_many.rs @@ -3,7 +3,6 @@ use serde::Deserialize; use super::{run_crud_v1_test, Outcome, TestFile}; use crate::{ bson::{Bson, Document}, - options::InsertManyOptions, test::util::TestClient, }; @@ -37,7 +36,7 @@ async fn run_insert_many_test(test_file: TestFile) { let coll = client .init_db_and_coll(function_name!(), &test_case.description) .await; - coll.insert_many(data.clone(), None) + coll.insert_many(data.clone()) .await .expect(&test_case.description); @@ -46,11 +45,11 @@ async fn run_insert_many_test(test_file: TestFile) { let outcome: Outcome = bson::from_bson(Bson::Document(test_case.outcome)).expect(&test_case.description); - let options = InsertManyOptions::builder() + let result = match coll + .insert_many(arguments.documents) .ordered(arguments.options.ordered) - .build(); - - let result = match coll.insert_many(arguments.documents, options).await { + .await + { Ok(result) => { assert_ne!(outcome.error, Some(true), "{}", test_case.description); result.inserted_ids diff --git a/src/test/spec/crud_v1/insert_one.rs b/src/test/spec/crud_v1/insert_one.rs index 8ed8ec5f8..5f159b1f1 100644 --- a/src/test/spec/crud_v1/insert_one.rs +++ b/src/test/spec/crud_v1/insert_one.rs @@ -32,7 +32,7 @@ async fn run_insert_one_test(test_file: TestFile) { let coll = client .init_db_and_coll(function_name!(), &test_case.description) .await; - coll.insert_many(data.clone(), 
None) + coll.insert_many(data.clone()) .await .expect(&test_case.description); @@ -48,7 +48,7 @@ async fn run_insert_one_test(test_file: TestFile) { } let result = coll - .insert_one(arguments.document, None) + .insert_one(arguments.document) .await .expect(&test_case.description); assert_eq!( diff --git a/src/test/spec/crud_v1/replace_one.rs b/src/test/spec/crud_v1/replace_one.rs index f5cfb1609..42f56ee46 100644 --- a/src/test/spec/crud_v1/replace_one.rs +++ b/src/test/spec/crud_v1/replace_one.rs @@ -41,7 +41,7 @@ async fn run_replace_one_test(test_file: TestFile) { &test_case.description.replace('$', "%").replace(' ', "_"), ) .await; - coll.insert_many(data.clone(), None) + coll.insert_many(data.clone()) .await .expect(&test_case.description); @@ -63,7 +63,8 @@ async fn run_replace_one_test(test_file: TestFile) { }; let result = coll - .replace_one(arguments.filter, arguments.replacement, options) + .replace_one(arguments.filter, arguments.replacement) + .with_options(options) .await .expect(&test_case.description); assert_eq!( diff --git a/src/test/spec/crud_v1/update_many.rs b/src/test/spec/crud_v1/update_many.rs index 1f4e33c0e..7b53a92c8 100644 --- a/src/test/spec/crud_v1/update_many.rs +++ b/src/test/spec/crud_v1/update_many.rs @@ -41,7 +41,7 @@ async fn run_update_many_test(test_file: TestFile) { let coll = client .init_db_and_coll(function_name!(), &test_case.description) .await; - coll.insert_many(data.clone(), None) + coll.insert_many(data.clone()) .await .expect(&test_case.description); diff --git a/src/test/spec/crud_v1/update_one.rs b/src/test/spec/crud_v1/update_one.rs index cfe08e259..3b7288231 100644 --- a/src/test/spec/crud_v1/update_one.rs +++ b/src/test/spec/crud_v1/update_one.rs @@ -41,7 +41,7 @@ async fn run_update_one_test(test_file: TestFile) { let coll = client .init_db_and_coll(function_name!(), &test_case.description) .await; - coll.insert_many(data.clone(), None) + coll.insert_many(data.clone()) .await .expect(&test_case.description); diff --git a/src/test/spec/gridfs.rs b/src/test/spec/gridfs.rs index bc29758d9..b9403a47d 100644 --- a/src/test/spec/gridfs.rs +++ b/src/test/spec/gridfs.rs @@ -38,12 +38,14 @@ async fn download_stream_across_buffers() { bucket.drop().await.unwrap(); let data: Vec = (0..20).collect(); - let id = bucket - .upload_from_futures_0_3_reader("test", &data[..], None) - .await - .unwrap(); + let id = { + let mut stream = bucket.open_upload_stream("test").await.unwrap(); + stream.write_all(&data[..]).await.unwrap(); + stream.close().await.unwrap(); + stream.id().clone() + }; - let mut download_stream = bucket.open_download_stream(id.into()).await.unwrap(); + let mut download_stream = bucket.open_download_stream(id).await.unwrap(); let mut buf = vec![0u8; 12]; // read in a partial chunk @@ -104,19 +106,26 @@ async fn upload_test(bucket: &GridFsBucket, data: &[u8], options: Option = (0..20).collect(); @@ -169,7 +181,10 @@ async fn upload_stream_multiple_buffers() { let mut uploaded = Vec::new(); bucket - .download_to_futures_0_3_writer(upload_stream.id().clone(), &mut uploaded) + .open_download_stream(upload_stream.id().clone()) + .await + .unwrap() + .read_to_end(&mut uploaded) .await .unwrap(); assert_eq!(uploaded, data); @@ -190,12 +205,18 @@ async fn upload_stream_errors() { bucket.drop().await.unwrap(); // Error attempting to write to stream after closing. 
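
The crud_v1 test updates above drop the trailing `Option<...Options>` argument in favor of the driver's fluent call style: per-call settings such as `sort` or `ordered` chain directly on the operation, and a prebuilt options struct can still be attached with `.with_options(...)`. Below is a minimal sketch of that pattern as these tests use it; the `coll` handle, the function name, and the sample filters are illustrative and not part of the patch, and error handling is reduced to `?`.

use futures_util::TryStreamExt;
use mongodb::{
    bson::{doc, Document},
    options::FindOptions,
    Collection,
};

async fn fluent_find_examples(coll: &Collection<Document>) -> mongodb::error::Result<()> {
    // Setters chain directly on the operation instead of being packed into FindOptions.
    let sorted: Vec<Document> = coll
        .find(doc! {})
        .sort(doc! { "_id": 1 })
        .await?
        .try_collect()
        .await?;

    // A prebuilt options struct can still be supplied via with_options.
    let options = FindOptions::builder().sort(doc! { "_id": 1 }).build();
    let same: Vec<Document> = coll
        .find(doc! {})
        .with_options(options)
        .await?
        .try_collect()
        .await?;
    assert_eq!(sorted, same);

    // insert_one / insert_many lose the Option<...Options> parameter entirely;
    // options like `ordered` become chained setters.
    coll.insert_one(doc! { "x": 1 }).await?;
    coll.insert_many(vec![doc! { "x": 2 }, doc! { "x": 3 }])
        .ordered(true)
        .await?;

    Ok(())
}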
- let mut upload_stream = bucket.open_upload_stream("upload_stream_errors", None); + let mut upload_stream = bucket + .open_upload_stream("upload_stream_errors") + .await + .unwrap(); upload_stream.close().await.unwrap(); assert_closed(&bucket, upload_stream).await; // Error attempting to write to stream after abort. - let mut upload_stream = bucket.open_upload_stream("upload_stream_errors", None); + let mut upload_stream = bucket + .open_upload_stream("upload_stream_errors") + .await + .unwrap(); upload_stream.abort().await.unwrap(); assert_closed(&bucket, upload_stream).await; @@ -204,10 +225,11 @@ async fn upload_stream_errors() { } // Error attempting to write to stream after write failure. - let mut upload_stream = bucket.open_upload_stream( - "upload_stream_errors", - GridFsUploadOptions::builder().chunk_size_bytes(1).build(), - ); + let mut upload_stream = bucket + .open_upload_stream("upload_stream_errors") + .chunk_size_bytes(1) + .await + .unwrap(); let fail_point = FailPoint::new(&["insert"], FailPointMode::Times(1)).error_code(1234); let _guard = client.configure_fail_point(fail_point).await.unwrap(); @@ -218,10 +240,11 @@ async fn upload_stream_errors() { assert_closed(&bucket, upload_stream).await; // Error attempting to write to stream after close failure. - let mut upload_stream = bucket.open_upload_stream( - "upload_stream_errors", - GridFsUploadOptions::builder().chunk_size_bytes(1).build(), - ); + let mut upload_stream = bucket + .open_upload_stream("upload_stream_errors") + .chunk_size_bytes(1) + .await + .unwrap(); upload_stream.write_all(&[11]).await.unwrap(); @@ -240,7 +263,10 @@ async fn drop_aborts() { let bucket = client.database("upload_stream_abort").gridfs_bucket(None); bucket.drop().await.unwrap(); - let mut upload_stream = bucket.open_upload_stream("upload_stream_abort", None); + let mut upload_stream = bucket + .open_upload_stream("upload_stream_abort") + .await + .unwrap(); let id = upload_stream.id().clone(); upload_stream.write_all(&[11]).await.unwrap(); drop(upload_stream); @@ -256,7 +282,10 @@ async fn write_future_dropped() { .gridfs_bucket(GridFsBucketOptions::builder().chunk_size_bytes(1).build()); bucket.drop().await.unwrap(); - let mut upload_stream = bucket.open_upload_stream("upload_stream_abort", None); + let mut upload_stream = bucket + .open_upload_stream("upload_stream_abort") + .await + .unwrap(); let chunks = vec![0u8; 100_000]; assert!( @@ -306,7 +335,7 @@ async fn assert_no_chunks_written(bucket: &GridFsBucket, id: &Bson) { assert!(bucket .chunks() .clone_with_type::() - .find_one(doc! { "files_id": id }, None) + .find_one(doc! { "files_id": id }) .await .unwrap() .is_none()); @@ -321,12 +350,12 @@ async fn test_gridfs_bucket_find_one() { let bucket = client.database("gridfs_find_one").gridfs_bucket(options); let filename = String::from("somefile"); - let mut upload_stream = bucket.open_upload_stream(&filename, None); + let mut upload_stream = bucket.open_upload_stream(&filename).await.unwrap(); upload_stream.write_all(data).await.unwrap(); upload_stream.close().await.unwrap(); let found = bucket - .find_one(doc! { "_id": upload_stream.id() }, None) + .find_one(doc! { "_id": upload_stream.id() }) .await .unwrap() .unwrap(); diff --git a/src/test/spec/index_management.rs b/src/test/spec/index_management.rs index 1f223550e..2b1967d0d 100644 --- a/src/test/spec/index_management.rs +++ b/src/test/spec/index_management.rs @@ -45,14 +45,13 @@ async fn search_index_create_list() { .name(String::from("test-search-index")) .definition(doc! 
{ "mappings": { "dynamic": false } }) .build(), - None, ) .await .unwrap(); assert_eq!(name, "test-search-index"); let found = 'outer: loop { - let mut cursor = coll0.list_search_indexes(None, None, None).await.unwrap(); + let mut cursor = coll0.list_search_indexes().await.unwrap(); while let Some(d) = cursor.try_next().await.unwrap() { if d.get_str("name") == Ok("test-search-index") && d.get_bool("queryable") == Ok(true) { break 'outer d; @@ -87,19 +86,16 @@ async fn search_index_create_multiple() { let coll0 = db.collection::(&coll_name); let names = coll0 - .create_search_indexes( - [ - SearchIndexModel::builder() - .name(String::from("test-search-index-1")) - .definition(doc! { "mappings": { "dynamic": false } }) - .build(), - SearchIndexModel::builder() - .name(String::from("test-search-index-2")) - .definition(doc! { "mappings": { "dynamic": false } }) - .build(), - ], - None, - ) + .create_search_indexes([ + SearchIndexModel::builder() + .name(String::from("test-search-index-1")) + .definition(doc! { "mappings": { "dynamic": false } }) + .build(), + SearchIndexModel::builder() + .name(String::from("test-search-index-2")) + .definition(doc! { "mappings": { "dynamic": false } }) + .build(), + ]) .await .unwrap(); assert_eq!(names, ["test-search-index-1", "test-search-index-2"]); @@ -107,7 +103,7 @@ async fn search_index_create_multiple() { let mut index1 = None; let mut index2 = None; loop { - let mut cursor = coll0.list_search_indexes(None, None, None).await.unwrap(); + let mut cursor = coll0.list_search_indexes().await.unwrap(); while let Some(d) = cursor.try_next().await.unwrap() { if d.get_str("name") == Ok("test-search-index-1") && d.get_bool("queryable") == Ok(true) { @@ -159,14 +155,13 @@ async fn search_index_drop() { .name(String::from("test-search-index")) .definition(doc! { "mappings": { "dynamic": false } }) .build(), - None, ) .await .unwrap(); assert_eq!(name, "test-search-index"); 'outer: loop { - let mut cursor = coll0.list_search_indexes(None, None, None).await.unwrap(); + let mut cursor = coll0.list_search_indexes().await.unwrap(); while let Some(d) = cursor.try_next().await.unwrap() { if d.get_str("name") == Ok("test-search-index") && d.get_bool("queryable") == Ok(true) { break 'outer; @@ -178,13 +173,10 @@ async fn search_index_drop() { } } - coll0 - .drop_search_index("test-search-index", None) - .await - .unwrap(); + coll0.drop_search_index("test-search-index").await.unwrap(); loop { - let cursor = coll0.list_search_indexes(None, None, None).await.unwrap(); + let cursor = coll0.list_search_indexes().await.unwrap(); if !cursor.has_next() { break; } @@ -217,14 +209,13 @@ async fn search_index_update() { .name(String::from("test-search-index")) .definition(doc! { "mappings": { "dynamic": false } }) .build(), - None, ) .await .unwrap(); assert_eq!(name, "test-search-index"); 'outer: loop { - let mut cursor = coll0.list_search_indexes(None, None, None).await.unwrap(); + let mut cursor = coll0.list_search_indexes().await.unwrap(); while let Some(d) = cursor.try_next().await.unwrap() { if d.get_str("name") == Ok("test-search-index") && d.get_bool("queryable") == Ok(true) { break 'outer; @@ -240,13 +231,12 @@ async fn search_index_update() { .update_search_index( "test-search-index", doc! 
{ "mappings": { "dynamic": true } }, - None, ) .await .unwrap(); let found = 'find: loop { - let mut cursor = coll0.list_search_indexes(None, None, None).await.unwrap(); + let mut cursor = coll0.list_search_indexes().await.unwrap(); while let Some(d) = cursor.try_next().await.unwrap() { if d.get_str("name") == Ok("test-search-index") && d.get_bool("queryable") == Ok(true) @@ -280,8 +270,5 @@ async fn search_index_drop_not_found() { .database("search_index_test") .collection::(&coll_name); - coll0 - .drop_search_index("test-search-index", None) - .await - .unwrap(); + coll0.drop_search_index("test-search-index").await.unwrap(); } diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Aggregate.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Aggregate.json index ba53b007b..9eaabe0d7 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Aggregate.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Aggregate.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Aggregate.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Aggregate.yml index 93ad19951..c0f617944 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Aggregate.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Aggregate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Correctness.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Correctness.json index e9620efbe..fa887e089 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Correctness.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Correctness.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Correctness.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Correctness.yml index d60b7b620..49f66ae28 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Correctness.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Correctness.yml @@ -7,6 +7,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Delete.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Delete.json index daaa09389..cce4faf18 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Delete.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Delete.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Delete.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Delete.yml index 0b969fd48..689d93a71 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Delete.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Delete.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-FindOneAndUpdate.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-FindOneAndUpdate.json index 8500fa829..4392b6768 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-FindOneAndUpdate.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-FindOneAndUpdate.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-FindOneAndUpdate.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-FindOneAndUpdate.yml index 76bfe7ea2..69418e441 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-FindOneAndUpdate.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-FindOneAndUpdate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-InsertFind.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-InsertFind.json index 7de45ba00..27ce7881d 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-InsertFind.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-InsertFind.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-InsertFind.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-InsertFind.yml index e978b9de5..9ad57efa7 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-InsertFind.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-InsertFind.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Update.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Update.json index d5b62be06..f7d5a6af6 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Update.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Update.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Update.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Update.yml index fe7d050f1..2dd35dfaa 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Update.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Date-Update.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Aggregate.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Aggregate.json index 081bc577f..401ee34e3 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Aggregate.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Aggregate.json @@ -4,7 +4,8 @@ "minServerVersion": "7.0.0", "topology": [ "replicaset" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Aggregate.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Aggregate.yml index 0926988d2..4debfefc8 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Aggregate.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Aggregate.yml @@ -6,6 +6,8 @@ runOn: # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Correctness.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Correctness.json index 12fe7c8bc..758d3e573 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Correctness.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Correctness.json @@ -4,7 +4,8 @@ "minServerVersion": "7.0.0", "topology": [ "replicaset" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Correctness.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Correctness.yml index 1961ed3e5..4eef897c4 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Correctness.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Correctness.yml @@ -8,6 +8,8 @@ runOn: # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Delete.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Delete.json index ac49d16a2..24a08f318 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Delete.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Delete.json @@ -4,7 +4,8 @@ "minServerVersion": "7.0.0", "topology": [ "replicaset" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Delete.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Delete.yml index bdb10e2d8..aad79c545 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Delete.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Delete.yml @@ -6,6 +6,8 @@ runOn: # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json index 88a235078..2a8070ecf 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.json @@ -4,7 +4,8 @@ "minServerVersion": "7.0.0", "topology": [ "replicaset" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.yml index defccea0a..d71ba28c5 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-FindOneAndUpdate.yml @@ -6,6 +6,8 @@ runOn: # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-InsertFind.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-InsertFind.json index 54e43e4a2..2ef63f42b 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-InsertFind.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-InsertFind.json @@ -4,7 +4,8 @@ "minServerVersion": "7.0.0", "topology": [ "replicaset" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-InsertFind.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-InsertFind.yml index 51abaa642..9e70ff972 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-InsertFind.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-InsertFind.yml @@ -6,6 +6,8 @@ runOn: # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Update.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Update.json index b2b8136a9..8064eb1b1 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Update.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Update.json @@ -4,7 +4,8 @@ "minServerVersion": "7.0.0", "topology": [ "replicaset" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Update.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Update.yml index e79fd082e..f06c13a4e 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Update.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Decimal-Update.yml @@ -6,6 +6,8 @@ runOn: # FLE 2 Encrypted collections are not supported on standalone. # Tests for Decimal (without precision) must only run against a replica set. Decimal (without precision) queries are expected to take a long time and may exceed the default mongos timeout. topology: [ "replicaset" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json index b078d1817..8cf143c09 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Aggregate.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Aggregate.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Aggregate.yml index 08f4a380c..43f1df686 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Aggregate.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Aggregate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Correctness.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Correctness.json index 0859e702a..a4b06998f 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Correctness.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Correctness.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Correctness.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Correctness.yml index 18252b4bb..c4b037bde 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Correctness.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Correctness.yml @@ -7,6 +7,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Delete.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Delete.json index 6e1ad90cd..fad823483 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Delete.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Delete.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Delete.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Delete.yml index ade385d20..cb10767df 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Delete.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Delete.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json index 1cfd19a1e..fb8f4f414 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.yml index 7100d5888..2c67b3638 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-FindOneAndUpdate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json index da7660972..79562802e 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-InsertFind.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-InsertFind.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-InsertFind.yml index 32785d658..f01401718 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-InsertFind.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-InsertFind.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Update.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Update.json index 2d201948c..cc93b7694 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Update.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Update.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Update.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Update.yml index eedd07608..22beb93e9 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Update.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DecimalPrecision-Update.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Aggregate.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Aggregate.json index c188f1f5a..79f26660f 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Aggregate.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Aggregate.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Aggregate.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Aggregate.yml index 4fb95343b..83ca7fb90 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Aggregate.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Aggregate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Correctness.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Correctness.json index 3e298127d..117e56af6 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Correctness.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Correctness.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Correctness.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Correctness.yml index 7289bb24d..5f91aead1 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Correctness.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Correctness.yml @@ -7,6 +7,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Delete.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Delete.json index dc0ba435f..40d8ed5bb 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Delete.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Delete.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Delete.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Delete.yml index 2f42c1da1..def2bcb67 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Delete.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Delete.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-FindOneAndUpdate.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-FindOneAndUpdate.json index 4b96575e1..f0893ce66 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-FindOneAndUpdate.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-FindOneAndUpdate.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-FindOneAndUpdate.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-FindOneAndUpdate.yml index d6573ff86..4bac3c138 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-FindOneAndUpdate.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-FindOneAndUpdate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-InsertFind.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-InsertFind.json index 4827b6838..d3dc2f830 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-InsertFind.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-InsertFind.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-InsertFind.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-InsertFind.yml index 0122ba243..33b531f83 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-InsertFind.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-InsertFind.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Update.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Update.json index c3284ad0f..9d6a1fbfd 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Update.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Update.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Update.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Update.yml index 176db3971..65f50aecd 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Update.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Double-Update.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Aggregate.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Aggregate.json index a2c1f3b75..4188685a2 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Aggregate.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Aggregate.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Aggregate.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Aggregate.yml index 134003bf9..4c3b3d66b 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Aggregate.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Aggregate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Correctness.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Correctness.json index d0c0601ce..60f1ea7a3 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Correctness.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Correctness.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Correctness.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Correctness.yml index a8fc4ec2a..6f3259f32 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Correctness.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Correctness.yml @@ -7,6 +7,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Delete.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Delete.json index a617442ee..4ed591d3f 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Delete.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Delete.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Delete.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Delete.yml index a6f83da78..fa0444753 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Delete.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Delete.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json index 5565fb179..d8fbbfae7 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.yml index 5def8d287..0f615d4b5 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-FindOneAndUpdate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-InsertFind.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-InsertFind.json index a1d8c1785..4213b066d 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-InsertFind.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-InsertFind.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-InsertFind.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-InsertFind.yml index 8900f79a8..107151449 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-InsertFind.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-InsertFind.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Update.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Update.json index 6ea99242b..89eb4c338 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Update.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Update.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Update.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Update.yml index 3e31f4018..b8ffbe9d4 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Update.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-DoublePrecision-Update.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Aggregate.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Aggregate.json index b3b2826fa..686f0241b 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Aggregate.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Aggregate.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Aggregate.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Aggregate.yml index 13c350ea5..052a3006e 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Aggregate.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Aggregate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Correctness.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Correctness.json index 4932223ba..2964624f2 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Correctness.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Correctness.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Correctness.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Correctness.yml index 1e7d5d47f..f7e4c53de 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Correctness.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Correctness.yml @@ -7,6 +7,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Delete.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Delete.json index 03f816e4b..531b3e759 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Delete.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Delete.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Delete.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Delete.yml index ab1e9d2e5..ecc5eaa27 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Delete.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Delete.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-FindOneAndUpdate.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-FindOneAndUpdate.json index d573f7b6a..402086cdb 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-FindOneAndUpdate.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-FindOneAndUpdate.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-FindOneAndUpdate.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-FindOneAndUpdate.yml index a33a5120a..9e878890f 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-FindOneAndUpdate.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-FindOneAndUpdate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-InsertFind.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-InsertFind.json index 04953663f..965b8a551 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-InsertFind.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-InsertFind.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-InsertFind.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-InsertFind.yml index 4ef8c8e52..6e9594a1b 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-InsertFind.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-InsertFind.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Update.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Update.json index 4c7a3c278..6cf44ac78 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Update.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Update.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Update.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Update.yml index cf5716dab..a98c1a659 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Update.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Int-Update.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Aggregate.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Aggregate.json index a7e77fd5c..6edb38a80 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Aggregate.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Aggregate.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Aggregate.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Aggregate.yml index cb5e42c15..5bc598daa 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Aggregate.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Aggregate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Correctness.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Correctness.json index 365822c79..3d33f7381 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Correctness.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Correctness.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Correctness.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Correctness.yml index a7a33e274..01834f1c3 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Correctness.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Correctness.yml @@ -7,6 +7,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Delete.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Delete.json index 17a01fe07..1b3278201 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Delete.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Delete.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Delete.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Delete.yml index 8dd1603f3..617794a17 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Delete.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Delete.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-FindOneAndUpdate.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-FindOneAndUpdate.json index 918d0dfee..b8e3b888a 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-FindOneAndUpdate.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-FindOneAndUpdate.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-FindOneAndUpdate.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-FindOneAndUpdate.yml index 0641988b9..1459ca106 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-FindOneAndUpdate.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-FindOneAndUpdate.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-InsertFind.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-InsertFind.json index 9fafd10d4..d637fcf9e 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-InsertFind.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-InsertFind.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-InsertFind.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-InsertFind.yml index 076670d49..578c08c24 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-InsertFind.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-InsertFind.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Update.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Update.json index 20ac25bfa..1b76019a4 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Update.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Update.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Update.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Update.yml index 0aad7c441..db16c3dd6 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Update.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-Long-Update.yml @@ -5,6 +5,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". 
+ maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-WrongType.json b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-WrongType.json index 5a6e650ab..704a693b8 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-WrongType.json +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-WrongType.json @@ -6,7 +6,8 @@ "replicaset", "sharded", "load-balanced" - ] + ], + "maxServerVersion": "7.99.99" } ], "database_name": "default", diff --git a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-WrongType.yml b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-WrongType.yml index b44e8c505..9f1a93386 100644 --- a/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-WrongType.yml +++ b/src/test/spec/json/client-side-encryption/legacy/fle2v2-Range-WrongType.yml @@ -7,6 +7,8 @@ runOn: # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Unskip once Serverless enables the QEv2 protocol. # FLE 2 Encrypted collections are not supported on standalone. topology: [ "replicaset", "sharded", "load-balanced" ] + # Skip tests for "rangePreview" algorithm on Server 8.0+. Server 8.0 drops "rangePreview" and adds "range". + maxServerVersion: "7.99.99" database_name: &database_name "default" collection_name: &collection_name "default" data: [] diff --git a/src/test/spec/oidc.rs b/src/test/spec/oidc.rs index 0b0e0f676..d3ec5f5b8 100644 --- a/src/test/spec/oidc.rs +++ b/src/test/spec/oidc.rs @@ -1,5 +1,4 @@ -use bson::Document; -use futures_util::FutureExt; +use bson::doc; use crate::{ client::{ @@ -9,20 +8,81 @@ use crate::{ test::log_uncaptured, Client, }; +use std::sync::{Arc, Mutex}; -type Result = anyhow::Result; +// Machine Callback tests +// Prose test 1.1 Single Principal Implicit Username +#[tokio::test] +async fn machine_single_principal_implicit_username() -> anyhow::Result<()> { + use bson::Document; + use futures_util::FutureExt; + + if std::env::var("OIDC_TOKEN_DIR").is_err() { + log_uncaptured("Skipping OIDC test"); + return Ok(()); + } + + // we need to assert that the callback is only called once + let call_count = Arc::new(Mutex::new(0)); + let cb_call_count = call_count.clone(); + + let mut opts = ClientOptions::parse("mongodb://localhost/?authMechanism=MONGODB-OIDC").await?; + opts.credential = Credential::builder() + .mechanism(AuthMechanism::MongoDbOidc) + .oidc_callback(oidc::Callback::machine(move |_| { + let call_count = cb_call_count.clone(); + *call_count.lock().unwrap() += 1; + async move { + Ok(oidc::IdpServerResponse { + access_token: tokio::fs::read_to_string("/tmp/tokens/test_user1").await?, + expires: None, + refresh_token: None, + }) + } + .boxed() + })) + .build() + .into(); + let client = Client::with_options(opts)?; + client + .database("test") + .collection::("test") + .find_one(doc! 
{}) + .await?; + assert_eq!(1, *(*call_count).lock().unwrap()); + Ok(()) +} +// Human Callback tests // Prose test 1.1 Single Principal Implicit Username #[tokio::test] -async fn single_principal_implicit_username() -> Result<()> { +async fn human_single_principal_implicit_username() -> anyhow::Result<()> { + use crate::{ + client::{ + auth::{oidc, AuthMechanism, Credential}, + options::ClientOptions, + }, + test::log_uncaptured, + Client, + }; + use bson::Document; + use futures_util::FutureExt; + if std::env::var("OIDC_TOKEN_DIR").is_err() { log_uncaptured("Skipping OIDC test"); return Ok(()); } + + // we need to assert that the callback is only called once + let call_count = Arc::new(Mutex::new(0)); + let cb_call_count = call_count.clone(); + let mut opts = ClientOptions::parse("mongodb://localhost/?authMechanism=MONGODB-OIDC").await?; - opts.credential = Some(Credential { - mechanism: Some(AuthMechanism::MongoDbOidc), - oidc_callbacks: Some(oidc::Callbacks::new(|_info, _params| { + opts.credential = Credential::builder() + .mechanism(AuthMechanism::MongoDbOidc) + .oidc_callback(oidc::Callback::human(move |_| { + let call_count = cb_call_count.clone(); + *call_count.lock().unwrap() += 1; async move { Ok(oidc::IdpServerResponse { access_token: tokio::fs::read_to_string("/tmp/tokens/test_user1").await?, @@ -31,14 +91,15 @@ async fn single_principal_implicit_username() -> Result<()> { }) } .boxed() - })), - ..Credential::default() - }); + })) + .build() + .into(); let client = Client::with_options(opts)?; client .database("test") .collection::("test") - .find_one(None, None) + .find_one(doc! {}) .await?; + assert_eq!(1, *(*call_count).lock().unwrap()); Ok(()) } diff --git a/src/test/spec/retryable_reads.rs b/src/test/spec/retryable_reads.rs index a8d0cfe68..c0fcdd2fc 100644 --- a/src/test/spec/retryable_reads.rs +++ b/src/test/spec/retryable_reads.rs @@ -1,4 +1,4 @@ -use std::{sync::Arc, time::Duration}; +use std::{future::IntoFuture, time::Duration}; use bson::doc; @@ -8,14 +8,13 @@ use crate::{ cmap::{CmapEvent, ConnectionCheckoutFailedReason}, command::CommandEvent, }, - runtime, - runtime::AsyncJoinHandle, + runtime::{self, AsyncJoinHandle}, test::{ get_client_options, log_uncaptured, spec::{unified_runner::run_unified_tests, v2_runner::run_v2_tests}, + util::event_buffer::EventBuffer, Event, - EventHandler, FailPoint, FailPointMode, TestClient, @@ -51,27 +50,30 @@ async fn retry_releases_connection() { let collection = client .database("retry_releases_connection") .collection("retry_releases_connection"); - collection.insert_one(doc! { "x": 1 }, None).await.unwrap(); + collection.insert_one(doc! { "x": 1 }).await.unwrap(); let fail_point = FailPoint::new(&["find"], FailPointMode::Times(1)).close_connection(true); let _guard = client.configure_fail_point(fail_point).await.unwrap(); - runtime::timeout(Duration::from_secs(1), collection.find_one(doc! {}, None)) - .await - .expect("operation should not time out") - .expect("find should succeed"); + runtime::timeout( + Duration::from_secs(1), + collection.find_one(doc! {}).into_future(), + ) + .await + .expect("operation should not time out") + .expect("find should succeed"); } /// Prose test from retryable reads spec verifying that PoolClearedErrors are retried. 
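The retryable-reads and retryable-writes tests that follow replace the deprecated `EventHandler` with the new `EventBuffer`. Roughly, the wiring they rely on looks like the sketch below; it assumes only calls that appear elsewhere in this patch (`EventBuffer::new`, `handler()`, `get_command_started_events`, `get_client_options`, `Client::with_options`, the new `insert_one(doc! { .. })` action), and the function, database, and collection names are illustrative placeholders rather than part of the change.

use crate::{
    bson::{doc, Document},
    test::{get_client_options, util::event_buffer::EventBuffer},
    Client,
};

// Sketch only: collect command/CMAP events through an EventBuffer and
// assert on them after the operations have run.
async fn event_buffer_wiring_sketch() -> crate::error::Result<()> {
    let buffer = EventBuffer::new();
    let mut options = get_client_options().await.clone();
    // The same buffer handle feeds both the CMAP and the command event handlers.
    options.cmap_event_handler = Some(buffer.handler());
    options.command_event_handler = Some(buffer.handler());

    let client = Client::with_options(options)?;
    client
        .database("example_db")
        .collection::<Document>("example_coll")
        .insert_one(doc! { "x": 1 })
        .await?;

    // Recorded events can be queried from the buffer after the fact.
    assert_eq!(buffer.get_command_started_events(&["insert"]).len(), 1);
    Ok(())
}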
#[tokio::test(flavor = "multi_thread")] async fn retry_read_pool_cleared() { - let handler = Arc::new(EventHandler::new()); + let buffer = EventBuffer::new(); let mut client_options = get_client_options().await.clone(); client_options.retry_reads = Some(true); client_options.max_pool_size = Some(1); - client_options.cmap_event_handler = Some(handler.clone().into()); - client_options.command_event_handler = Some(handler.clone().into()); + client_options.cmap_event_handler = Some(buffer.handler()); + client_options.command_event_handler = Some(buffer.handler()); // on sharded clusters, ensure only a single mongos is used if client_options.repl_set_name.is_none() { client_options.hosts.drain(1..); @@ -92,19 +94,20 @@ async fn retry_read_pool_cleared() { let collection = client .database("retry_read_pool_cleared") .collection("retry_read_pool_cleared"); - collection.insert_one(doc! { "x": 1 }, None).await.unwrap(); + collection.insert_one(doc! { "x": 1 }).await.unwrap(); let fail_point = FailPoint::new(&["find"], FailPointMode::Times(1)) .error_code(91) .block_connection(Duration::from_secs(1)); let _guard = client.configure_fail_point(fail_point).await.unwrap(); - let mut subscriber = handler.subscribe(); + #[allow(deprecated)] + let mut subscriber = buffer.subscribe(); let mut tasks: Vec> = Vec::new(); for _ in 0..2 { let coll = collection.clone(); - let task = runtime::spawn(async move { coll.find_one(doc! {}, None).await }); + let task = runtime::spawn(async move { coll.find_one(doc! {}).await }); tasks.push(task); } @@ -148,7 +151,7 @@ async fn retry_read_pool_cleared() { ); } - assert_eq!(handler.get_command_started_events(&["find"]).len(), 3); + assert_eq!(buffer.get_command_started_events(&["find"]).len(), 3); } // Retryable Reads Are Retried on a Different mongos if One is Available @@ -182,6 +185,7 @@ async fn retry_read_different_mongos() { guards.push(client.configure_fail_point(fail_point).await.unwrap()); } + #[allow(deprecated)] let client = Client::test_builder() .options(client_options) .event_client() @@ -190,10 +194,14 @@ async fn retry_read_different_mongos() { let result = client .database("test") .collection::("retry_read_different_mongos") - .find(doc! {}, None) + .find(doc! {}) .await; assert!(result.is_err()); - let events = client.get_command_events(&["find"]); + #[allow(deprecated)] + let events = { + let mut events = client.events.clone(); + events.get_command_events(&["find"]) + }; assert!( matches!( &events[..], @@ -238,6 +246,7 @@ async fn retry_read_same_mongos() { client.configure_fail_point(fail_point).await.unwrap() }; + #[allow(deprecated)] let client = Client::test_builder() .options(client_options) .event_client() @@ -246,10 +255,14 @@ async fn retry_read_same_mongos() { let result = client .database("test") .collection::("retry_read_same_mongos") - .find(doc! {}, None) + .find(doc! 
{}) .await; assert!(result.is_ok(), "{:?}", result); - let events = client.get_command_events(&["find"]); + #[allow(deprecated)] + let events = { + let mut events = client.events.clone(); + events.get_command_events(&["find"]) + }; assert!( matches!( &events[..], diff --git a/src/test/spec/retryable_writes.rs b/src/test/spec/retryable_writes.rs index c94cbfac7..32b7e5bdd 100644 --- a/src/test/spec/retryable_writes.rs +++ b/src/test/spec/retryable_writes.rs @@ -9,6 +9,8 @@ use tokio::sync::Mutex; use test_file::{TestFile, TestResult}; +#[allow(deprecated)] +use crate::test::EventClient; use crate::{ bson::{doc, Document}, error::{ErrorKind, Result, RETRYABLE_WRITE_ERROR}, @@ -16,7 +18,7 @@ use crate::{ cmap::{CmapEvent, ConnectionCheckoutFailedReason}, command::CommandEvent, }, - options::{ClientOptions, FindOptions, InsertManyOptions}, + options::ClientOptions, runtime, runtime::{spawn, AcknowledgedMessage, AsyncJoinHandle}, sdam::MIN_HEARTBEAT_FREQUENCY, @@ -26,10 +28,8 @@ use crate::{ log_uncaptured, run_spec_test, spec::unified_runner::run_unified_tests, - util::get_default_name, + util::{event_buffer::EventBuffer, get_default_name}, Event, - EventClient, - EventHandler, FailPoint, FailPointMode, TestClient, @@ -55,6 +55,7 @@ async fn run_legacy() { options.heartbeat_freq = Some(MIN_HEARTBEAT_FREQUENCY); } + #[allow(deprecated)] let client = EventClient::with_additional_options( options, Some(Duration::from_millis(50)), @@ -77,7 +78,7 @@ async fn run_legacy() { let coll = client.init_db_and_coll(&db_name, coll_name).await; if !test_file.data.is_empty() { - coll.insert_many(test_file.data.clone(), None) + coll.insert_many(test_file.data.clone()) .await .expect(&test_case.description); } @@ -162,9 +163,9 @@ async fn run_legacy() { }; let coll = client.get_coll(&db_name, &coll_name); - let options = FindOptions::builder().sort(doc! { "_id": 1 }).build(); let actual_data: Vec = coll - .find(None, options) + .find(doc! {}) + .sort(doc! 
{ "_id": 1 }) .await .unwrap() .try_collect() @@ -182,6 +183,7 @@ async fn run_legacy() { #[tokio::test] #[function_name::named] async fn transaction_ids_excluded() { + #[allow(deprecated)] let client = EventClient::new().await; if !(client.is_replica_set() || client.is_sharded()) { @@ -191,8 +193,11 @@ async fn transaction_ids_excluded() { let coll = client.init_db_and_coll(function_name!(), "coll").await; - let excludes_txn_number = |command_name: &str| -> bool { - let (started, _) = client.get_successful_command_execution(command_name); + #[allow(deprecated)] + let mut events = client.events.clone(); + let mut excludes_txn_number = move |command_name: &str| -> bool { + #[allow(deprecated)] + let (started, _) = events.get_successful_command_execution(command_name); !started.command.contains_key("txnNumber") }; @@ -227,6 +232,7 @@ async fn transaction_ids_excluded() { #[tokio::test] #[function_name::named] async fn transaction_ids_included() { + #[allow(deprecated)] let client = EventClient::new().await; if !(client.is_replica_set() || client.is_sharded()) { @@ -236,12 +242,15 @@ async fn transaction_ids_included() { let coll = client.init_db_and_coll(function_name!(), "coll").await; - let includes_txn_number = |command_name: &str| -> bool { - let (started, _) = client.get_successful_command_execution(command_name); + #[allow(deprecated)] + let mut events = client.events.clone(); + let mut includes_txn_number = move |command_name: &str| -> bool { + #[allow(deprecated)] + let (started, _) = events.get_successful_command_execution(command_name); started.command.contains_key("txnNumber") }; - coll.insert_one(doc! { "x": 1 }, None).await.unwrap(); + coll.insert_one(doc! { "x": 1 }).await.unwrap(); assert!(includes_txn_number("insert")); coll.update_one(doc! {}, doc! { "$set": doc! { "x": 1 } }) @@ -249,35 +258,33 @@ async fn transaction_ids_included() { .unwrap(); assert!(includes_txn_number("update")); - coll.replace_one(doc! {}, doc! { "x": 1 }, None) - .await - .unwrap(); + coll.replace_one(doc! {}, doc! { "x": 1 }).await.unwrap(); assert!(includes_txn_number("update")); coll.delete_one(doc! {}).await.unwrap(); assert!(includes_txn_number("delete")); - coll.find_one_and_delete(doc! {}, None).await.unwrap(); + coll.find_one_and_delete(doc! {}).await.unwrap(); assert!(includes_txn_number("findAndModify")); - coll.find_one_and_replace(doc! {}, doc! { "x": 1 }, None) + coll.find_one_and_replace(doc! {}, doc! { "x": 1 }) .await .unwrap(); assert!(includes_txn_number("findAndModify")); - coll.find_one_and_update(doc! {}, doc! { "$set": doc! { "x": 1 } }, None) + coll.find_one_and_update(doc! {}, doc! { "$set": doc! { "x": 1 } }) .await .unwrap(); assert!(includes_txn_number("findAndModify")); - let options = InsertManyOptions::builder().ordered(true).build(); - coll.insert_many(vec![doc! { "x": 1 }], options) + coll.insert_many(vec![doc! { "x": 1 }]) + .ordered(true) .await .unwrap(); assert!(includes_txn_number("insert")); - let options = InsertManyOptions::builder().ordered(false).build(); - coll.insert_many(vec![doc! { "x": 1 }], options) + coll.insert_many(vec![doc! { "x": 1 }]) + .ordered(false) .await .unwrap(); assert!(includes_txn_number("insert")); @@ -311,7 +318,7 @@ async fn mmapv1_error_raised() { return; } - let err = coll.insert_one(doc! { "x": 1 }, None).await.unwrap_err(); + let err = coll.insert_one(doc! 
{ "x": 1 }).await.unwrap_err(); match *err.kind { ErrorKind::Command(err) => { assert_eq!( @@ -372,7 +379,7 @@ async fn label_not_added(retry_reads: bool) { .await .unwrap(); - let err = coll.find(doc! {}, None).await.unwrap_err(); + let err = coll.find(doc! {}).await.unwrap_err(); assert!(!err.contains_label("RetryableWriteError")); } @@ -380,13 +387,13 @@ async fn label_not_added(retry_reads: bool) { /// Prose test from retryable writes spec verifying that PoolClearedErrors are retried. #[tokio::test(flavor = "multi_thread")] async fn retry_write_pool_cleared() { - let handler = Arc::new(EventHandler::new()); + let buffer = EventBuffer::new(); let mut client_options = get_client_options().await.clone(); client_options.retry_writes = Some(true); client_options.max_pool_size = Some(1); - client_options.cmap_event_handler = Some(handler.clone().into()); - client_options.command_event_handler = Some(handler.clone().into()); + client_options.cmap_event_handler = Some(buffer.handler()); + client_options.command_event_handler = Some(buffer.handler()); // on sharded clusters, ensure only a single mongos is used if client_options.repl_set_name.is_none() { client_options.hosts.drain(1..); @@ -420,12 +427,13 @@ async fn retry_write_pool_cleared() { .error_labels(vec![RETRYABLE_WRITE_ERROR]); let _guard = client.configure_fail_point(fail_point).await.unwrap(); - let mut subscriber = handler.subscribe(); + #[allow(deprecated)] + let mut subscriber = buffer.subscribe(); let mut tasks: Vec> = Vec::new(); for _ in 0..2 { let coll = collection.clone(); - let task = runtime::spawn(async move { coll.insert_one(doc! {}, None).await }); + let task = runtime::spawn(async move { coll.insert_one(doc! {}).await }); tasks.push(task); } @@ -469,7 +477,7 @@ async fn retry_write_pool_cleared() { ); } - assert_eq!(handler.get_command_started_events(&["insert"]).len(), 3); + assert_eq!(buffer.get_command_started_events(&["insert"]).len(), 3); } /// Prose test from retryable writes spec verifying that the original error is returned after @@ -545,7 +553,7 @@ async fn retry_write_retryable_write_error() { let result = client .database("test") .collection::("test") - .insert_one(doc! { "hello": "there" }, None) + .insert_one(doc! { "hello": "there" }) .await; assert_eq!(result.unwrap_err().code(), Some(91)); @@ -585,6 +593,7 @@ async fn retry_write_different_mongos() { guards.push(client.configure_fail_point(fail_point).await.unwrap()); } + #[allow(deprecated)] let client = Client::test_builder() .options(client_options) .event_client() @@ -593,10 +602,14 @@ async fn retry_write_different_mongos() { let result = client .database("test") .collection::("retry_write_different_mongos") - .insert_one(doc! {}, None) + .insert_one(doc! {}) .await; assert!(result.is_err()); - let events = client.get_command_events(&["insert"]); + #[allow(deprecated)] + let events = { + let mut events = client.events.clone(); + events.get_command_events(&["insert"]) + }; assert!( matches!( &events[..], @@ -642,6 +655,7 @@ async fn retry_write_same_mongos() { client.configure_fail_point(fail_point).await.unwrap() }; + #[allow(deprecated)] let client = Client::test_builder() .options(client_options) .event_client() @@ -650,10 +664,14 @@ async fn retry_write_same_mongos() { let result = client .database("test") .collection::("retry_write_same_mongos") - .insert_one(doc! {}, None) + .insert_one(doc! 
{}) .await; assert!(result.is_ok(), "{:?}", result); - let events = client.get_command_events(&["insert"]); + #[allow(deprecated)] + let events = { + let mut events = client.events.clone(); + events.get_command_events(&["insert"]) + }; assert!( matches!( &events[..], diff --git a/src/test/spec/sdam.rs b/src/test/spec/sdam.rs index 547713cab..c60e35ab0 100644 --- a/src/test/spec/sdam.rs +++ b/src/test/spec/sdam.rs @@ -1,4 +1,4 @@ -use std::{sync::Arc, time::Duration}; +use std::time::Duration; use bson::{doc, Document}; @@ -10,8 +10,8 @@ use crate::{ get_client_options, log_uncaptured, spec::unified_runner::run_unified_tests, + util::event_buffer::EventBuffer, Event, - EventHandler, FailPoint, FailPointMode, TestClient, @@ -51,10 +51,10 @@ async fn streaming_min_heartbeat_frequency() { return; } - let handler = Arc::new(EventHandler::new()); + let buffer = EventBuffer::new(); let mut options = get_client_options().await.clone(); options.heartbeat_freq = Some(Duration::from_millis(500)); - options.sdam_event_handler = Some(handler.clone().into()); + options.sdam_event_handler = Some(buffer.handler()); let hosts = options.hosts.clone(); @@ -70,8 +70,9 @@ async fn streaming_min_heartbeat_frequency() { // 500ms for 5 heartbeats. let mut tasks = Vec::new(); for address in hosts { - let h = handler.clone(); + let h = buffer.clone(); tasks.push(runtime::spawn(async move { + #[allow(deprecated)] let mut subscriber = h.subscribe(); for _ in 0..5 { let event = subscriber @@ -101,10 +102,10 @@ async fn heartbeat_frequency_is_respected() { return; } - let handler = Arc::new(EventHandler::new()); + let buffer = EventBuffer::new(); let mut options = get_client_options().await.clone(); options.heartbeat_freq = Some(Duration::from_millis(1000)); - options.sdam_event_handler = Some(handler.clone().into()); + options.sdam_event_handler = Some(buffer.handler()); let hosts = options.hosts.clone(); @@ -120,8 +121,9 @@ async fn heartbeat_frequency_is_respected() { // 1s for 3s. let mut tasks = Vec::new(); for address in hosts { - let h = handler.clone(); + let h = buffer.clone(); tasks.push(runtime::spawn(async move { + #[allow(deprecated)] let mut subscriber = h.subscribe(); // collect events for 2 seconds, should see between 2 and 3 heartbeats. @@ -165,24 +167,25 @@ async fn rtt_is_updated() { let app_name = "streamingRttTest"; - let handler = Arc::new(EventHandler::new()); + let buffer = EventBuffer::new(); let mut options = get_client_options().await.clone(); options.heartbeat_freq = Some(Duration::from_millis(500)); options.app_name = Some(app_name.to_string()); - options.sdam_event_handler = Some(handler.clone().into()); + options.sdam_event_handler = Some(buffer.handler()); options.hosts.drain(1..); options.direct_connection = Some(true); let host = options.hosts[0].clone(); let client = Client::with_options(options).unwrap(); - let mut subscriber = handler.subscribe(); + #[allow(deprecated)] + let mut subscriber = buffer.subscribe(); // run a find to wait for the primary to be discovered client .database("foo") .collection::("bar") - .find(None, None) + .find(doc! 
{}) .await .unwrap(); @@ -226,3 +229,67 @@ async fn rtt_is_updated() { .await .unwrap(); } + +/* TODO RUST-1895 enable this +#[tokio::test(flavor = "multi_thread")] +async fn heartbeat_started_before_socket() { + use std::sync::{Arc, Mutex}; + use tokio::{io::AsyncReadExt, net::TcpListener}; + + #[derive(Debug, PartialEq)] + enum Event { + ClientConnected, + ClientHelloReceived, + HeartbeatStarted, + HeartbeatFailed, + } + let events: Arc>> = Arc::new(Mutex::new(vec![])); + + // Mock server + { + let listener = TcpListener::bind("127.0.0.1:9999").await.unwrap(); + let events = Arc::clone(&events); + tokio::spawn(async move { + loop { + let (mut socket, _) = listener.accept().await.unwrap(); + events.lock().unwrap().push(Event::ClientConnected); + let mut buf = [0; 1024]; + let _ = socket.read(&mut buf).await.unwrap(); + events.lock().unwrap().push(Event::ClientHelloReceived); + } + }); + } + + // Client setup + let mut options = ClientOptions::parse("mongodb://127.0.0.1:9999/") + .await + .unwrap(); + options.server_selection_timeout = Some(Duration::from_millis(500)); + { + let events = Arc::clone(&events); + options.sdam_event_handler = + Some(crate::event::EventHandler::callback(move |ev| match ev { + SdamEvent::ServerHeartbeatStarted(_) => { + events.lock().unwrap().push(Event::HeartbeatStarted) + } + SdamEvent::ServerHeartbeatFailed(_) => { + events.lock().unwrap().push(Event::HeartbeatFailed) + } + _ => (), + })); + } + let client = Client::with_options(options).unwrap(); + + // Test event order + let _ = client.list_database_names().await; + assert_eq!( + &[ + Event::HeartbeatStarted, + Event::ClientConnected, + Event::ClientHelloReceived, + Event::HeartbeatFailed + ], + &events.lock().unwrap()[0..4], + ); +} +*/ diff --git a/src/test/spec/sessions.rs b/src/test/spec/sessions.rs index 27994a0f6..df81b60aa 100644 --- a/src/test/spec/sessions.rs +++ b/src/test/spec/sessions.rs @@ -7,6 +7,8 @@ use std::{ use futures::TryStreamExt; use futures_util::{future::try_join_all, FutureExt}; +#[allow(deprecated)] +use crate::test::EventClient; use crate::{ bson::{doc, Document}, client::options::ClientOptions, @@ -18,7 +20,6 @@ use crate::{ log_uncaptured, spec::unified_runner::run_unified_tests, util::Event, - EventClient, TestClient, }, Client, @@ -74,7 +75,8 @@ async fn explicit_session_created_on_same_client() { .database(function_name!()) .collection(function_name!()); let err = coll - .insert_one_with_session(doc! {}, None, &mut session0) + .insert_one(doc! {}) + .session(&mut session0) .await .unwrap_err(); match *err.kind { @@ -124,7 +126,12 @@ async fn implicit_session_after_connection() { fn ignore_val(r: Result) -> Result<()> { r.map(|_| ()) } - ops.push(coll.insert_one(doc! {}, None).map(ignore_val).boxed()); + ops.push( + coll.insert_one(doc! {}) + .into_future() + .map(ignore_val) + .boxed(), + ); ops.push( coll.delete_one(doc! {}) .into_future() @@ -138,23 +145,26 @@ async fn implicit_session_after_connection() { .boxed(), ); ops.push( - coll.find_one_and_delete(doc! {}, None) + coll.find_one_and_delete(doc! {}) + .into_future() .map(ignore_val) .boxed(), ); ops.push( - coll.find_one_and_update(doc! {}, doc! { "$set": { "a": 1 } }, None) + coll.find_one_and_update(doc! {}, doc! { "$set": { "a": 1 } }) + .into_future() .map(ignore_val) .boxed(), ); ops.push( - coll.find_one_and_replace(doc! {}, doc! { "a": 1 }, None) + coll.find_one_and_replace(doc! {}, doc! { "a": 1 }) + .into_future() .map(ignore_val) .boxed(), ); ops.push( async { - let cursor = coll.find(doc! 
{}, None).await.unwrap(); + let cursor = coll.find(doc! {}).await.unwrap(); let r: Result> = cursor.try_collect().await; r.map(|_| ()) } @@ -195,6 +205,7 @@ async fn implicit_session_after_connection() { ); } +#[allow(deprecated)] async fn spawn_mongocryptd(name: &str) -> Option<(EventClient, Process)> { let util_client = TestClient::new().await; if util_client.server_version_lt(4, 2) { @@ -217,6 +228,7 @@ async fn spawn_mongocryptd(name: &str) -> Option<(EventClient, Process)> { let options = ClientOptions::parse("mongodb://localhost:47017") .await .unwrap(); + #[allow(deprecated)] let client = EventClient::with_options(options).await; assert!(client.server_info.logical_session_timeout_minutes.is_none()); @@ -238,10 +250,11 @@ async fn sessions_not_supported_implicit_session_ignored() { return; }; - let mut subscriber = client.handler.subscribe(); + #[allow(deprecated)] + let mut subscriber = client.events.subscribe(); let coll = client.database(name).collection(name); - let _ = coll.find(doc! {}, None).await; + let _ = coll.find(doc! {}).await; let event = subscriber .filter_map_event(Duration::from_millis(500), |event| match event { Event::Command(CommandEvent::Started(command_started_event)) @@ -255,7 +268,7 @@ async fn sessions_not_supported_implicit_session_ignored() { .expect("Did not observe a command started event for find operation"); assert!(!event.command.contains_key("lsid")); - let _ = coll.insert_one(doc! { "x": 1 }, None).await; + let _ = coll.insert_one(doc! { "x": 1 }).await; let event = subscriber .filter_map_event(Duration::from_millis(500), |event| match event { Event::Command(CommandEvent::Started(command_started_event)) @@ -285,13 +298,15 @@ async fn sessions_not_supported_explicit_session_error() { let coll = client.database(name).collection(name); let error = coll - .find_one_with_session(doc! {}, None, &mut session) + .find_one(doc! {}) + .session(&mut session) .await .unwrap_err(); assert!(matches!(*error.kind, ErrorKind::SessionsNotSupported)); let error = coll - .insert_one_with_session(doc! { "x": 1 }, None, &mut session) + .insert_one(doc! { "x": 1 }) + .session(&mut session) .await .unwrap_err(); assert!(matches!(*error.kind, ErrorKind::SessionsNotSupported)); diff --git a/src/test/spec/trace.rs b/src/test/spec/trace.rs index 625fa2aa1..7fb9cbb09 100644 --- a/src/test/spec/trace.rs +++ b/src/test/spec/trace.rs @@ -3,7 +3,6 @@ use std::{collections::HashMap, iter, sync::Arc, time::Duration}; use crate::{ bson::{doc, Document}, client::options::ServerAddress, - coll::options::FindOptions, error::{ BulkWriteError, BulkWriteFailure, @@ -88,7 +87,7 @@ async fn command_logging_truncation_default_limit() { let mut tracing_subscriber = DEFAULT_GLOBAL_TRACING_HANDLER.subscribe(); let docs = iter::repeat(doc! { "x": "y" }).take(100); - coll.insert_many(docs, None) + coll.insert_many(docs) .await .expect("insert many should succeed"); @@ -105,7 +104,7 @@ async fn command_logging_truncation_default_limit() { let reply = succeeded.get_value_as_string("reply"); assert!(reply.len() <= DEFAULT_MAX_DOCUMENT_LENGTH_BYTES + 3); // +3 for trailing "..." - coll.find(None, None).await.expect("find should succeed"); + coll.find(doc! 
{}).await.expect("find should succeed"); let succeeded = tracing_subscriber .wait_for_event(Duration::from_millis(500), |e| { e.get_value_as_string("message") == "Command succeeded" @@ -179,7 +178,7 @@ async fn command_logging_truncation_mid_codepoint() { let mut tracing_subscriber = DEFAULT_GLOBAL_TRACING_HANDLER.subscribe(); let docs = iter::repeat(doc! { "🤔": "🤔🤔🤔🤔🤔🤔" }).take(10); - coll.insert_many(docs, None) + coll.insert_many(docs) .await .expect("insert many should succeed"); @@ -196,10 +195,8 @@ async fn command_logging_truncation_mid_codepoint() { // trailing "..." assert_eq!(command.len(), 221); - let find_options = FindOptions::builder() + coll.find(doc! {}) .projection(doc! { "_id": 0, "🤔": 1 }) - .build(); - coll.find(None, find_options) .await .expect("find should succeed"); let succeeded = tracing_subscriber @@ -363,7 +360,7 @@ fn selection_criteria_tracing_representation() { // non-primary read preferences with empty options - options should be omitted from // representation. - let empty_opts = ReadPreferenceOptions::builder().build(); + let empty_opts = Some(ReadPreferenceOptions::builder().build()); assert_eq!( SelectionCriteria::ReadPreference(ReadPreference::PrimaryPreferred { @@ -396,9 +393,11 @@ fn selection_criteria_tracing_representation() { let mut tag_set = HashMap::new(); tag_set.insert("a".to_string(), "b".to_string()); - let opts_with_tag_sets = ReadPreferenceOptions::builder() - .tag_sets(vec![tag_set.clone()]) - .build(); + let opts_with_tag_sets = Some( + ReadPreferenceOptions::builder() + .tag_sets(vec![tag_set.clone()]) + .build(), + ); assert_eq!( SelectionCriteria::ReadPreference(ReadPreference::PrimaryPreferred { @@ -408,9 +407,11 @@ fn selection_criteria_tracing_representation() { "ReadPreference { Mode: PrimaryPreferred, Tag Sets: [{\"a\": \"b\"}] }" ); - let opts_with_max_staleness = ReadPreferenceOptions::builder() - .max_staleness(Duration::from_millis(200)) - .build(); + let opts_with_max_staleness = Some( + ReadPreferenceOptions::builder() + .max_staleness(Duration::from_millis(200)) + .build(), + ); assert_eq!( SelectionCriteria::ReadPreference(ReadPreference::PrimaryPreferred { options: opts_with_max_staleness @@ -419,9 +420,11 @@ fn selection_criteria_tracing_representation() { "ReadPreference { Mode: PrimaryPreferred, Max Staleness: 200ms }" ); - let opts_with_hedge = ReadPreferenceOptions::builder() - .hedge(HedgedReadOptions::with_enabled(true)) - .build(); + let opts_with_hedge = Some( + ReadPreferenceOptions::builder() + .hedge(HedgedReadOptions::with_enabled(true)) + .build(), + ); assert_eq!( SelectionCriteria::ReadPreference(ReadPreference::PrimaryPreferred { options: opts_with_hedge @@ -430,10 +433,12 @@ fn selection_criteria_tracing_representation() { "ReadPreference { Mode: PrimaryPreferred, Hedge: true }" ); - let opts_with_multiple_options = ReadPreferenceOptions::builder() - .max_staleness(Duration::from_millis(200)) - .tag_sets(vec![tag_set]) - .build(); + let opts_with_multiple_options = Some( + ReadPreferenceOptions::builder() + .max_staleness(Duration::from_millis(200)) + .tag_sets(vec![tag_set]) + .build(), + ); assert_eq!( SelectionCriteria::ReadPreference(ReadPreference::PrimaryPreferred { options: opts_with_multiple_options diff --git a/src/test/spec/transactions.rs b/src/test/spec/transactions.rs index fcb9710d3..f0ec76856 100644 --- a/src/test/spec/transactions.rs +++ b/src/test/spec/transactions.rs @@ -77,15 +77,15 @@ async fn deserialize_recovery_token() { let coll = client .database(function_name!()) 
.collection(function_name!()); - coll.insert_one(A { num: 4 }, None).await.unwrap(); + coll.insert_one(A { num: 4 }).await.unwrap(); // Attempt to execute Find on a document with schema B. let coll: Collection = client .database(function_name!()) .collection(function_name!()); - session.start_transaction(None).await.unwrap(); + session.start_transaction().await.unwrap(); assert!(session.transaction.recovery_token.is_none()); - let result = coll.find_one_with_session(None, None, &mut session).await; + let result = coll.find_one(doc! {}).session(&mut session).await; assert!(result.is_err()); // Assert that the deserialization failed. // Nevertheless, the recovery token should have been retrieved from the ok: 1 response. @@ -94,6 +94,7 @@ async fn deserialize_recovery_token() { #[tokio::test] async fn convenient_api_custom_error() { + #[allow(deprecated)] let client = Client::test_builder().event_client().build().await; if !client.supports_transactions() { log_uncaptured("Skipping convenient_api_custom_error: no transaction support."); @@ -106,28 +107,27 @@ async fn convenient_api_custom_error() { struct MyErr; let result: Result<()> = session - .with_transaction( - coll, - |session, coll| { - async move { - coll.find_one_with_session(None, None, session).await?; - Err(Error::custom(MyErr)) - } - .boxed() - }, - None, - ) + .start_transaction() + .and_run(coll, |session, coll| { + async move { + coll.find_one(doc! {}).session(session).await?; + Err(Error::custom(MyErr)) + } + .boxed() + }) .await; assert!(result.is_err()); assert!(result.unwrap_err().get_custom::().is_some()); - let events = client.get_all_command_started_events(); + #[allow(deprecated)] + let events = client.events.get_all_command_started_events(); let commands: Vec<_> = events.iter().map(|ev| &ev.command_name).collect(); assert_eq!(&["find", "abortTransaction"], &commands[..]); } #[tokio::test] async fn convenient_api_returned_value() { + #[allow(deprecated)] let client = Client::test_builder().event_client().build().await; if !client.supports_transactions() { log_uncaptured("Skipping convenient_api_returned_value: no transaction support."); @@ -139,17 +139,14 @@ async fn convenient_api_returned_value() { .collection::("test_convenient"); let value = session - .with_transaction( - coll, - |session, coll| { - async move { - coll.find_one_with_session(None, None, session).await?; - Ok(42) - } - .boxed() - }, - None, - ) + .start_transaction() + .and_run(coll, |session, coll| { + async move { + coll.find_one(doc! {}).session(session).await?; + Ok(42) + } + .boxed() + }) .await .unwrap(); @@ -158,6 +155,7 @@ async fn convenient_api_returned_value() { #[tokio::test] async fn convenient_api_retry_timeout_callback() { + #[allow(deprecated)] let client = Client::test_builder().event_client().build().await; if !client.supports_transactions() { log_uncaptured("Skipping convenient_api_retry_timeout_callback: no transaction support."); @@ -170,19 +168,16 @@ async fn convenient_api_retry_timeout_callback() { .collection::("test_convenient"); let result: Result<()> = session - .with_transaction( - coll, - |session, coll| { - async move { - coll.find_one_with_session(None, None, session).await?; - let mut err = Error::custom(42); - err.add_label(TRANSIENT_TRANSACTION_ERROR); - Err(err) - } - .boxed() - }, - None, - ) + .start_transaction() + .and_run(coll, |session, coll| { + async move { + coll.find_one(doc! 
{}).session(session).await?; + let mut err = Error::custom(42); + err.add_label(TRANSIENT_TRANSACTION_ERROR); + Err(err) + } + .boxed() + }) .await; let err = result.unwrap_err(); @@ -197,6 +192,7 @@ async fn convenient_api_retry_timeout_commit_unknown() { options.direct_connection = Some(true); options.hosts.drain(1..); } + #[allow(deprecated)] let client = Client::test_builder() .options(options) .event_client() @@ -220,17 +216,14 @@ async fn convenient_api_retry_timeout_commit_unknown() { let _guard = client.configure_fail_point(fail_point).await.unwrap(); let result = session - .with_transaction( - coll, - |session, coll| { - async move { - coll.find_one_with_session(None, None, session).await?; - Ok(()) - } - .boxed() - }, - None, - ) + .start_transaction() + .and_run(coll, |session, coll| { + async move { + coll.find_one(doc! {}).session(session).await?; + Ok(()) + } + .boxed() + }) .await; let err = result.unwrap_err(); @@ -244,6 +237,7 @@ async fn convenient_api_retry_timeout_commit_transient() { options.direct_connection = Some(true); options.hosts.drain(1..); } + #[allow(deprecated)] let client = Client::test_builder() .options(options) .event_client() @@ -267,17 +261,14 @@ async fn convenient_api_retry_timeout_commit_transient() { let _guard = client.configure_fail_point(fail_point).await.unwrap(); let result = session - .with_transaction( - coll, - |session, coll| { - async move { - coll.find_one_with_session(None, None, session).await?; - Ok(()) - } - .boxed() - }, - None, - ) + .start_transaction() + .and_run(coll, |session, coll| { + async move { + coll.find_one(doc! {}).session(session).await?; + Ok(()) + } + .boxed() + }) .await; let err = result.unwrap_err(); diff --git a/src/test/spec/unified_runner.rs b/src/test/spec/unified_runner.rs index 1e914da65..07ddf57ce 100644 --- a/src/test/spec/unified_runner.rs +++ b/src/test/spec/unified_runner.rs @@ -1,6 +1,5 @@ pub(crate) mod entity; pub(crate) mod matcher; -pub(crate) mod observer; pub(crate) mod operation; pub(crate) mod test_event; pub(crate) mod test_file; diff --git a/src/test/spec/unified_runner/entity.rs b/src/test/spec/unified_runner/entity.rs index 84086bebb..705d7cb57 100644 --- a/src/test/spec/unified_runner/entity.rs +++ b/src/test/spec/unified_runner/entity.rs @@ -1,12 +1,14 @@ use std::{ fs::File, - io::BufWriter, + io::{BufWriter, Write}, ops::{Deref, DerefMut}, sync::Arc, time::Duration, }; -use tokio::sync::{mpsc, oneshot, Mutex}; +use bson::to_document; +use time::OffsetDateTime; +use tokio::sync::{mpsc, oneshot, Mutex, RwLock}; use crate::{ bson::{Bson, Document}, @@ -19,8 +21,8 @@ use crate::{ sdam::TopologyDescription, test::{ spec::unified_runner::{ExpectedEventType, ObserveEvent}, + util::event_buffer::EventBuffer, Event, - EventHandler, }, Client, ClientSession, @@ -30,7 +32,7 @@ use crate::{ SessionCursor, }; -use super::{observer::EventObserver, test_file::ThreadMessage, Operation}; +use super::{events_match, test_file::ThreadMessage, EntityMap, ExpectedEvent, Operation}; #[derive(Debug)] #[allow(clippy::large_enum_variant)] @@ -62,8 +64,7 @@ pub(crate) struct ClientEntity { /// This is None if a `close` operation has been executed for this entity. 
pub(crate) client: Option, pub(crate) topology_id: bson::oid::ObjectId, - handler: Arc, - pub(crate) observer: Arc>, + events: EventBuffer, observe_events: Option>, ignore_command_names: Option>, observe_sensitive_commands: bool, @@ -128,22 +129,19 @@ impl TestCursor { impl ClientEntity { pub(crate) fn new( - client_options: ClientOptions, - handler: Arc, + mut client_options: ClientOptions, observe_events: Option>, ignore_command_names: Option>, observe_sensitive_commands: bool, ) -> Self { - // ensure the observer is already listening before the client is created to avoid any races - // around collecting initial events when the topology opens. - let observer = EventObserver::new(handler.broadcaster().subscribe()); + let events = EventBuffer::new(); + events.register(&mut client_options); let client = Client::with_options(client_options).unwrap(); let topology_id = client.topology().id; Self { client: Some(client), topology_id, - handler, - observer: Arc::new(Mutex::new(observer)), + events, observe_events, ignore_command_names, observe_sensitive_commands, @@ -154,19 +152,63 @@ impl ClientEntity { /// Ignores any event with a name in the ignore list. Also ignores all configureFailPoint /// events. pub(crate) fn get_filtered_events(&self, expected_type: ExpectedEventType) -> Vec { - self.handler.get_filtered_events(expected_type, |event| { - if let Event::Command(cev) = event { - if !self.allow_command_event(cev) { + self.events + .all() + .into_iter() + .filter(|event| { + if !expected_type.matches(event) { return false; } - } - if let Some(observe_events) = self.observe_events.as_ref() { - if !observe_events.iter().any(|observe| observe.matches(event)) { - return false; + if let Event::Command(cev) = event { + if !self.allow_command_event(cev) { + return false; + } + } + if let Some(observe_events) = self.observe_events.as_ref() { + if !observe_events.iter().any(|observe| observe.matches(event)) { + return false; + } + } + true + }) + .collect() + } + + pub(crate) fn matching_events( + &self, + expected: &ExpectedEvent, + entities: &EntityMap, + ) -> Vec { + self.events + .all() + .into_iter() + .filter(|e| events_match(e, expected, Some(entities)).is_ok()) + .collect() + } + + pub(crate) async fn wait_for_matching_events( + &self, + expected: &ExpectedEvent, + count: usize, + entities: Arc>, + ) -> Result<()> { + crate::runtime::timeout(Duration::from_secs(10), async { + loop { + let (events, notified) = self.events.watch_all(); + let matched = { + let entities = &*entities.read().await; + events + .into_iter() + .filter(|e| events_match(e, expected, Some(entities)).is_ok()) + .count() + }; + if matched >= count { + return Ok(()); } + notified.await; } - true }) + .await? } /// Returns `true` if a given `CommandEvent` is allowed to be observed. @@ -201,17 +243,51 @@ impl ClientEntity { /// Gets all events of type commandStartedEvent, excluding configureFailPoint events. pub(crate) fn get_all_command_started_events(&self) -> Vec { - self.handler.get_all_command_started_events() + self.events + .all() + .into_iter() + .filter_map(|ev| match ev { + Event::Command(CommandEvent::Started(ev)) + if ev.command_name != "configureFailPoint" => + { + Some(ev) + } + _ => None, + }) + .collect() } /// Writes all events with the given name to the given BufWriter. 
- pub fn write_events_list_to_file(&self, names: &[&str], writer: &mut BufWriter) { - self.handler.write_events_list_to_file(names, writer); + pub(crate) fn write_events_list_to_file(&self, names: &[&str], writer: &mut BufWriter) { + let mut add_comma = false; + let mut write_json = |mut event: Document, name: &str, time: &OffsetDateTime| { + event.insert("name", name); + event.insert("observedAt", time.unix_timestamp()); + let mut json_string = serde_json::to_string(&event).unwrap(); + if add_comma { + json_string.insert(0, ','); + } else { + add_comma = true; + } + write!(writer, "{}", json_string).unwrap(); + }; + + for (event, time) in self.events.all_timed() { + let name = match &event { + Event::Command(ev) => ev.name(), + Event::Sdam(ev) => ev.name(), + Event::Cmap(ev) => ev.planned_maintenance_testing_name(), + }; + if names.contains(&name) { + let ev_doc = to_document(&event).unwrap(); + write_json(ev_doc, name, &time); + } + } } /// Gets the count of connections currently checked out. pub(crate) fn connections_checked_out(&self) -> u32 { - self.handler.connections_checked_out() + self.events.connections_checked_out() } /// Synchronize all connection pool worker threads. diff --git a/src/test/spec/unified_runner/observer.rs b/src/test/spec/unified_runner/observer.rs deleted file mode 100644 index 14db529c5..000000000 --- a/src/test/spec/unified_runner/observer.rs +++ /dev/null @@ -1,105 +0,0 @@ -use tokio::sync::{ - broadcast::{ - self, - error::{RecvError, TryRecvError}, - }, - RwLock, -}; - -use std::{sync::Arc, time::Duration}; - -use crate::{ - error::{Error, Result}, - runtime, - test::Event, -}; - -use super::{events_match, EntityMap, ExpectedEvent}; - -// TODO: RUST-1424: consolidate this with `EventHandler` -/// Observer used to cache all the seen events for a given client in a unified test. -/// Used to implement assertEventCount and waitForEvent operations. -#[derive(Debug)] -pub(crate) struct EventObserver { - seen_events: Vec, - receiver: broadcast::Receiver, -} - -impl EventObserver { - pub fn new(receiver: broadcast::Receiver) -> Self { - Self { - seen_events: Vec::new(), - receiver, - } - } - - pub(crate) async fn recv(&mut self) -> Option { - match self.receiver.recv().await { - Ok(e) => { - self.seen_events.push(e.clone()); - Some(e) - } - Err(RecvError::Lagged(_)) => panic!("event receiver lagged"), - Err(RecvError::Closed) => None, - } - } - - fn try_recv(&mut self) -> Option { - match self.receiver.try_recv() { - Ok(e) => { - self.seen_events.push(e.clone()); - Some(e) - } - Err(TryRecvError::Lagged(_)) => panic!("event receiver lagged"), - Err(TryRecvError::Closed | TryRecvError::Empty) => None, - } - } - - pub(crate) async fn matching_events( - &mut self, - event: &ExpectedEvent, - entities: Arc>, - ) -> Vec { - // first retrieve all the events buffered in the channel - while self.try_recv().is_some() {} - let es = entities.read().await; - // Then collect all matching events. 
- self.seen_events - .iter() - .filter(|e| events_match(e, event, Some(&es)).is_ok()) - .cloned() - .collect() - } - - pub(crate) async fn wait_for_matching_events( - &mut self, - event: &ExpectedEvent, - count: usize, - entities: Arc>, - ) -> Result<()> { - let mut seen = self.matching_events(event, entities.clone()).await.len(); - - if seen >= count { - return Ok(()); - } - - runtime::timeout(Duration::from_secs(10), async { - while let Some(e) = self.recv().await { - let es = entities.read().await; - if events_match(&e, event, Some(&es)).is_ok() { - seen += 1; - if seen == count { - return Ok(()); - } - } - } - Err(Error::internal(format!( - "ran out of events before, only saw {} of {}", - seen, count - ))) - }) - .await??; - - Ok(()) - } -} diff --git a/src/test/spec/unified_runner/operation.rs b/src/test/spec/unified_runner/operation.rs index 701dacd05..f1ed154df 100644 --- a/src/test/spec/unified_runner/operation.rs +++ b/src/test/spec/unified_runner/operation.rs @@ -19,6 +19,7 @@ use futures::{ future::BoxFuture, io::AsyncReadExt, stream::{StreamExt, TryStreamExt}, + AsyncWriteExt, FutureExt, }; use serde::{ @@ -575,12 +576,11 @@ impl Find { selection_criteria: None, let_vars: self.let_vars.clone(), }; + let act = collection.find(self.filter.clone()).with_options(options); match &self.session { Some(session_id) => { let cursor = with_mut_session!(test_runner, session_id, |session| async { - collection - .find_with_session(self.filter.clone(), options, session) - .await + act.session(session.deref_mut()).await }) .await?; Ok(TestCursor::Session { @@ -589,7 +589,7 @@ impl Find { }) } None => { - let cursor = collection.find(self.filter.clone(), options).await?; + let cursor = act.await?; Ok(TestCursor::Normal(Mutex::new(cursor))) } } @@ -718,27 +718,17 @@ impl TestOperation for InsertMany { ) -> BoxFuture<'a, Result>> { async move { let collection = test_runner.get_collection(id).await; + let action = collection + .insert_many(&self.documents) + .with_options(self.options.clone()); let result = match &self.session { Some(session_id) => { with_mut_session!(test_runner, session_id, |session| { - async move { - collection - .insert_many_with_session( - self.documents.clone(), - self.options.clone(), - session, - ) - .await - } - .boxed() + async move { action.session(session.deref_mut()).await }.boxed() }) .await? } - None => { - collection - .insert_many(self.documents.clone(), self.options.clone()) - .await? - } + None => action.await?, }; let ids: HashMap = result .inserted_ids @@ -769,24 +759,17 @@ impl TestOperation for InsertOne { ) -> BoxFuture<'a, Result>> { async move { let collection = test_runner.get_collection(id).await; + let action = collection + .insert_one(self.document.clone()) + .with_options(self.options.clone()); let result = match &self.session { Some(session_id) => { with_mut_session!(test_runner, session_id, |session| async { - collection - .insert_one_with_session( - self.document.clone(), - self.options.clone(), - session, - ) - .await + action.session(session.deref_mut()).await }) .await? } - None => { - collection - .insert_one(self.document.clone(), self.options.clone()) - .await? 
- } + None => action.await?, }; let result = to_bson(&result)?; Ok(Some(result.into())) @@ -1079,7 +1062,8 @@ impl TestOperation for FindOne { async move { let collection = test_runner.get_collection(id).await; let result = collection - .find_one(self.filter.clone(), self.options.clone()) + .find_one(self.filter.clone().unwrap_or_default()) + .with_options(self.options.clone()) .await?; match result { Some(result) => Ok(Some(Bson::from(result).into())), @@ -1246,11 +1230,8 @@ impl TestOperation for ReplaceOne { async move { let collection = test_runner.get_collection(id).await; let result = collection - .replace_one( - self.filter.clone(), - self.replacement.clone(), - self.options.clone(), - ) + .replace_one(self.filter.clone(), self.replacement.clone()) + .with_options(self.options.clone()) .await?; let result = to_bson(&result)?; Ok(Some(result.into())) @@ -1277,29 +1258,17 @@ impl TestOperation for FindOneAndUpdate { ) -> BoxFuture<'a, Result>> { async move { let collection = test_runner.get_collection(id).await; + let act = collection + .find_one_and_update(self.filter.clone(), self.update.clone()) + .with_options(self.options.clone()); let result = match &self.session { Some(session_id) => { with_mut_session!(test_runner, session_id, |session| async { - collection - .find_one_and_update_with_session( - self.filter.clone(), - self.update.clone(), - self.options.clone(), - session, - ) - .await + act.session(session.deref_mut()).await }) .await? } - None => { - collection - .find_one_and_update( - self.filter.clone(), - self.update.clone(), - self.options.clone(), - ) - .await? - } + None => act.await?, }; let result = to_bson(&result)?; Ok(Some(result.into())) @@ -1326,11 +1295,8 @@ impl TestOperation for FindOneAndReplace { async move { let collection = test_runner.get_collection(id).await; let result = collection - .find_one_and_replace( - self.filter.clone(), - self.replacement.clone(), - self.options.clone(), - ) + .find_one_and_replace(self.filter.clone(), self.replacement.clone()) + .with_options(self.options.clone()) .await?; let result = to_bson(&result)?; @@ -1357,7 +1323,8 @@ impl TestOperation for FindOneAndDelete { async move { let collection = test_runner.get_collection(id).await; let result = collection - .find_one_and_delete(self.filter.clone(), self.options.clone()) + .find_one_and_delete(self.filter.clone()) + .with_options(self.options.clone()) .await?; let result = to_bson(&result)?; Ok(Some(result.into())) @@ -1889,7 +1856,7 @@ impl TestOperation for StartTransaction { ) -> BoxFuture<'a, Result>> { async move { with_mut_session!(test_runner, id, |session| { - async move { session.start_transaction(None).await } + async move { session.start_transaction().await } }) .await?; Ok(None) @@ -2580,13 +2547,8 @@ impl TestOperation for AssertEventCount { ) -> BoxFuture<'a, ()> { async { let client = test_runner.get_client(self.client.as_str()).await; - let entities = test_runner.entities.clone(); - let actual_events = client - .observer - .lock() - .await - .matching_events(&self.event, entities) - .await; + let entities = test_runner.entities.read().await; + let actual_events = client.matching_events(&self.event, &entities); assert_eq!( actual_events.len(), self.count, @@ -2617,9 +2579,6 @@ impl TestOperation for WaitForEvent { let client = test_runner.get_client(self.client.as_str()).await; let entities = test_runner.entities.clone(); client - .observer - .lock() - .await .wait_for_matching_events(&self.event, self.count, entities) .await .unwrap(); @@ -2767,7 +2726,9 @@ 
impl TestOperation for Download { // First, read via the download_to_writer API. let mut buf: Vec = vec![]; bucket - .download_to_futures_0_3_writer(self.id.clone(), &mut buf) + .open_download_stream(self.id.clone()) + .await? + .read_to_end(&mut buf) .await?; let writer_data = hex::encode(buf); @@ -2803,28 +2764,15 @@ impl TestOperation for DownloadByName { async move { let bucket = test_runner.get_bucket(id).await; - // First, read via the download_to_writer API. let mut buf: Vec = vec![]; bucket - .download_to_futures_0_3_writer_by_name( - self.filename.clone(), - &mut buf, - self.options.clone(), - ) + .open_download_stream_by_name(&self.filename) + .with_options(self.options.clone()) + .await? + .read_to_end(&mut buf) .await?; let writer_data = hex::encode(buf); - // Next, read via the open_download_stream API. - let mut buf: Vec = vec![]; - let mut stream = bucket - .open_download_stream_by_name(self.filename.clone(), self.options.clone()) - .await?; - stream.read_to_end(&mut buf).await?; - let stream_data = hex::encode(buf); - - // Assert that both APIs returned the same data. - assert_eq!(writer_data, stream_data); - Ok(Some(Entity::Bson(writer_data.into()))) } .boxed() @@ -2877,15 +2825,17 @@ impl TestOperation for Upload { let hex_string = self.source.get("$$hexBytes").unwrap().as_str().unwrap(); let bytes = hex::decode(hex_string).unwrap(); - let id = bucket - .upload_from_futures_0_3_reader( - self.filename.clone(), - &bytes[..], - self.options.clone(), - ) - .await?; + let id = { + let mut stream = bucket + .open_upload_stream(&self.filename) + .with_options(self.options.clone()) + .await?; + stream.write_all(&bytes[..]).await?; + stream.close().await?; + stream.id().clone() + }; - Ok(Some(Entity::Bson(id.into()))) + Ok(Some(Entity::Bson(id))) } .boxed() } diff --git a/src/test/spec/unified_runner/operation/search_index.rs b/src/test/spec/unified_runner/operation/search_index.rs index 461d53df5..e09825865 100644 --- a/src/test/spec/unified_runner/operation/search_index.rs +++ b/src/test/spec/unified_runner/operation/search_index.rs @@ -4,6 +4,7 @@ use futures_util::{FutureExt, TryStreamExt}; use serde::Deserialize; use crate::{ + action::Action, coll::options::AggregateOptions, error::Result, search_index::options::{ @@ -35,7 +36,8 @@ impl TestOperation for CreateSearchIndex { async move { let collection = test_runner.get_collection(id).await; let name = collection - .create_search_index(self.model.clone(), self.options.clone()) + .create_search_index(self.model.clone()) + .with_options(self.options.clone()) .await?; Ok(Some(Bson::String(name).into())) } @@ -60,7 +62,8 @@ impl TestOperation for CreateSearchIndexes { async move { let collection = test_runner.get_collection(id).await; let names = collection - .create_search_indexes(self.models.clone(), self.options.clone()) + .create_search_indexes(self.models.clone()) + .with_options(self.options.clone()) .await?; Ok(Some(to_bson(&names)?.into())) } @@ -85,7 +88,8 @@ impl TestOperation for DropSearchIndex { async move { let collection = test_runner.get_collection(id).await; collection - .drop_search_index(&self.name, self.options.clone()) + .drop_search_index(&self.name) + .with_options(self.options.clone()) .await?; Ok(None) } @@ -111,11 +115,12 @@ impl TestOperation for ListSearchIndexes { async move { let collection = test_runner.get_collection(id).await; let cursor = collection - .list_search_indexes( - self.name.as_deref(), - self.aggregation_options.clone(), - self.options.clone(), - ) + .list_search_indexes() + 
.optional(self.name.clone(), |a, n| a.name(n)) + .optional(self.aggregation_options.clone(), |a, o| { + a.aggregate_options(o) + }) + .with_options(self.options.clone()) .await?; let values: Vec<_> = cursor.try_collect().await?; Ok(Some(to_bson(&values)?.into())) @@ -142,7 +147,8 @@ impl TestOperation for UpdateSearchIndex { async move { let collection = test_runner.get_collection(id).await; collection - .update_search_index(&self.name, self.definition.clone(), self.options.clone()) + .update_search_index(&self.name, self.definition.clone()) + .with_options(self.options.clone()) .await?; Ok(None) } diff --git a/src/test/spec/unified_runner/test_file.rs b/src/test/spec/unified_runner/test_file.rs index ab06af679..0a8d570dc 100644 --- a/src/test/spec/unified_runner/test_file.rs +++ b/src/test/spec/unified_runner/test_file.rs @@ -27,7 +27,7 @@ use crate::{ WriteConcern, }, serde_util, - test::{Serverless, TestClient, DEFAULT_URI}, + test::{Event, Serverless, TestClient, DEFAULT_URI}, }; #[derive(Debug, Deserialize)] @@ -426,6 +426,22 @@ pub(crate) enum ExpectedEventType { Sdam, } +impl ExpectedEventType { + pub(crate) fn matches(&self, event: &Event) -> bool { + match (self, event) { + (ExpectedEventType::Cmap, Event::Cmap(_)) => true, + ( + ExpectedEventType::CmapWithoutConnectionReady, + Event::Cmap(crate::event::cmap::CmapEvent::ConnectionReady(_)), + ) => false, + (ExpectedEventType::CmapWithoutConnectionReady, Event::Cmap(_)) => true, + (ExpectedEventType::Command, Event::Command(_)) => true, + (ExpectedEventType::Sdam, Event::Sdam(_)) => true, + _ => false, + } + } +} + #[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize)] #[serde(rename_all = "camelCase", deny_unknown_fields)] pub(crate) enum EventMatch { @@ -668,11 +684,16 @@ fn deserialize_selection_criteria() { match selection_criteria { SelectionCriteria::ReadPreference(read_preference) => match read_preference { - ReadPreference::SecondaryPreferred { options } => { + ReadPreference::SecondaryPreferred { + options: Some(options), + } => { assert_eq!(options.max_staleness, Some(Duration::from_secs(100))); assert_eq!(options.hedge, Some(HedgedReadOptions::with_enabled(true))); } - other => panic!("Expected mode SecondaryPreferred, got {:?}", other), + other => panic!( + "Expected mode SecondaryPreferred with options, got {:?}", + other + ), }, SelectionCriteria::Predicate(_) => panic!("Expected read preference, got predicate"), } diff --git a/src/test/spec/unified_runner/test_runner.rs b/src/test/spec/unified_runner/test_runner.rs index 891701af7..96fa0afcb 100644 --- a/src/test/spec/unified_runner/test_runner.rs +++ b/src/test/spec/unified_runner/test_runner.rs @@ -7,16 +7,9 @@ use tokio::sync::{mpsc, RwLock}; use crate::{ bson::{doc, Document}, client::options::ClientOptions, - concern::{Acknowledgment, WriteConcern}, + concern::WriteConcern, gridfs::GridFsBucket, - options::{ - CollectionOptions, - CreateCollectionOptions, - FindOptions, - ReadConcern, - ReadPreference, - SelectionCriteria, - }, + options::{CollectionOptions, ReadConcern, ReadPreference, SelectionCriteria}, runtime, sdam::{TopologyDescription, MIN_HEARTBEAT_FREQUENCY}, test::{ @@ -29,7 +22,6 @@ use crate::{ }, update_options_for_testing, util::FailPointGuard, - EventHandler, TestClient, DEFAULT_URI, LOAD_BALANCED_MULTIPLE_URI, @@ -37,6 +29,8 @@ use crate::{ SERVERLESS, SERVER_API, }, + ClientSession, + ClusterTime, Collection, Database, }; @@ -84,6 +78,7 @@ pub(crate) struct TestRunner { pub(crate) internal_client: TestClient, pub(crate) entities: Arc>, 
pub(crate) fail_point_guards: Arc>>, + pub(crate) cluster_time: Arc>>, } impl TestRunner { @@ -92,6 +87,7 @@ impl TestRunner { internal_client: TestClient::new().await, entities: Default::default(), fail_point_guards: Default::default(), + cluster_time: Default::default(), } } @@ -101,6 +97,7 @@ impl TestRunner { internal_client: TestClient::with_options(Some(options)).await, entities: Arc::new(RwLock::new(EntityMap::new())), fail_point_guards: Arc::new(RwLock::new(Vec::new())), + cluster_time: Default::default(), } } @@ -207,9 +204,11 @@ impl TestRunner { log_uncaptured(format!("Executing {:?}", &test_case.description)); if let Some(ref initial_data) = test_file.initial_data { + let mut session = self.internal_client.start_session().await.unwrap(); for data in initial_data { - self.insert_initial_data(data).await; + self.insert_initial_data(data, &mut session).await; } + *self.cluster_time.write().await = session.cluster_time().cloned(); } self.entities.write().await.clear(); @@ -357,9 +356,9 @@ impl TestRunner { .internal_client .get_coll_with_options(db_name, coll_name, options); - let options = FindOptions::builder().sort(doc! { "_id": 1 }).build(); let actual_data: Vec = collection - .find(doc! {}, options) + .find(doc! {}) + .sort(doc! { "_id": 1 }) .await .unwrap() .try_collect() @@ -372,35 +371,37 @@ impl TestRunner { } } - pub(crate) async fn insert_initial_data(&self, data: &CollectionData) { - let write_concern = WriteConcern::builder().w(Acknowledgment::Majority).build(); - + pub(crate) async fn insert_initial_data( + &self, + data: &CollectionData, + session: &mut ClientSession, + ) { if !data.documents.is_empty() { let collection_options = CollectionOptions::builder() - .write_concern(write_concern) + .write_concern(WriteConcern::majority()) .build(); - let coll = self - .internal_client - .init_db_and_coll_with_options( - &data.database_name, - &data.collection_name, - collection_options, - ) - .await; - coll.insert_many(data.documents.clone(), None) + let coll = self.internal_client.get_coll_with_options( + &data.database_name, + &data.collection_name, + collection_options, + ); + coll.drop().session(&mut *session).await.unwrap(); + coll.insert_many(data.documents.clone()) + .session(session) .await .unwrap(); } else { - let collection_options = CreateCollectionOptions::builder() - .write_concern(write_concern) - .build(); + let coll = self + .internal_client + .get_coll(&data.database_name, &data.collection_name); + coll.drop().session(&mut *session).await.unwrap(); self.internal_client - .create_fresh_collection( - &data.database_name, - &data.collection_name, - collection_options, - ) - .await; + .database(&data.database_name) + .create_collection(&data.collection_name) + .session(&mut *session) + .write_concern(WriteConcern::majority()) + .await + .unwrap(); } } @@ -461,10 +462,6 @@ impl TestRunner { ) }); update_options_for_testing(&mut options); - let handler = Arc::new(EventHandler::new()); - options.command_event_handler = Some(handler.clone().command_sender().into()); - options.cmap_event_handler = Some(handler.clone().cmap_sender().into()); - options.sdam_event_handler = Some(handler.clone().sdam_sender().into()); options.server_api = server_api; @@ -503,7 +500,6 @@ impl TestRunner { id, Entity::Client(ClientEntity::new( options, - handler, observe_events, ignore_command_names, observe_sensitive_commands, @@ -535,11 +531,14 @@ impl TestRunner { TestFileEntity::Session(session) => { let id = session.id.clone(); let client = self.get_client(&session.client).await; - 
let client_session = client + let mut client_session = client .start_session() .with_options(session.session_options.clone()) .await .unwrap(); + if let Some(time) = &*self.cluster_time.read().await { + client_session.advance_cluster_time(time); + } (id, Entity::Session(SessionEntity::new(client_session))) } TestFileEntity::Bucket(bucket) => { diff --git a/src/test/spec/v2_runner.rs b/src/test/spec/v2_runner.rs index 5e445212b..141b83625 100644 --- a/src/test/spec/v2_runner.rs +++ b/src/test/spec/v2_runner.rs @@ -9,11 +9,13 @@ use std::{future::IntoFuture, sync::Arc, time::Duration}; use futures::{future::BoxFuture, FutureExt}; use semver::VersionReq; +#[allow(deprecated)] +use crate::test::EventClient; use crate::{ bson::{doc, from_bson}, coll::options::DropCollectionOptions, concern::WriteConcern, - options::{ClientOptions, CreateCollectionOptions, InsertManyOptions}, + options::{ClientOptions, CreateCollectionOptions}, sdam::{ServerInfo, MIN_HEARTBEAT_FREQUENCY}, selection_criteria::SelectionCriteria, test::{ @@ -22,7 +24,6 @@ use crate::{ log_uncaptured, spec::deserialize_spec_tests, util::{get_default_name, FailPointGuard}, - EventClient, FailPoint, TestClient, SERVERLESS, @@ -116,6 +117,7 @@ struct TestContext { description: String, ns: Namespace, internal_client: TestClient, + #[allow(deprecated)] client: EventClient, fail_point_guards: Vec, session0: Option, @@ -181,10 +183,10 @@ impl TestContext { match data { TestData::Single(data) => { if !data.is_empty() { - let options = InsertManyOptions::builder() + coll.insert_many(data.clone()) .write_concern(WriteConcern::majority()) - .build(); - coll.insert_many(data.clone(), options).await.unwrap(); + .await + .unwrap(); } } TestData::Many(_) => panic!("{}: invalid data format", &test.description), @@ -210,6 +212,7 @@ impl TestContext { .min_heartbeat_freq(Some(Duration::from_millis(50))); #[cfg(feature = "in-use-encryption-unstable")] let builder = csfle::set_auto_enc(builder, test); + #[allow(deprecated)] let client = builder.event_client().build().await; // TODO RUST-900: Remove this extraneous call. 
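For readers following the migration, here is a minimal standalone sketch (not part of the patch) of the fluent action style these hunks convert the tests to. The database and collection names are placeholders; the chained calls mirror the conversions shown above (insert_many(...).write_concern(...), .session(...), find_one(...).session(...)), written against the public crate path.

    // Sketch of the post-migration fluent API; names are placeholders.
    use mongodb::{
        bson::{doc, Document},
        error::Result,
        options::WriteConcern,
        Client, Collection,
    };

    async fn fluent_style(client: &Client) -> Result<()> {
        let coll: Collection<Document> = client.database("db").collection("coll");
        let mut session = client.start_session().await?;

        // Old: coll.insert_many_with_session(docs, options, session).await?
        // New: options and the session are chained onto the action before .await.
        coll.insert_many(vec![doc! { "x": 1 }, doc! { "x": 2 }])
            .write_concern(WriteConcern::majority())
            .session(&mut session)
            .await?;

        // Old: coll.find_one_with_session(filter, options, session).await?
        // New: the filter is a required argument; everything else is a builder call.
        let _found = coll
            .find_one(doc! { "x": 1 })
            .session(&mut session)
            .await?;

        Ok(())
    }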
@@ -304,6 +307,7 @@ pub(crate) struct OpSessions<'a> { pub(crate) struct OpRunner<'a> { description: String, internal_client: TestClient, + #[allow(deprecated)] client: EventClient, ns: Namespace, fail_point_guards: &'a mut Vec, @@ -504,8 +508,10 @@ async fn run_v2_test(path: std::path::PathBuf, test_file: TestFile) { } if let Some(expectations) = &test.expectations { + #[allow(deprecated)] let events: Vec = test_ctx .client + .events .get_all_command_started_events() .into_iter() .map(Into::into) @@ -546,15 +552,17 @@ async fn run_v2_test(path: std::path::PathBuf, test_file: TestFile) { } } +#[allow(deprecated)] fn assert_different_lsid_on_last_two_commands(client: &EventClient) { - let events = client.get_all_command_started_events(); + let events = client.events.get_all_command_started_events(); let lsid1 = events[events.len() - 1].command.get("lsid").unwrap(); let lsid2 = events[events.len() - 2].command.get("lsid").unwrap(); assert_ne!(lsid1, lsid2); } +#[allow(deprecated)] fn assert_same_lsid_on_last_two_commands(client: &EventClient) { - let events = client.get_all_command_started_events(); + let events = client.events.get_all_command_started_events(); let lsid1 = events[events.len() - 1].command.get("lsid").unwrap(); let lsid2 = events[events.len() - 2].command.get("lsid").unwrap(); assert_eq!(lsid1, lsid2); diff --git a/src/test/spec/v2_runner/csfle.rs b/src/test/spec/v2_runner/csfle.rs index 310eb57ad..f80e23172 100644 --- a/src/test/spec/v2_runner/csfle.rs +++ b/src/test/spec/v2_runner/csfle.rs @@ -22,7 +22,7 @@ pub(crate) async fn populate_key_vault(client: &Client, kv_data: Option<&Vec, ) -> BoxFuture<'a, Result>> { async move { + let act = collection + .find(self.filter.clone().unwrap_or_default()) + .with_options(self.options.clone()); let result = match session { Some(session) => { - let mut cursor = collection - .find_with_session(self.filter.clone(), self.options.clone(), session) - .await?; + let mut cursor = act.session(&mut *session).await?; cursor .stream(session) .try_collect::>() .await? } None => { - let cursor = collection - .find(self.filter.clone(), self.options.clone()) - .await?; + let cursor = act.await?; cursor.try_collect::>().await? } }; @@ -426,14 +425,11 @@ impl TestOperation for InsertMany { let options = self.options.clone(); async move { - let result = match session { - Some(session) => { - collection - .insert_many_with_session(documents, options, session) - .await? - } - None => collection.insert_many(documents, options).await?, - }; + let result = collection + .insert_many(documents) + .with_options(options) + .optional(session, |a, s| a.session(s)) + .await?; let ids: HashMap = result .inserted_ids .into_iter() @@ -462,14 +458,11 @@ impl TestOperation for InsertOne { let document = self.document.clone(); let options = self.options.clone(); async move { - let result = match session { - Some(session) => { - collection - .insert_one_with_session(document, options, session) - .await? 
- } - None => collection.insert_one(document, options).await?, - }; + let result = collection + .insert_one(document) + .with_options(options) + .optional(session, |a, s| a.session(s)) + .await?; let result = bson::to_bson(&result)?; Ok(Some(result)) } @@ -685,17 +678,12 @@ impl TestOperation for FindOne { session: Option<&'a mut ClientSession>, ) -> BoxFuture<'a, Result>> { async move { + let action = collection + .find_one(self.filter.clone().unwrap_or_default()) + .with_options(self.options.clone()); let result = match session { - Some(session) => { - collection - .find_one_with_session(self.filter.clone(), self.options.clone(), session) - .await? - } - None => { - collection - .find_one(self.filter.clone(), self.options.clone()) - .await? - } + Some(session) => action.session(session).await?, + None => action.await?, }; match result { Some(result) => Ok(Some(Bson::from(result))), @@ -783,27 +771,11 @@ impl TestOperation for ReplaceOne { session: Option<&'a mut ClientSession>, ) -> BoxFuture<'a, Result>> { async move { - let result = match session { - Some(session) => { - collection - .replace_one_with_session( - self.filter.clone(), - self.replacement.clone(), - self.options.clone(), - session, - ) - .await? - } - None => { - collection - .replace_one( - self.filter.clone(), - self.replacement.clone(), - self.options.clone(), - ) - .await? - } - }; + let result = collection + .replace_one(self.filter.clone(), self.replacement.clone()) + .with_options(self.options.clone()) + .optional(session, |a, s| a.session(s)) + .await?; let result = bson::to_bson(&result)?; Ok(Some(result)) } @@ -826,27 +798,11 @@ impl TestOperation for FindOneAndUpdate { session: Option<&'a mut ClientSession>, ) -> BoxFuture<'a, Result>> { async move { - let result = match session { - Some(session) => { - collection - .find_one_and_update_with_session( - self.filter.clone(), - self.update.clone(), - self.options.clone(), - session, - ) - .await? - } - None => { - collection - .find_one_and_update( - self.filter.clone(), - self.update.clone(), - self.options.clone(), - ) - .await? - } - }; + let result = collection + .find_one_and_update(self.filter.clone(), self.update.clone()) + .with_options(self.options.clone()) + .optional(session, |a, s| a.session(s)) + .await?; let result = bson::to_bson(&result)?; Ok(Some(result)) } @@ -869,27 +825,11 @@ impl TestOperation for FindOneAndReplace { session: Option<&'a mut ClientSession>, ) -> BoxFuture<'a, Result>> { async move { - let result = match session { - Some(session) => { - collection - .find_one_and_replace_with_session( - self.filter.clone(), - self.replacement.clone(), - self.options.clone(), - session, - ) - .await? - } - None => { - collection - .find_one_and_replace( - self.filter.clone(), - self.replacement.clone(), - self.options.clone(), - ) - .await? - } - }; + let result = collection + .find_one_and_replace(self.filter.clone(), self.replacement.clone()) + .with_options(self.options.clone()) + .optional(session, |a, s| a.session(s)) + .await?; let result = bson::to_bson(&result)?; Ok(Some(result)) } @@ -911,22 +851,11 @@ impl TestOperation for FindOneAndDelete { session: Option<&'a mut ClientSession>, ) -> BoxFuture<'a, Result>> { async move { - let result = match session { - Some(session) => { - collection - .find_one_and_delete_with_session( - self.filter.clone(), - self.options.clone(), - session, - ) - .await? - } - None => { - collection - .find_one_and_delete(self.filter.clone(), self.options.clone()) - .await? 
- } - }; + let result = collection + .find_one_and_delete(self.filter.clone()) + .with_options(self.options.clone()) + .optional(session, |a, s| a.session(s)) + .await?; let result = bson::to_bson(&result)?; Ok(Some(result)) } @@ -1076,7 +1005,8 @@ impl TestOperation for StartTransaction { ) -> BoxFuture<'a, Result>> { async move { session - .start_transaction(self.options.clone()) + .start_transaction() + .with_options(self.options.clone()) .await .map(|_| None) } @@ -1483,7 +1413,9 @@ impl TestOperation for WithTransaction { async move { let session = sessions.session0.unwrap(); session - .with_transaction( + .start_transaction() + .with_options(self.options.clone()) + .and_run( (runner, &self.callback.operations, sessions.session1), |session, (runner, operations, session1)| { async move { @@ -1507,7 +1439,6 @@ impl TestOperation for WithTransaction { } .boxed() }, - self.options.clone(), ) .await?; Ok(None) diff --git a/src/test/spec/v2_runner/test_file.rs b/src/test/spec/v2_runner/test_file.rs index 033279a16..b622162ab 100644 --- a/src/test/spec/v2_runner/test_file.rs +++ b/src/test/spec/v2_runner/test_file.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Deserializer}; use crate::{ bson::Document, - options::{FindOptions, ReadPreference, SelectionCriteria, SessionOptions}, + options::{ReadPreference, SelectionCriteria, SessionOptions}, test::{ log_uncaptured, spec::merge_uri_options, @@ -177,12 +177,10 @@ impl Outcome { .database(db_name) .collection_with_options(coll_name, coll_opts); let selection_criteria = SelectionCriteria::ReadPreference(ReadPreference::Primary); - let options = FindOptions::builder() + let actual_data: Vec = coll + .find(doc! {}) .sort(doc! { "_id": 1 }) .selection_criteria(selection_criteria) - .build(); - let actual_data: Vec = coll - .find(None, options) .await .unwrap() .try_collect() diff --git a/src/test/spec/write_error.rs b/src/test/spec/write_error.rs index 0efeb47fb..a80267a68 100644 --- a/src/test/spec/write_error.rs +++ b/src/test/spec/write_error.rs @@ -1,12 +1,15 @@ +#[allow(deprecated)] +use crate::test::EventClient; use crate::{ bson::{doc, Document}, error::{ErrorKind, WriteFailure}, - test::{log_uncaptured, EventClient}, + test::log_uncaptured, Collection, }; #[tokio::test] async fn details() { + #[allow(deprecated)] let client = EventClient::new().await; if client.server_version_lt(5, 0) { @@ -24,12 +27,16 @@ async fn details() { .await .unwrap(); let coll: Collection = db.collection("test"); - let err = coll.insert_one(doc! { "x": 1 }, None).await.unwrap_err(); + let err = coll.insert_one(doc! 
{ "x": 1 }).await.unwrap_err(); let write_err = match *err.kind { ErrorKind::Write(WriteFailure::WriteError(e)) => e, _ => panic!("expected WriteError, got {:?}", err.kind), }; - let (_, event) = client.get_successful_command_execution("insert"); + #[allow(deprecated)] + let (_, event) = { + let mut events = client.events.clone(); + events.get_successful_command_execution("insert") + }; assert_eq!(write_err.code, 121 /* DocumentValidationFailure */); assert_eq!( &write_err.details.unwrap(), diff --git a/src/test/util.rs b/src/test/util.rs index 98c8ebd63..e93bd1a91 100644 --- a/src/test/util.rs +++ b/src/test/util.rs @@ -1,15 +1,16 @@ mod event; +pub(crate) mod event_buffer; mod failpoint; mod matchable; -mod subscriber; #[cfg(feature = "tracing-unstable")] mod trace; +#[allow(deprecated)] +pub(crate) use self::event::EventClient; pub(crate) use self::{ - event::{Event, EventClient, EventHandler}, + event::Event, failpoint::{FailPoint, FailPointGuard, FailPointMode}, matchable::{assert_matches, eq_matches, is_expected_type, MatchErrExt, Matchable}, - subscriber::EventSubscriber, }; #[cfg(feature = "tracing-unstable")] @@ -20,8 +21,7 @@ pub(crate) use self::trace::{ TracingHandler, }; -use std::{fmt::Debug, sync::Arc, time::Duration}; - +use self::event_buffer::EventBuffer; #[cfg(feature = "in-use-encryption-unstable")] use crate::client::EncryptedClientBuilder; use crate::{ @@ -32,10 +32,11 @@ use crate::{ use bson::Document; use semver::{Version, VersionReq}; use serde::{de::DeserializeOwned, Serialize}; +use std::{fmt::Debug, time::Duration}; use super::get_client_options; use crate::{ - error::{CommandError, ErrorKind, Result}, + error::Result, options::{AuthMechanism, ClientOptions, CollectionOptions, CreateCollectionOptions}, test::{ update_options_for_testing, @@ -68,7 +69,7 @@ impl Client { pub(crate) fn test_builder() -> TestClientBuilder { TestClientBuilder { options: None, - handler: None, + buffer: None, min_heartbeat_freq: None, #[cfg(feature = "in-use-encryption-unstable")] encrypted: None, @@ -78,7 +79,7 @@ impl Client { pub(crate) struct TestClientBuilder { options: Option, - handler: Option>, + buffer: Option, min_heartbeat_freq: Option, #[cfg(feature = "in-use-encryption-unstable")] encrypted: Option, @@ -106,10 +107,10 @@ impl TestClientBuilder { self } - pub(crate) fn event_handler(mut self, handler: impl Into>>) -> Self { - let handler = handler.into(); - assert!(self.handler.is_none() || handler.is_none()); - self.handler = handler; + pub(crate) fn event_buffer(mut self, buffer: impl Into>) -> Self { + let buffer = buffer.into(); + assert!(self.buffer.is_none() || buffer.is_none()); + self.buffer = buffer; self } @@ -139,10 +140,8 @@ impl TestClientBuilder { None => get_client_options().await.clone(), }; - if let Some(handler) = self.handler { - options.command_event_handler = Some(handler.clone().into()); - options.cmap_event_handler = Some(handler.clone().into()); - options.sdam_event_handler = Some(handler.clone().into()); + if let Some(handler) = self.buffer { + handler.register(&mut options); } if let Some(freq) = self.min_heartbeat_freq { @@ -163,8 +162,8 @@ impl TestClientBuilder { TestClient::from_client(client).await } - pub(crate) fn handler(&self) -> Option<&Arc> { - self.handler.as_ref() + pub(crate) fn buffer(&self) -> Option<&EventBuffer> { + self.buffer.as_ref() } } @@ -175,18 +174,7 @@ impl TestClient { } pub(crate) async fn with_options(options: impl Into>) -> Self { - Self::with_handler(None, options).await - } - - pub(crate) async fn with_handler( - 
event_handler: Option>, - options: impl Into>, - ) -> Self { - Client::test_builder() - .options(options) - .event_handler(event_handler) - .build() - .await + Client::test_builder().options(options).build().await } async fn from_client(client: Client) -> Self { @@ -268,7 +256,7 @@ impl TestClient { coll_name: &str, ) -> Collection { let coll = self.get_coll(db_name, coll_name); - drop_collection(&coll).await; + coll.drop().await.unwrap(); coll } @@ -278,10 +266,10 @@ impl TestClient { coll_name: &str, ) -> Collection where - T: Serialize + DeserializeOwned + Unpin + Debug, + T: Serialize + DeserializeOwned + Unpin + Debug + Send + Sync, { let coll = self.database(db_name).collection(coll_name); - drop_collection(&coll).await; + coll.drop().await.unwrap(); coll } @@ -295,17 +283,6 @@ impl TestClient { .collection_with_options(coll_name, options) } - pub(crate) async fn init_db_and_coll_with_options( - &self, - db_name: &str, - coll_name: &str, - options: CollectionOptions, - ) -> Collection { - let coll = self.get_coll_with_options(db_name, coll_name, options); - drop_collection(&coll).await; - coll - } - pub(crate) async fn create_fresh_collection( &self, db_name: &str, @@ -413,7 +390,7 @@ impl TestClient { pub(crate) async fn drop_collection(&self, db_name: &str, coll_name: &str) { let coll = self.get_coll(db_name, coll_name); - drop_collection(&coll).await; + coll.drop().await.unwrap(); } /// Returns the `Topology' that can be determined without a server query, i.e. all except @@ -493,18 +470,6 @@ impl TestClient { } } -pub(crate) async fn drop_collection(coll: &Collection) -where - T: Serialize + DeserializeOwned + Unpin + Debug, -{ - match coll.drop().await.map_err(|e| *e.kind) { - Err(ErrorKind::Command(CommandError { code: 26, .. })) | Ok(_) => {} - e @ Err(_) => { - e.unwrap(); - } - }; -} - pub(crate) fn get_default_name(description: &str) -> String { let mut db_name = description.replace('$', "%").replace([' ', '.'], "_"); // database names must have fewer than 38 characters diff --git a/src/test/util/event.rs b/src/test/util/event.rs index 7478ad83c..ffb00f9fc 100644 --- a/src/test/util/event.rs +++ b/src/test/util/event.rs @@ -1,63 +1,22 @@ -use std::{ - collections::VecDeque, - fs::File, - io::{BufWriter, Write}, - sync::{Arc, Mutex, RwLock}, - time::Duration, -}; +use std::time::Duration; use derive_more::From; -use time::OffsetDateTime; -use tokio::sync::broadcast::error::SendError; +use serde::Serialize; -use super::{subscriber::EventSubscriber, TestClient, TestClientBuilder}; +use super::{event_buffer::EventBuffer, TestClient, TestClientBuilder}; use crate::{ - bson::{doc, to_document, Document}, + bson::doc, event::{ - cmap::{ - CmapEvent, - ConnectionCheckedInEvent, - ConnectionCheckedOutEvent, - ConnectionCheckoutFailedEvent, - ConnectionCheckoutStartedEvent, - ConnectionClosedEvent, - ConnectionCreatedEvent, - ConnectionReadyEvent, - PoolClearedEvent, - PoolClosedEvent, - PoolCreatedEvent, - PoolReadyEvent, - }, - command::{CommandEvent, CommandFailedEvent, CommandStartedEvent, CommandSucceededEvent}, - sdam::{ - SdamEvent, - ServerClosedEvent, - ServerDescriptionChangedEvent, - ServerHeartbeatFailedEvent, - ServerHeartbeatStartedEvent, - ServerHeartbeatSucceededEvent, - ServerOpeningEvent, - TopologyClosedEvent, - TopologyDescriptionChangedEvent, - TopologyOpeningEvent, - }, + cmap::CmapEvent, + command::{CommandEvent, CommandStartedEvent, CommandSucceededEvent}, + sdam::SdamEvent, }, options::ClientOptions, - runtime, - test::spec::ExpectedEventType, Client, }; 
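As a reference point for the event.rs rewrite below, here is a hedged sketch (not part of the patch) of how a test is expected to use the new EventBuffer in place of the channel-based EventHandler; it only uses methods introduced by src/test/util/event_buffer.rs later in this patch, and the module paths are approximate.

    // Sketch: register an EventBuffer on ClientOptions and query it afterwards.
    use crate::{
        bson::{doc, Document},
        test::{get_client_options, util::event_buffer::EventBuffer},
        Client,
    };

    async fn event_buffer_example() -> crate::error::Result<()> {
        // Register the buffer as the command/CMAP/SDAM event handler.
        let buffer = EventBuffer::new();
        let mut options = get_client_options().await.clone();
        buffer.register(&mut options);

        let client = Client::with_options(options)?;
        client
            .database("db")
            .collection::<Document>("coll")
            .insert_one(doc! { "x": 1 })
            .await?;

        // Events can be inspected synchronously from the buffer...
        assert_eq!(buffer.get_command_started_events(&["insert"]).len(), 1);

        // ...or snapshotted together with a notifier to await further events.
        let (_events, _more) = buffer.watch_all();
        Ok(())
    }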
-pub(crate) type EventQueue = Arc>>; - -fn add_event_to_queue(event_queue: &EventQueue, event: T) { - event_queue - .write() - .unwrap() - .push_back((event, OffsetDateTime::now_utc())) -} - -#[derive(Clone, Debug, From)] +#[derive(Clone, Debug, From, Serialize)] +#[serde(untagged)] #[allow(clippy::large_enum_variant)] pub(crate) enum Event { Cmap(CmapEvent), @@ -124,7 +83,7 @@ impl CommandEvent { } } - fn request_id(&self) -> i32 { + pub(crate) fn request_id(&self) -> i32 { match self { CommandEvent::Started(event) => event.request_id, CommandEvent::Failed(event) => event.request_id, @@ -147,415 +106,14 @@ impl CommandEvent { } } -#[derive(Clone, Debug)] -pub(crate) struct EventHandler { - command_events: EventQueue, - sdam_events: EventQueue, - cmap_events: EventQueue, - event_broadcaster: tokio::sync::broadcast::Sender, - connections_checked_out: Arc>, -} - -impl EventHandler { - pub(crate) fn new() -> Self { - let (event_broadcaster, _) = tokio::sync::broadcast::channel(10_000); - Self { - command_events: Default::default(), - sdam_events: Default::default(), - cmap_events: Default::default(), - event_broadcaster, - connections_checked_out: Arc::new(Mutex::new(0)), - } - } - - pub(crate) fn command_sender(self: Arc) -> tokio::sync::mpsc::Sender { - let (tx, mut rx) = tokio::sync::mpsc::channel::(100); - crate::runtime::spawn(async move { - while let Some(ev) = rx.recv().await { - self.handle(ev.clone()); - add_event_to_queue(&self.command_events, ev); - } - }); - tx - } - - pub(crate) fn cmap_sender(self: Arc) -> tokio::sync::mpsc::Sender { - let (tx, mut rx) = tokio::sync::mpsc::channel::(100); - crate::runtime::spawn(async move { - while let Some(ev) = rx.recv().await { - match &ev { - CmapEvent::ConnectionCheckedOut(_) => { - *self.connections_checked_out.lock().unwrap() += 1 - } - CmapEvent::ConnectionCheckedIn(_) => { - *self.connections_checked_out.lock().unwrap() -= 1 - } - _ => (), - } - self.handle(ev.clone()); - add_event_to_queue(&self.cmap_events, ev); - } - }); - tx - } - - pub(crate) fn sdam_sender(self: Arc) -> tokio::sync::mpsc::Sender { - let (tx, mut rx) = tokio::sync::mpsc::channel::(100); - crate::runtime::spawn(async move { - while let Some(ev) = rx.recv().await { - self.handle(ev.clone()); - add_event_to_queue(&self.sdam_events, ev); - } - }); - tx - } - - fn handle(&self, event: impl Into) { - // this only errors if no receivers are listening which isn't a concern here. - let _: std::result::Result> = - self.event_broadcaster.send(event.into()); - } - - pub(crate) fn subscribe(&self) -> EventSubscriber { - EventSubscriber::new(self, self.event_broadcaster.subscribe()) - } - - pub(crate) fn broadcaster(&self) -> &tokio::sync::broadcast::Sender { - &self.event_broadcaster - } - - /// Gets all of the command started events for the specified command names. - pub(crate) fn get_command_started_events( - &self, - command_names: &[&str], - ) -> Vec { - let events = self.command_events.read().unwrap(); - events - .iter() - .filter_map(|(event, _)| match event { - CommandEvent::Started(event) => { - if command_names.contains(&event.command_name.as_str()) { - Some(event.clone()) - } else { - None - } - } - _ => None, - }) - .collect() - } - - /// Gets all of the command started events, excluding configureFailPoint events. 
- pub(crate) fn get_all_command_started_events(&self) -> Vec { - let events = self.command_events.read().unwrap(); - events - .iter() - .filter_map(|(event, _)| match event { - CommandEvent::Started(event) if event.command_name != "configureFailPoint" => { - Some(event.clone()) - } - _ => None, - }) - .collect() - } - - pub(crate) fn get_filtered_events( - &self, - event_type: ExpectedEventType, - filter: F, - ) -> Vec - where - F: Fn(&Event) -> bool, - { - match event_type { - ExpectedEventType::Command => { - let events = self.command_events.read().unwrap(); - events - .iter() - .cloned() - .map(|(event, _)| Event::Command(event)) - .filter(|e| filter(e)) - .collect() - } - ExpectedEventType::Cmap => { - let events = self.cmap_events.read().unwrap(); - events - .iter() - .cloned() - .map(|(event, _)| Event::Cmap(event)) - .filter(|e| filter(e)) - .collect() - } - ExpectedEventType::CmapWithoutConnectionReady => { - let mut events = self.get_filtered_events(ExpectedEventType::Cmap, filter); - events.retain(|ev| !matches!(ev, Event::Cmap(CmapEvent::ConnectionReady(_)))); - events - } - ExpectedEventType::Sdam => { - let events = self.sdam_events.read().unwrap(); - events - .iter() - .cloned() - .map(|(event, _)| Event::Sdam(event)) - .filter(filter) - .collect() - } - } - } - - pub(crate) fn write_events_list_to_file(&self, names: &[&str], writer: &mut BufWriter) { - let mut add_comma = false; - let mut write_json = |mut event: Document, name: &str, time: &OffsetDateTime| { - event.insert("name", name); - event.insert("observedAt", time.unix_timestamp()); - let mut json_string = serde_json::to_string(&event).unwrap(); - if add_comma { - json_string.insert(0, ','); - } else { - add_comma = true; - } - write!(writer, "{}", json_string).unwrap(); - }; - - for (command_event, time) in self.command_events.read().unwrap().iter() { - let name = command_event.name(); - if names.contains(&name) { - let event = to_document(&command_event).unwrap(); - write_json(event, name, time); - } - } - for (sdam_event, time) in self.sdam_events.read().unwrap().iter() { - let name = sdam_event.name(); - if names.contains(&name) { - let event = to_document(&sdam_event).unwrap(); - write_json(event, name, time); - } - } - for (cmap_event, time) in self.cmap_events.read().unwrap().iter() { - let name = cmap_event.planned_maintenance_testing_name(); - if names.contains(&name) { - let event = to_document(&cmap_event).unwrap(); - write_json(event, name, time); - } - } - } - - pub(crate) fn connections_checked_out(&self) -> u32 { - *self.connections_checked_out.lock().unwrap() - } - - pub(crate) fn clear_cached_events(&self) { - self.command_events.write().unwrap().clear(); - self.cmap_events.write().unwrap().clear(); - self.sdam_events.write().unwrap().clear(); - } -} - -#[allow(deprecated)] -impl crate::event::cmap::CmapEventHandler for EventHandler { - fn handle_connection_checked_out_event(&self, event: ConnectionCheckedOutEvent) { - *self.connections_checked_out.lock().unwrap() += 1; - let event = CmapEvent::ConnectionCheckedOut(event); - self.handle(event.clone()); - add_event_to_queue(&self.cmap_events, event); - } - - fn handle_connection_checkout_failed_event(&self, event: ConnectionCheckoutFailedEvent) { - let event = CmapEvent::ConnectionCheckoutFailed(event); - self.handle(event.clone()); - add_event_to_queue(&self.cmap_events, event); - } - - fn handle_pool_cleared_event(&self, pool_cleared_event: PoolClearedEvent) { - let event = CmapEvent::PoolCleared(pool_cleared_event); - self.handle(event.clone()); - 
add_event_to_queue(&self.cmap_events, event); - } - - fn handle_pool_ready_event(&self, event: PoolReadyEvent) { - let event = CmapEvent::PoolReady(event); - self.handle(event.clone()); - add_event_to_queue(&self.cmap_events, event); - } - - fn handle_pool_created_event(&self, event: PoolCreatedEvent) { - let event = CmapEvent::PoolCreated(event); - self.handle(event.clone()); - add_event_to_queue(&self.cmap_events, event); - } - - fn handle_pool_closed_event(&self, event: PoolClosedEvent) { - let event = CmapEvent::PoolClosed(event); - self.handle(event.clone()); - add_event_to_queue(&self.cmap_events, event); - } - - fn handle_connection_created_event(&self, event: ConnectionCreatedEvent) { - let event = CmapEvent::ConnectionCreated(event); - self.handle(event.clone()); - add_event_to_queue(&self.cmap_events, event); - } - - fn handle_connection_ready_event(&self, event: ConnectionReadyEvent) { - let event = CmapEvent::ConnectionReady(event); - self.handle(event.clone()); - add_event_to_queue(&self.cmap_events, event); - } - - fn handle_connection_closed_event(&self, event: ConnectionClosedEvent) { - let event = CmapEvent::ConnectionClosed(event); - self.handle(event.clone()); - add_event_to_queue(&self.cmap_events, event); - } - - fn handle_connection_checkout_started_event(&self, event: ConnectionCheckoutStartedEvent) { - let event = CmapEvent::ConnectionCheckoutStarted(event); - self.handle(event.clone()); - add_event_to_queue(&self.cmap_events, event); - } - - fn handle_connection_checked_in_event(&self, event: ConnectionCheckedInEvent) { - *self.connections_checked_out.lock().unwrap() -= 1; - let event = CmapEvent::ConnectionCheckedIn(event); - self.handle(event.clone()); - add_event_to_queue(&self.cmap_events, event); - } -} - -#[allow(deprecated)] -impl crate::event::sdam::SdamEventHandler for EventHandler { - fn handle_server_description_changed_event(&self, event: ServerDescriptionChangedEvent) { - let event = SdamEvent::ServerDescriptionChanged(Box::new(event)); - self.handle(event.clone()); - add_event_to_queue(&self.sdam_events, event); - } - - fn handle_server_opening_event(&self, event: ServerOpeningEvent) { - let event = SdamEvent::ServerOpening(event); - self.handle(event.clone()); - add_event_to_queue(&self.sdam_events, event); - } - - fn handle_server_closed_event(&self, event: ServerClosedEvent) { - let event = SdamEvent::ServerClosed(event); - self.handle(event.clone()); - add_event_to_queue(&self.sdam_events, event); - } - - fn handle_topology_description_changed_event(&self, event: TopologyDescriptionChangedEvent) { - let event = SdamEvent::TopologyDescriptionChanged(Box::new(event)); - self.handle(event.clone()); - add_event_to_queue(&self.sdam_events, event); - } - - fn handle_topology_opening_event(&self, event: TopologyOpeningEvent) { - let event = SdamEvent::TopologyOpening(event); - self.handle(event.clone()); - add_event_to_queue(&self.sdam_events, event); - } - - fn handle_topology_closed_event(&self, event: TopologyClosedEvent) { - let event = SdamEvent::TopologyClosed(event); - self.handle(event.clone()); - add_event_to_queue(&self.sdam_events, event); - } - - fn handle_server_heartbeat_started_event(&self, event: ServerHeartbeatStartedEvent) { - let event = SdamEvent::ServerHeartbeatStarted(event); - self.handle(event.clone()); - add_event_to_queue(&self.sdam_events, event); - } - - fn handle_server_heartbeat_succeeded_event(&self, event: ServerHeartbeatSucceededEvent) { - let event = SdamEvent::ServerHeartbeatSucceeded(event); - 
self.handle(event.clone()); - add_event_to_queue(&self.sdam_events, event); - } - - fn handle_server_heartbeat_failed_event(&self, event: ServerHeartbeatFailedEvent) { - let event = SdamEvent::ServerHeartbeatFailed(event); - self.handle(event.clone()); - add_event_to_queue(&self.sdam_events, event); - } -} - -#[allow(deprecated)] -impl crate::event::command::CommandEventHandler for EventHandler { - fn handle_command_started_event(&self, event: CommandStartedEvent) { - let event = CommandEvent::Started(event); - self.handle(event.clone()); - add_event_to_queue(&self.command_events, event); - } - - fn handle_command_failed_event(&self, event: CommandFailedEvent) { - let event = CommandEvent::Failed(event); - self.handle(event.clone()); - add_event_to_queue(&self.command_events, event); - } - - fn handle_command_succeeded_event(&self, event: CommandSucceededEvent) { - let event = CommandEvent::Succeeded(event); - self.handle(event.clone()); - add_event_to_queue(&self.command_events, event); - } -} - -impl EventSubscriber<'_, EventHandler, Event> { - /// Waits for the next CommandStartedEvent/CommandFailedEvent pair. - /// If the next CommandStartedEvent is associated with a CommandFailedEvent, this method will - /// panic. - pub(crate) async fn wait_for_successful_command_execution( - &mut self, - timeout: Duration, - command_name: impl AsRef, - ) -> Option<(CommandStartedEvent, CommandSucceededEvent)> { - runtime::timeout(timeout, async { - let started = self - .filter_map_event(Duration::MAX, |event| match event { - Event::Command(CommandEvent::Started(s)) - if s.command_name == command_name.as_ref() => - { - Some(s) - } - _ => None, - }) - .await - .unwrap(); - - let succeeded = self - .filter_map_event(Duration::MAX, |event| match event { - Event::Command(CommandEvent::Succeeded(s)) - if s.request_id == started.request_id => - { - Some(s) - } - Event::Command(CommandEvent::Failed(f)) - if f.request_id == started.request_id => - { - panic!( - "expected {} to succeed but it failed: {:#?}", - command_name.as_ref(), - f - ) - } - _ => None, - }) - .await - .unwrap(); - - (started, succeeded) - }) - .await - .ok() - } -} - +#[deprecated = "use EventBuffer directly"] #[derive(Clone, Debug)] pub(crate) struct EventClient { client: TestClient, - pub(crate) handler: Arc, + pub(crate) events: EventBuffer, } +#[allow(deprecated)] impl std::ops::Deref for EventClient { type Target = TestClient; @@ -564,6 +122,7 @@ impl std::ops::Deref for EventClient { } } +#[allow(deprecated)] impl std::ops::DerefMut for EventClient { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.client @@ -571,151 +130,76 @@ impl std::ops::DerefMut for EventClient { } impl TestClientBuilder { + #[deprecated = "use EventBuffer directly"] + #[allow(deprecated)] pub(crate) fn event_client(self) -> EventClientBuilder { EventClientBuilder { inner: self } } } +#[deprecated = "use EventBuffer directly"] pub(crate) struct EventClientBuilder { inner: TestClientBuilder, } +#[allow(deprecated)] impl EventClientBuilder { pub(crate) async fn build(self) -> EventClient { let mut inner = self.inner; - if inner.handler.is_none() { - inner = inner.event_handler(Arc::new(EventHandler::new())); + if inner.buffer.is_none() { + inner = inner.event_buffer(EventBuffer::new()); } - let handler = inner.handler().unwrap().clone(); + let mut handler = inner.buffer().unwrap().clone(); let client = inner.build().await; // clear events from commands used to set up client. 
- handler.command_events.write().unwrap().clear(); + handler.retain(|ev| !matches!(ev, Event::Command(_))); - EventClient { client, handler } + EventClient { + client, + events: handler, + } } } +#[allow(deprecated)] impl EventClient { pub(crate) async fn new() -> Self { EventClient::with_options(None).await } - async fn with_options_and_handler( + async fn with_options_and_buffer( options: impl Into>, - handler: impl Into>, + handler: impl Into>, ) -> Self { Client::test_builder() .options(options) - .event_handler(handler.into().map(Arc::new)) + .event_buffer(handler) .event_client() .build() .await } pub(crate) async fn with_options(options: impl Into>) -> Self { - Self::with_options_and_handler(options, None).await + Self::with_options_and_buffer(options, None).await } pub(crate) async fn with_additional_options( options: impl Into>, min_heartbeat_freq: Option, use_multiple_mongoses: Option, - event_handler: impl Into>, + event_handler: impl Into>, ) -> Self { Client::test_builder() .additional_options(options, use_multiple_mongoses.unwrap_or(false)) .await .min_heartbeat_freq(min_heartbeat_freq) - .event_handler(event_handler.into().map(Arc::new)) + .event_buffer(event_handler) .event_client() .build() .await } - /// Gets the first started/succeeded pair of events for the given command name, popping off all - /// events before and between them. - /// - /// Panics if the command failed or could not be found in the events. - pub(crate) fn get_successful_command_execution( - &self, - command_name: &str, - ) -> (CommandStartedEvent, CommandSucceededEvent) { - let mut command_events = self.handler.command_events.write().unwrap(); - - let mut started: Option = None; - - while let Some((event, _)) = command_events.pop_front() { - if event.command_name() == command_name { - match started { - None => { - let event = event - .as_command_started() - .unwrap_or_else(|| { - panic!("first event not a command started event {:?}", event) - }) - .clone(); - started = Some(event); - continue; - } - Some(started) if event.request_id() == started.request_id => { - let succeeded = event - .as_command_succeeded() - .expect("second event not a command succeeded event") - .clone(); - - return (started, succeeded); - } - _ => continue, - } - } - } - panic!("could not find event for {} command", command_name); - } - - /// Gets all of the command started events for the specified command names. - pub(crate) fn get_command_started_events( - &self, - command_names: &[&str], - ) -> Vec { - self.handler.get_command_started_events(command_names) - } - - /// Gets all command started events, excluding configureFailPoint events. - pub(crate) fn get_all_command_started_events(&self) -> Vec { - self.handler.get_all_command_started_events() - } - - pub(crate) fn get_command_events(&self, command_names: &[&str]) -> Vec { - self.handler - .command_events - .write() - .unwrap() - .drain(..) 
- .map(|(event, _)| event) - .filter(|event| command_names.contains(&event.command_name())) - .collect() - } - - pub(crate) fn count_pool_cleared_events(&self) -> usize { - let mut out = 0; - for (event, _) in self.handler.cmap_events.read().unwrap().iter() { - if matches!(event, CmapEvent::PoolCleared(_)) { - out += 1; - } - } - out - } - - #[allow(dead_code)] - pub(crate) fn subscribe_to_events(&self) -> EventSubscriber<'_, EventHandler, Event> { - self.handler.subscribe() - } - - pub(crate) fn clear_cached_events(&self) { - self.handler.clear_cached_events() - } - #[allow(dead_code)] pub(crate) fn into_client(self) -> crate::Client { self.client.into_client() @@ -723,13 +207,17 @@ impl EventClient { } #[tokio::test] +#[allow(deprecated)] async fn command_started_event_count() { let client = EventClient::new().await; let coll = client.database("foo").collection("bar"); for i in 0..10 { - coll.insert_one(doc! { "x": i }, None).await.unwrap(); + coll.insert_one(doc! { "x": i }).await.unwrap(); } - assert_eq!(client.get_command_started_events(&["insert"]).len(), 10); + assert_eq!( + client.events.get_command_started_events(&["insert"]).len(), + 10 + ); } diff --git a/src/test/util/event_buffer.rs b/src/test/util/event_buffer.rs new file mode 100644 index 000000000..d1ba86e94 --- /dev/null +++ b/src/test/util/event_buffer.rs @@ -0,0 +1,484 @@ +use std::{ + sync::{Arc, Mutex}, + time::Duration, +}; + +use time::OffsetDateTime; +use tokio::sync::{futures::Notified, Notify}; + +use crate::{ + client::options::ClientOptions, + event::{ + cmap::CmapEvent, + command::{CommandEvent, CommandStartedEvent, CommandSucceededEvent}, + }, + runtime, +}; + +use super::Event; + +/// A buffer of events that provides utility methods for querying the buffer and awaiting new event +/// arrival. +/// +/// New test code should prefer to use this over `EventSubscriber`. +#[derive(Clone, Debug)] +pub(crate) struct EventBuffer { + inner: Arc>, +} + +#[derive(Debug)] +struct EventBufferInner { + events: Mutex>, + event_received: Notify, +} + +#[derive(Debug)] +struct GenVec { + data: Vec, + generation: Generation, +} + +#[derive(Copy, Clone, PartialEq, Debug)] +struct Generation(usize); + +impl GenVec { + fn new() -> Self { + Self { + data: vec![], + generation: Generation(0), + } + } +} + +impl EventBuffer { + pub(crate) fn new() -> Self { + Self { + inner: Arc::new(EventBufferInner { + events: Mutex::new(GenVec::new()), + event_received: Notify::new(), + }), + } + } + + #[allow(unused)] + pub(crate) fn filter_map(&self, f: impl Fn(&T) -> Option) -> Vec { + self.inner + .events + .lock() + .unwrap() + .data + .iter() + .map(|(ev, _)| ev) + .filter_map(f) + .collect() + } + + /// Subscribe to events generated after the point of this call. + /// + /// New test code should prefer using `watch_all` over this. + #[deprecated = "use watch_all"] + #[allow(deprecated)] + pub(crate) fn subscribe(&self) -> EventSubscriber<'_, T> { + let (index, generation) = { + let events = self.inner.events.lock().unwrap(); + (events.data.len(), events.generation) + }; + EventSubscriber { + buffer: self, + index, + generation, + } + } + + /// Subscribe to all events contained in the buffer. + /// + /// New test code should prefer using `watch_all` over this. 
+ #[deprecated = "use watch_all"] + #[allow(deprecated)] + pub(crate) fn subscribe_all(&self) -> EventSubscriber<'_, T> { + EventSubscriber { + buffer: self, + index: 0, + generation: self.inner.events.lock().unwrap().generation, + } + } + + // The `mut` isn't necessary on `self` here, but it serves as a useful lint on those + // methods that modify; if the caller only has a `&EventHandler` it can at worst case + // `clone` to get a `mut` one. + fn invalidate(&mut self, f: impl FnOnce(&mut Vec<(T, OffsetDateTime)>) -> R) -> R { + let mut events = self.inner.events.lock().unwrap(); + events.generation = Generation(events.generation.0 + 1); + let out = f(&mut events.data); + self.inner.event_received.notify_waiters(); + out + } + + pub(crate) fn clear_cached_events(&mut self) { + self.invalidate(|data| data.clear()); + } + + pub(crate) fn retain(&mut self, mut f: impl FnMut(&T) -> bool) { + self.invalidate(|data| data.retain(|(ev, _)| f(ev))); + } + + pub(crate) fn push_event(&self, ev: T) { + self.inner + .events + .lock() + .unwrap() + .data + .push((ev, OffsetDateTime::now_utc())); + self.inner.event_received.notify_waiters(); + } +} + +impl EventBuffer { + /// Returns a list of current events and a future to await for more being received. + pub(crate) fn watch_all(&self) -> (Vec, Notified) { + // The `Notify` must be created *before* reading the events to ensure any added + // events trigger notifications. + let notify = self.inner.event_received.notified(); + let events = self + .inner + .events + .lock() + .unwrap() + .data + .iter() + .map(|(ev, _)| ev) + .cloned() + .collect(); + (events, notify) + } + + /// Returns a list of current events. + pub(crate) fn all(&self) -> Vec { + self.watch_all().0 + } + + pub(crate) fn all_timed(&self) -> Vec<(T, OffsetDateTime)> { + self.inner.events.lock().unwrap().data.clone() + } +} + +impl EventBuffer { + pub(crate) fn handler + Send + Sync + 'static>( + &self, + ) -> crate::event::EventHandler { + let this = self.clone(); + crate::event::EventHandler::callback(move |ev: V| this.push_event(ev.into())) + } +} + +impl EventBuffer { + pub(crate) fn register(&self, client_options: &mut ClientOptions) { + client_options.command_event_handler = Some(self.handler()); + client_options.sdam_event_handler = Some(self.handler()); + client_options.cmap_event_handler = Some(self.handler()); + } + + pub(crate) fn connections_checked_out(&self) -> u32 { + let mut count = 0; + for (ev, _) in self.inner.events.lock().unwrap().data.iter() { + match ev { + Event::Cmap(CmapEvent::ConnectionCheckedOut(_)) => count += 1, + Event::Cmap(CmapEvent::ConnectionCheckedIn(_)) => count -= 1, + _ => (), + } + } + count + } + + /// Gets all of the command started events for the specified command names. + pub(crate) fn get_command_started_events( + &self, + command_names: &[&str], + ) -> Vec { + self.inner + .events + .lock() + .unwrap() + .data + .iter() + .filter_map(|(event, _)| match event { + Event::Command(CommandEvent::Started(event)) => { + if command_names.contains(&event.command_name.as_str()) { + Some(event.clone()) + } else { + None + } + } + _ => None, + }) + .collect() + } + + /// Gets all of the command started events, excluding configureFailPoint events. 
+ pub(crate) fn get_all_command_started_events(&self) -> Vec { + self.inner + .events + .lock() + .unwrap() + .data + .iter() + .filter_map(|(event, _)| match event { + Event::Command(CommandEvent::Started(event)) + if event.command_name != "configureFailPoint" => + { + Some(event.clone()) + } + _ => None, + }) + .collect() + } + + /// Remove all command events from the buffer, returning those matching any of the command + /// names. + #[deprecated = "use immutable methods"] + pub(crate) fn get_command_events(&mut self, command_names: &[&str]) -> Vec { + let mut out = vec![]; + self.retain(|ev| match ev { + Event::Command(cev) => { + if command_names.contains(&cev.command_name()) { + out.push(cev.clone()); + } + false + } + _ => true, + }); + out + } + + /// Gets the first started/succeeded pair of events for the given command name, popping off all + /// command events before and between them. + /// + /// Panics if the command failed or could not be found in the events. + #[deprecated = "use immutable methods"] + pub(crate) fn get_successful_command_execution( + &mut self, + command_name: &str, + ) -> (CommandStartedEvent, CommandSucceededEvent) { + let mut started = None; + let mut succeeded = None; + self.retain(|ev| match (ev, &started, &succeeded) { + (Event::Command(cev), None, None) => { + if cev.command_name() == command_name { + started = Some( + cev.as_command_started() + .unwrap_or_else(|| { + panic!("first event not a command started event {:?}", cev) + }) + .clone(), + ); + } + false + } + (Event::Command(cev), Some(started), None) => { + if cev.request_id() == started.request_id { + succeeded = Some( + cev.as_command_succeeded() + .expect("second event not a command succeeded event") + .clone(), + ); + } + false + } + _ => true, + }); + match (started, succeeded) { + (Some(started), Some(succeeded)) => (started, succeeded), + _ => panic!("could not find event for {} command", command_name), + } + } + + pub(crate) fn count_pool_cleared_events(&self) -> usize { + let mut out = 0; + for event in self.all().iter() { + if matches!(event, Event::Cmap(CmapEvent::PoolCleared(_))) { + out += 1; + } + } + out + } +} + +/// Process events one at a time as they arrive asynchronously. +/// +/// New test code should prefer to use `EventBuffer`. +#[deprecated = "use EventBuffer"] +pub(crate) struct EventSubscriber<'a, T> { + buffer: &'a EventBuffer, + index: usize, + generation: Generation, +} + +#[allow(deprecated)] +impl<'a, T: Clone> EventSubscriber<'a, T> { + async fn next(&mut self, timeout: Duration) -> Option { + crate::runtime::timeout(timeout, async move { + loop { + let notified = self.buffer.inner.event_received.notified(); + if let Some(next) = self.try_next() { + return Some(next); + } + notified.await; + } + }) + .await + .unwrap_or(None) + } + + fn try_next(&mut self) -> Option { + let events = self.buffer.inner.events.lock().unwrap(); + if events.generation != self.generation { + panic!("EventBuffer cleared during EventStream iteration"); + } + if events.data.len() > self.index { + let event = events.data[self.index].0.clone(); + self.index += 1; + return Some(event); + } + None + } + + /// Consume and pass events to the provided closure until it returns Some or the timeout is hit. 
+ pub(crate) async fn filter_map_event( + &mut self, + timeout: Duration, + mut filter_map: F, + ) -> Option + where + F: FnMut(T) -> Option, + { + runtime::timeout(timeout, async move { + loop { + let ev = self.next(timeout).await?; + if let Some(r) = filter_map(ev) { + return Some(r); + } + } + }) + .await + .unwrap_or(None) + } + + /// Waits for an event to occur within the given duration that passes the given filter. + pub(crate) async fn wait_for_event(&mut self, timeout: Duration, mut filter: F) -> Option + where + F: FnMut(&T) -> bool, + { + self.filter_map_event(timeout, |e| if filter(&e) { Some(e) } else { None }) + .await + } + + pub(crate) async fn collect_events(&mut self, timeout: Duration, mut filter: F) -> Vec + where + F: FnMut(&T) -> bool, + { + let mut events = Vec::new(); + let _ = runtime::timeout(timeout, async { + while let Some(event) = self.wait_for_event(timeout, &mut filter).await { + events.push(event); + } + }) + .await; + events + } + + #[cfg(feature = "in-use-encryption-unstable")] + pub(crate) async fn collect_events_map( + &mut self, + timeout: Duration, + mut filter: F, + ) -> Vec + where + F: FnMut(T) -> Option, + { + let mut events = Vec::new(); + let _ = runtime::timeout(timeout, async { + while let Some(event) = self.filter_map_event(timeout, &mut filter).await { + events.push(event); + } + }) + .await; + events + } + + #[cfg(feature = "in-use-encryption-unstable")] + pub(crate) async fn clear_events(&mut self, timeout: Duration) { + self.collect_events(timeout, |_| true).await; + } + + /// Returns the received events without waiting for any more. + pub(crate) fn all(&mut self, filter: F) -> Vec + where + F: Fn(&T) -> bool, + { + let events = self.buffer.inner.events.lock().unwrap(); + if events.generation != self.generation { + panic!("EventBuffer cleared during EventStream iteration"); + } + let out = events + .data + .iter() + .skip(self.index) + .map(|(e, _)| e) + .filter(|e| filter(*e)) + .cloned() + .collect(); + self.index = events.data.len(); + out + } +} + +#[allow(deprecated)] +impl<'a> EventSubscriber<'a, Event> { + /// Waits for the next CommandStartedEvent/CommandFailedEvent pair. + /// If the next CommandStartedEvent is associated with a CommandFailedEvent, this method will + /// panic. 
+ pub(crate) async fn wait_for_successful_command_execution( + &mut self, + timeout: Duration, + command_name: impl AsRef, + ) -> Option<(CommandStartedEvent, CommandSucceededEvent)> { + runtime::timeout(timeout, async { + let started = self + .filter_map_event(Duration::MAX, |event| match event { + Event::Command(CommandEvent::Started(s)) + if s.command_name == command_name.as_ref() => + { + Some(s) + } + _ => None, + }) + .await + .unwrap(); + + let succeeded = self + .filter_map_event(Duration::MAX, |event| match event { + Event::Command(CommandEvent::Succeeded(s)) + if s.request_id == started.request_id => + { + Some(s) + } + Event::Command(CommandEvent::Failed(f)) + if f.request_id == started.request_id => + { + panic!( + "expected {} to succeed but it failed: {:#?}", + command_name.as_ref(), + f + ) + } + _ => None, + }) + .await + .unwrap(); + + (started, succeeded) + }) + .await + .ok() + } +} diff --git a/src/test/util/subscriber.rs b/src/test/util/subscriber.rs deleted file mode 100644 index 495aae006..000000000 --- a/src/test/util/subscriber.rs +++ /dev/null @@ -1,118 +0,0 @@ -use std::time::Duration; - -use crate::runtime; - -use tokio::sync::broadcast::{error::RecvError, Receiver}; - -/// A generic subscriber type that can be used to subscribe to events from any handler type -/// that publishes events to a broadcast channel. -#[derive(Debug)] -pub(crate) struct EventSubscriber<'a, H, E: Clone> { - /// A reference to the handler this subscriber is receiving events from. - /// Stored here to ensure this subscriber cannot outlive the handler that is generating its - /// events. - _handler: &'a H, - receiver: Receiver, -} - -impl EventSubscriber<'_, H, E> { - pub(crate) fn new(handler: &H, receiver: Receiver) -> EventSubscriber<'_, H, E> { - EventSubscriber { - _handler: handler, - receiver, - } - } - - /// Waits for an event to occur within the given duration that passes the given filter. - pub(crate) async fn wait_for_event(&mut self, timeout: Duration, mut filter: F) -> Option - where - F: FnMut(&E) -> bool, - { - self.filter_map_event(timeout, |e| if filter(&e) { Some(e) } else { None }) - .await - } - - pub(crate) async fn collect_events(&mut self, timeout: Duration, mut filter: F) -> Vec - where - F: FnMut(&E) -> bool, - { - let mut events = Vec::new(); - let _ = runtime::timeout(timeout, async { - while let Some(event) = self.wait_for_event(timeout, &mut filter).await { - events.push(event); - } - }) - .await; - events - } - - #[cfg(feature = "in-use-encryption-unstable")] - pub(crate) async fn collect_events_map( - &mut self, - timeout: Duration, - mut filter: F, - ) -> Vec - where - F: FnMut(E) -> Option, - { - let mut events = Vec::new(); - let _ = runtime::timeout(timeout, async { - while let Some(event) = self.filter_map_event(timeout, &mut filter).await { - events.push(event); - } - }) - .await; - events - } - - #[cfg(feature = "in-use-encryption-unstable")] - pub(crate) async fn clear_events(&mut self, timeout: Duration) { - self.collect_events(timeout, |_| true).await; - } - - /// Consume and pass events to the provided closure until it returns Some or the timeout is hit. - pub(crate) async fn filter_map_event( - &mut self, - timeout: Duration, - mut filter_map: F, - ) -> Option - where - F: FnMut(E) -> Option, - { - runtime::timeout(timeout, async { - loop { - match self.receiver.recv().await { - Ok(event) => { - if let Some(e) = filter_map(event) { - return Some(e); - } else { - continue; - } - } - // the channel hit capacity and missed some events. 
- Err(RecvError::Lagged(amount_skipped)) => { - panic!("receiver lagged and skipped {} events", amount_skipped) - } - Err(_) => return None, - } - } - }) - .await - .ok() - .flatten() - } - - /// Returns the received events without waiting for any more. - pub(crate) fn all(&mut self, filter: F) -> Vec - where - F: Fn(&E) -> bool, - { - let mut events = Vec::new(); - while let Ok(event) = self.receiver.try_recv() { - if filter(&event) { - events.push(event); - } - } - events - } -} diff --git a/src/test/util/trace.rs b/src/test/util/trace.rs index be36563b2..4b35cd8aa 100644 --- a/src/test/util/trace.rs +++ b/src/test/util/trace.rs @@ -1,4 +1,3 @@ -use super::subscriber::EventSubscriber; use crate::test::spec::unified_runner::{test_file::TestCase, TestFileEntity}; use serde::Serialize; use std::{ @@ -6,9 +5,11 @@ use std::{ convert::TryInto, sync::{Arc, RwLock}, }; -use tokio::sync::broadcast; use tracing::{field::Field, span, subscriber::Interest, Level, Metadata}; +#[allow(deprecated)] +use super::event_buffer::{EventBuffer, EventSubscriber}; + /// Models the data reported in a tracing event. #[derive(Debug, Clone)] pub struct TracingEvent { @@ -104,8 +105,7 @@ impl Serialize for TracingEventValue { /// [`TracingHandler::subscribe`] to create a new [`TracingSubscriber`]. #[derive(Clone, Debug)] pub(crate) struct TracingHandler { - /// Sender for the channel where events will be broadcast. - event_broadcaster: broadcast::Sender, + buffer: EventBuffer, /// Contains a map of (tracing component name, maximum verbosity level) which this handler /// will subscribe to messages for. This is stored in an Arc so that we are able @@ -122,9 +122,8 @@ impl TracingHandler { /// Initializes a new tracing handler with the specified components and corresponding maximum /// verbosity levels. pub(crate) fn new_with_levels(levels: HashMap) -> TracingHandler { - let (event_broadcaster, _) = tokio::sync::broadcast::channel(10_000); Self { - event_broadcaster, + buffer: EventBuffer::new(), levels: Arc::new(RwLock::new(levels)), } } @@ -140,8 +139,10 @@ impl TracingHandler { } /// Returns a `TracingSubscriber` that will listen for tracing events broadcast by this handler. - pub(crate) fn subscribe(&self) -> EventSubscriber { - EventSubscriber::new(self, self.event_broadcaster.subscribe()) + #[allow(deprecated)] + pub(crate) fn subscribe(&self) -> EventSubscriber { + #[allow(deprecated)] + self.buffer.subscribe() } } @@ -242,9 +243,7 @@ impl tracing::Subscriber for TracingHandler { ); let mut visitor = TracingEventVisitor::new(&mut test_event); event.record(&mut visitor); - // this only errors if no receivers are listening; we don't care if that is the case. - let _: std::result::Result> = - self.event_broadcaster.send(test_event); + self.buffer.push_event(test_event); } /// These methods all relate to spans. Since we don't create any spans ourselves or need diff --git a/tests/readme_examples.rs b/tests/readme_examples.rs index 2d3eab2e6..1b291c54d 100644 --- a/tests/readme_examples.rs +++ b/tests/readme_examples.rs @@ -56,7 +56,7 @@ async fn _inserting_documents_into_a_collection(db: mongodb::Database) -> Result ]; // Insert some documents into the "mydb.books" collection. - collection.insert_many(docs, None).await?; + collection.insert_many(docs).await?; Ok(()) } @@ -84,7 +84,7 @@ async fn _inserting_documents_into_a_typed_collection(db: mongodb::Database) -> ]; // Insert the books into "mydb.books" collection, no manual conversion to BSON necessary. 
- typed_collection.insert_many(books, None).await?; + typed_collection.insert_many(books).await?; Ok(()) } @@ -94,12 +94,11 @@ async fn _finding_documents_into_a_collection( ) -> Result<()> { // This trait is required to use `try_next()` on the cursor use futures::stream::TryStreamExt; - use mongodb::{bson::doc, options::FindOptions}; + use mongodb::bson::doc; // Query the books in the collection with a filter and an option. let filter = doc! { "author": "George Orwell" }; - let find_options = FindOptions::builder().sort(doc! { "title": 1 }).build(); - let mut cursor = typed_collection.find(filter, find_options).await?; + let mut cursor = typed_collection.find(filter).sort(doc! { "title": 1 }).await?; // Iterate over the results of the cursor. while let Some(book) = cursor.try_next().await? { @@ -133,9 +132,9 @@ async fn _using_the_sync_api() -> Result<()> { ]; // Insert some books into the "mydb.books" collection. - collection.insert_many(docs, None)?; + collection.insert_many(docs).run()?; - let cursor = collection.find(doc! { "author": "George Orwell" }, None)?; + let cursor = collection.find(doc! { "author": "George Orwell" }).run()?; for result in cursor { println!("title: {}", result?.title); } diff --git a/tests/transactions_example.rs b/tests/transactions_example.rs index b9d4ca8cf..153938dd4 100644 --- a/tests/transactions_example.rs +++ b/tests/transactions_example.rs @@ -2,14 +2,7 @@ use mongodb::{ bson::{doc, Document}, error::{Result, TRANSIENT_TRANSACTION_ERROR, UNKNOWN_TRANSACTION_COMMIT_RESULT}, - options::{ - Acknowledgment, - ReadConcern, - ReadPreference, - SelectionCriteria, - TransactionOptions, - WriteConcern, - }, + options::{Acknowledgment, ReadConcern, ReadPreference, SelectionCriteria, WriteConcern}, Client, ClientSession, }; @@ -44,12 +37,12 @@ async fn run_transaction_with_retry(session: &mut ClientSession) -> Result<()> { } async fn execute_transaction(session: &mut ClientSession) -> Result<()> { - let transaction_options = TransactionOptions::builder() + session + .start_transaction() .read_concern(ReadConcern::snapshot()) .write_concern(WriteConcern::builder().w(Acknowledgment::Majority).build()) .selection_criteria(SelectionCriteria::ReadPreference(ReadPreference::Primary)) - .build(); - session.start_transaction(transaction_options).await?; + .await?; let client = session.client(); let employees = client.database("hr").collection::("employees"); @@ -58,17 +51,14 @@ async fn execute_transaction(session: &mut ClientSession) -> Result<()> { employees .update_one( doc! { "employee": 3 }, - doc! { "$set": { "status": "Inactive" } } + doc! { "$set": { "status": "Inactive" } }, ) .session(&mut *session) .await?; events - .insert_one_with_session( - doc! { "employee": 3, "status": { "new": "Inactive", "old": "Active" } }, - None, - session, - ) + .insert_one(doc! 
{ "employee": 3, "status": { "new": "Inactive", "old": "Active" } }) + .session(&mut *session) .await?; commit_with_retry(session).await From 02c54d27185b1648382d16916ac22d8c52fe9763 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 5 Apr 2024 12:22:50 -0600 Subject: [PATCH 30/75] rework cursor test --- src/test/bulk_write.rs | 28 +++++++++------------------- 1 file changed, 9 insertions(+), 19 deletions(-) diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 518dd1f1e..3b42eea10 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -1,8 +1,3 @@ -use rand::{ - distributions::{Alphanumeric, DistString}, - thread_rng, -}; - use crate::{ bson::doc, error::ErrorKind, @@ -14,7 +9,6 @@ use crate::{ util::event_buffer::EventBuffer, FailPoint, FailPointMode, - TestClient, }, Client, Namespace, @@ -220,23 +214,19 @@ async fn cursor_iteration() { .await; let max_bson_object_size = client.server_info.max_bson_object_size as usize; - let max_write_batch_size = client.server_info.max_write_batch_size.unwrap() as usize; - let id_size = max_bson_object_size / max_write_batch_size; - let document = doc! { "_id": Alphanumeric.sample_string(&mut thread_rng(), id_size) }; - client - .database("bulk") - .collection("write") - .insert_one(document.clone()) - .await - .unwrap(); + let document = doc! { "_id": "a".repeat(max_bson_object_size - 500) }; + + let collection = client.database("db").collection("coll"); + collection.drop().await.unwrap(); + collection.insert_one(document.clone()).await.unwrap(); let models = vec![ WriteModel::InsertOne { - namespace: Namespace::new("bulk", "write"), + namespace: collection.namespace(), document }; - max_write_batch_size + 2 ]; let error = client.bulk_write(models).ordered(false).await.unwrap_err(); @@ -245,8 +235,8 @@ async fn cursor_iteration() { }; let write_errors = bulk_write_error.write_errors; - assert_eq!(write_errors.len(), max_write_batch_size); + assert_eq!(write_errors.len(), 2); let command_started_events = event_buffer.get_command_started_events(&["getMore"]); - assert!(!command_started_events.is_empty()); + assert_eq!(command_started_events.len(), 1); } From 8f0dd5e46e5ee587ddd9ffd5389ca0b44392af1d Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 5 Apr 2024 12:54:12 -0600 Subject: [PATCH 31/75] add pipeline tests --- .../client-bulkWrite-update-pipeline.json | 257 ++++++++++++++++++ .../client-bulkWrite-update-pipeline.yml | 132 +++++++++ 2 files changed, 389 insertions(+) create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-pipeline.json create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-pipeline.yml diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-pipeline.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-pipeline.json new file mode 100644 index 000000000..57b6c9c1b --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-pipeline.json @@ -0,0 +1,257 @@ +{ + "description": "client bulkWrite update pipeline", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + 
"initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 2 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite updateOne with pipeline", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": [ + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": [ + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateMany with pipeline", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": {}, + "update": [ + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": {}, + "updateMods": [ + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-pipeline.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-pipeline.yml new file mode 100644 index 000000000..fe0e29a50 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-pipeline.yml @@ -0,0 +1,132 @@ +description: "client bulkWrite update pipeline" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 
collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - {_id: 1, x: 1} + - {_id: 2, x: 2} + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite updateOne with pipeline" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + - $addFields: + foo: 1 + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 1 + modifiedCount: 1 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { "$$exists": false } + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + - $addFields: + foo: 1 + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - {_id: 1, x: 1, foo: 1} + - {_id: 2, x: 2 } + + - description: "client bulkWrite updateMany with pipeline" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateMany: + namespace: *namespace + filter: {} + update: + - $addFields: + foo: 1 + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { "$$exists": false } + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { } + updateMods: + - $addFields: + foo: 1 + multi: true + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - {_id: 1, x: 1, foo: 1} + - {_id: 2, x: 2, foo: 1} From 6fd1a7a045524a9bee2edfaa7747f980c69552ec Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 5 Apr 2024 13:11:21 -0600 Subject: [PATCH 32/75] validate update and replacement documents --- src/bson_util.rs | 57 ++++- src/client/options/bulk_write.rs | 16 +- .../client-bulkWrite-update-validation.json | 216 ++++++++++++++++++ .../client-bulkWrite-update-validation.yml | 79 +++++++ 4 files changed, 357 insertions(+), 11 deletions(-) create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-validation.json create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-validation.yml diff --git a/src/bson_util.rs b/src/bson_util.rs index 436a1582f..2138b74b4 100644 --- a/src/bson_util.rs +++ b/src/bson_util.rs @@ -70,21 +70,58 @@ pub(crate) fn first_key(document: &Document) -> Option<&str> { document.keys().next().map(String::as_str) } -pub(crate) fn replacement_raw_document_check(replacement: &RawDocumentBuf) -> Result<()> { - match replacement.iter().next().transpose()? 
{ - Some((key, _)) if !key.starts_with('$') => Ok(()), - _ => Err(ErrorKind::InvalidArgument { - message: "replace document must have first key not starting with '$'".to_string(), +pub(crate) fn update_document_check(update: &Document) -> Result<()> { + match first_key(update) { + Some(key) => { + if !key.starts_with('$') { + Err(ErrorKind::InvalidArgument { + message: "update document must only contain update modifiers".to_string(), + } + .into()) + } else { + Ok(()) + } + } + None => Err(ErrorKind::InvalidArgument { + message: "update document must not be empty".to_string(), } .into()), } } -pub(crate) fn update_document_check(update: &Document) -> Result<()> { - match first_key(update) { - Some(s) if s.starts_with('$') => Ok(()), - _ => Err(ErrorKind::InvalidArgument { - message: "update document must have first key starting with '$".to_string(), +pub(crate) fn replacement_document_check(replacement: &Document) -> Result<()> { + match first_key(replacement) { + Some(key) => { + if key.starts_with('$') { + Err(ErrorKind::InvalidArgument { + message: "replacement document must not contain update modifiers".to_string(), + } + .into()) + } else { + Ok(()) + } + } + None => Err(ErrorKind::InvalidArgument { + message: "replacement document must not be empty".to_string(), + } + .into()), + } +} + +pub(crate) fn replacement_raw_document_check(replacement: &RawDocumentBuf) -> Result<()> { + match replacement.iter().next().transpose()? { + Some((key, _)) => { + if key.starts_with('$') { + Err(ErrorKind::InvalidArgument { + message: "replacement document must not contain update modifiers".to_string(), + } + .into()) + } else { + Ok(()) + } + } + None => Err(ErrorKind::InvalidArgument { + message: "replacement document must not be empty".to_string(), } .into()), } diff --git a/src/client/options/bulk_write.rs b/src/client/options/bulk_write.rs index 4093aed95..5517e1169 100644 --- a/src/client/options/bulk_write.rs +++ b/src/client/options/bulk_write.rs @@ -5,7 +5,7 @@ use serde_with::skip_serializing_none; use crate::{ bson::{rawdoc, Array, Bson, Document, RawDocumentBuf}, - bson_util::get_or_prepend_id_field, + bson_util::{get_or_prepend_id_field, replacement_document_check, update_document_check}, error::Result, options::UpdateModifications, Namespace, @@ -181,6 +181,20 @@ impl WriteModel { /// Returns the operation-specific fields that should be included in this model's entry in the /// ops array. Also returns an inserted ID if this is an insert operation. pub(crate) fn get_ops_document_contents(&self) -> Result<(RawDocumentBuf, Option)> { + if let Self::UpdateOne { + update: UpdateModifications::Document(update_document), + .. + } + | Self::UpdateMany { + update: UpdateModifications::Document(update_document), + .. + } = self + { + update_document_check(update_document)?; + } else if let Self::ReplaceOne { replacement, .. } = self { + replacement_document_check(replacement)?; + } + let (mut model_document, inserted_id) = match self { Self::InsertOne { document, .. 
} => { let mut insert_document = RawDocumentBuf::from_document(document)?; diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-validation.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-validation.json new file mode 100644 index 000000000..1ac3e8d04 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-validation.json @@ -0,0 +1,216 @@ +{ + "description": "client-bulkWrite-update-validation", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite replaceOne prohibits atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateOne requires atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "x": 22 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateMany requires atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 44 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-validation.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-validation.yml new file mode 100644 index 000000000..f597e0762 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-validation.yml @@ -0,0 +1,79 @@ +description: "client-bulkWrite-update-validation" + +schemaVersion: "1.0" + 
+createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: &initialData + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite replaceOne prohibits atomic modifiers" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: + - replaceOne: + namespace: *namespace + filter: { _id: 1 } + replacement: { $set: { x: 22 } } + expectError: + isClientError: true + expectEvents: + - client: *client0 + events: [] + outcome: *initialData + + - description: "client bulkWrite updateOne requires atomic modifiers" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { x: 22 } + expectError: + isClientError: true + expectEvents: + - client: *client0 + events: [] + outcome: *initialData + + - description: "client bulkWrite updateMany requires atomic modifiers" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: + - updateMany: + namespace: *namespace + filter: { _id: { $gt: 1 } } + update: { x: 44 } + expectError: + isClientError: true + expectEvents: + - client: *client0 + events: [] + outcome: *initialData From 927e9c57ac8e5da4e8a72292a4ca341f923b5b23 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 5 Apr 2024 13:12:21 -0600 Subject: [PATCH 33/75] simplify check --- src/client/options/bulk_write.rs | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/client/options/bulk_write.rs b/src/client/options/bulk_write.rs index 5517e1169..c8f7cfdbc 100644 --- a/src/client/options/bulk_write.rs +++ b/src/client/options/bulk_write.rs @@ -181,16 +181,10 @@ impl WriteModel { /// Returns the operation-specific fields that should be included in this model's entry in the /// ops array. Also returns an inserted ID if this is an insert operation. pub(crate) fn get_ops_document_contents(&self) -> Result<(RawDocumentBuf, Option)> { - if let Self::UpdateOne { - update: UpdateModifications::Document(update_document), - .. - } - | Self::UpdateMany { - update: UpdateModifications::Document(update_document), - .. - } = self - { - update_document_check(update_document)?; + if let Self::UpdateOne { update, .. } | Self::UpdateMany { update, .. } = self { + if let UpdateModifications::Document(update_document) = update { + update_document_check(update_document)?; + } } else if let Self::ReplaceOne { replacement, .. 
} = self { replacement_document_check(replacement)?; } From a2ffd7a9de53e5c1d68f0d7febb5ab7e360748ac Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 5 Apr 2024 13:41:39 -0600 Subject: [PATCH 34/75] allow empty replaces --- src/bson_util.rs | 40 +++++++++++++--------------------------- 1 file changed, 13 insertions(+), 27 deletions(-) diff --git a/src/bson_util.rs b/src/bson_util.rs index 2138b74b4..1db40e87a 100644 --- a/src/bson_util.rs +++ b/src/bson_util.rs @@ -90,41 +90,27 @@ pub(crate) fn update_document_check(update: &Document) -> Result<()> { } pub(crate) fn replacement_document_check(replacement: &Document) -> Result<()> { - match first_key(replacement) { - Some(key) => { - if key.starts_with('$') { - Err(ErrorKind::InvalidArgument { - message: "replacement document must not contain update modifiers".to_string(), - } - .into()) - } else { - Ok(()) + if let Some(key) = first_key(replacement) { + if key.starts_with('$') { + return Err(ErrorKind::InvalidArgument { + message: "replacement document must not contain update modifiers".to_string(), } + .into()); } - None => Err(ErrorKind::InvalidArgument { - message: "replacement document must not be empty".to_string(), - } - .into()), } + Ok(()) } pub(crate) fn replacement_raw_document_check(replacement: &RawDocumentBuf) -> Result<()> { - match replacement.iter().next().transpose()? { - Some((key, _)) => { - if key.starts_with('$') { - Err(ErrorKind::InvalidArgument { - message: "replacement document must not contain update modifiers".to_string(), - } - .into()) - } else { - Ok(()) + if let Some((key, _)) = replacement.iter().next().transpose()? { + if key.starts_with('$') { + return Err(ErrorKind::InvalidArgument { + message: "replacement document must not contain update modifiers".to_string(), } - } - None => Err(ErrorKind::InvalidArgument { - message: "replacement document must not be empty".to_string(), - } - .into()), + .into()); + }; } + Ok(()) } /// The size in bytes of the provided document's entry in a BSON array at the given index. 
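Taken together, the update and replacement checks above reduce to a rule about the first key of the document: an update document must be non-empty and its first key must be a `$` update operator, while a replacement document may be empty but its first key, if any, must not be an operator. Below is a minimal standalone sketch of that rule for reference; the `check_update` and `check_replacement` helpers and the plain `String` errors are illustrative stand-ins rather than driver code (the driver's versions above return `ErrorKind::InvalidArgument`).

use bson::{doc, Document};

// Sketch of the first-key rule from `update_document_check` /
// `replacement_document_check`; `String` errors stand in for the
// driver's `ErrorKind::InvalidArgument`.

fn check_update(update: &Document) -> Result<(), String> {
    match update.keys().next() {
        // First key is an update operator: valid update document.
        Some(key) if key.starts_with('$') => Ok(()),
        Some(_) => Err("update document must only contain update modifiers".to_string()),
        None => Err("update document must not be empty".to_string()),
    }
}

fn check_replacement(replacement: &Document) -> Result<(), String> {
    match replacement.keys().next() {
        // An operator as the first key means this is an update, not a replacement.
        Some(key) if key.starts_with('$') => {
            Err("replacement document must not contain update modifiers".to_string())
        }
        // Any other first key, or an empty document, is accepted ("allow empty replaces").
        _ => Ok(()),
    }
}

fn main() {
    assert!(check_update(&doc! { "$set": { "x": 22 } }).is_ok());
    assert!(check_update(&doc! { "x": 22 }).is_err());
    assert!(check_update(&doc! {}).is_err());

    assert!(check_replacement(&doc! { "x": 22 }).is_ok());
    assert!(check_replacement(&doc! {}).is_ok());
    assert!(check_replacement(&doc! { "$set": { "x": 22 } }).is_err());
}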
From 7b11bf0b2906383f2c44c8fd3c89590b86ffe83b Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 5 Apr 2024 14:06:36 -0600 Subject: [PATCH 35/75] add failed iteration test --- src/cmap/test/integration.rs | 4 +- .../server_selection/test/in_window.rs | 2 +- src/sdam/description/topology/test/sdam.rs | 2 +- src/sdam/test.rs | 4 +- src/test/bulk_write.rs | 53 ++++++++++++++++++- src/test/change_stream.rs | 12 ++--- src/test/client.rs | 4 +- src/test/spec/gridfs.rs | 4 +- src/test/spec/retryable_reads.rs | 8 +-- src/test/spec/retryable_writes.rs | 12 ++--- src/test/spec/sdam.rs | 2 +- src/test/spec/transactions.rs | 4 +- src/test/spec/unified_runner/operation.rs | 4 +- src/test/spec/v2_runner.rs | 7 +-- src/test/util/failpoint.rs | 6 +-- 15 files changed, 86 insertions(+), 42 deletions(-) diff --git a/src/cmap/test/integration.rs b/src/cmap/test/integration.rs index 2a16febb5..6afe61130 100644 --- a/src/cmap/test/integration.rs +++ b/src/cmap/test/integration.rs @@ -189,7 +189,7 @@ async fn connection_error_during_establishment() { } let _guard = client - .configure_fail_point( + .enable_fail_point( FailPoint::new( &[LEGACY_HELLO_COMMAND_NAME, "hello"], FailPointMode::Times(10), @@ -248,7 +248,7 @@ async fn connection_error_during_operation() { } let _guard = client - .configure_fail_point( + .enable_fail_point( FailPoint::new(&["ping"], FailPointMode::Times(10)).close_connection(true), ) .await diff --git a/src/sdam/description/topology/server_selection/test/in_window.rs b/src/sdam/description/topology/server_selection/test/in_window.rs index 0179a17d9..352c05355 100644 --- a/src/sdam/description/topology/server_selection/test/in_window.rs +++ b/src/sdam/description/topology/server_selection/test/in_window.rs @@ -263,7 +263,7 @@ async fn load_balancing_test() { let fail_point = FailPoint::new(&["find"], FailPointMode::AlwaysOn) .block_connection(Duration::from_millis(500)) .selection_criteria(slow_host_criteria); - let guard = setup_client.configure_fail_point(fail_point).await.unwrap(); + let guard = setup_client.enable_fail_point(fail_point).await.unwrap(); // verify that the lesser picked server (slower one) was picked less than 25% of the time. 
do_test(&client, &mut buffer, 0.05, 0.25, 10).await; diff --git a/src/sdam/description/topology/test/sdam.rs b/src/sdam/description/topology/test/sdam.rs index 8b6903e6f..a08ab29ab 100644 --- a/src/sdam/description/topology/test/sdam.rs +++ b/src/sdam/description/topology/test/sdam.rs @@ -684,7 +684,7 @@ async fn heartbeat_events() { ) .app_name("heartbeat_events") .error_code(1234); - let _guard = fp_client.configure_fail_point(fail_point).await.unwrap(); + let _guard = fp_client.enable_fail_point(fail_point).await.unwrap(); subscriber .wait_for_event(Duration::from_millis(500), |event| { diff --git a/src/sdam/test.rs b/src/sdam/test.rs index f8baded81..e6684e527 100644 --- a/src/sdam/test.rs +++ b/src/sdam/test.rs @@ -48,7 +48,7 @@ async fn min_heartbeat_frequency() { } let _guard = setup_client - .configure_fail_point( + .enable_fail_point( FailPoint::new( &[LEGACY_HELLO_COMMAND_NAME, "hello"], FailPointMode::Times(5), @@ -134,7 +134,7 @@ async fn sdam_pool_management() { .expect("should see server heartbeat succeeded event"); let _guard = client - .configure_fail_point( + .enable_fail_point( FailPoint::new( &[LEGACY_HELLO_COMMAND_NAME, "hello"], FailPointMode::Times(4), diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 3b42eea10..1c7650f17 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -121,7 +121,7 @@ async fn write_concern_error_batches() { let fail_point = FailPoint::new(&["bulkWrite"], FailPointMode::Times(2)) .write_concern_error(doc! { "code": 91, "errmsg": "Replication is being shut down" }); - let _guard = client.configure_fail_point(fail_point).await.unwrap(); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); let models = vec![ WriteModel::InsertOne { @@ -206,13 +206,18 @@ async fn write_error_batches() { } #[tokio::test] -async fn cursor_iteration() { +async fn successful_cursor_iteration() { let event_buffer = EventBuffer::new(); let client = Client::test_builder() .event_buffer(event_buffer.clone()) .build() .await; + if client.server_version_lt(8, 0) { + log_uncaptured("skipping successful_cursor_iteration: bulkWrite requires 8.0+"); + return; + } + let max_bson_object_size = client.server_info.max_bson_object_size as usize; let document = doc! { "_id": "a".repeat(max_bson_object_size - 500) }; @@ -240,3 +245,47 @@ async fn cursor_iteration() { let command_started_events = event_buffer.get_command_started_events(&["getMore"]); assert_eq!(command_started_events.len(), 1); } + +#[tokio::test(flavor = "multi_thread")] +async fn failed_cursor_iteration() { + let event_buffer = EventBuffer::new(); + let client = Client::test_builder() + .event_buffer(event_buffer.clone()) + .build() + .await; + + if client.server_version_lt(8, 0) { + log_uncaptured("skipping failed_cursor_iteration: bulkWrite requires 8.0+"); + return; + } + + let max_bson_object_size = client.server_info.max_bson_object_size as usize; + + let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(8); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); + + let document = doc! 
{ "_id": "a".repeat(max_bson_object_size - 500) }; + + let collection = client.database("db").collection("coll"); + collection.drop().await.unwrap(); + collection.insert_one(document.clone()).await.unwrap(); + + let models = vec![ + WriteModel::InsertOne { + namespace: collection.namespace(), + document + }; + 2 + ]; + let error = client.bulk_write(models).ordered(false).await.unwrap_err(); + + let Some(ref source) = error.source else { + panic!("Expected error to contain source"); + }; + assert_eq!(source.code(), Some(8)); + + let ErrorKind::ClientBulkWrite(bulk_write_error) = *error.kind else { + panic!("Expected bulk write error, got {:?}", error); + }; + assert_eq!(bulk_write_error.write_errors.len(), 1); +} diff --git a/src/test/change_stream.rs b/src/test/change_stream.rs index a5df31b4f..30ece4161 100644 --- a/src/test/change_stream.rs +++ b/src/test/change_stream.rs @@ -172,7 +172,7 @@ async fn resumes_on_error() -> Result<()> { )); let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(43); - let _guard = client.configure_fail_point(fail_point).await?; + let _guard = client.enable_fail_point(fail_point).await?; coll.insert_one(doc! { "_id": 2 }).await?; assert!(matches!(stream.next().await.transpose()?, @@ -201,7 +201,7 @@ async fn does_not_resume_aggregate() -> Result<()> { }; let fail_point = FailPoint::new(&["aggregate"], FailPointMode::Times(1)).error_code(43); - let _guard = client.configure_fail_point(fail_point).await?; + let _guard = client.enable_fail_point(fail_point).await?; assert!(coll.watch().await.is_err()); @@ -269,7 +269,7 @@ async fn resume_kill_cursor_error_suppressed() -> Result<()> { let fail_point = FailPoint::new(&["getMore", "killCursors"], FailPointMode::Times(1)).error_code(43); - let _guard = client.configure_fail_point(fail_point).await?; + let _guard = client.enable_fail_point(fail_point).await?; coll.insert_one(doc! { "_id": 2 }).await?; assert!(matches!(stream.next().await.transpose()?, @@ -310,7 +310,7 @@ async fn resume_start_at_operation_time() -> Result<()> { } let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(43); - let _guard = client.configure_fail_point(fail_point).await?; + let _guard = client.enable_fail_point(fail_point).await?; coll.insert_one(doc! { "_id": 2 }).await?; stream.next().await.transpose()?; @@ -521,7 +521,7 @@ async fn resume_uses_start_after() -> Result<()> { // Create an event, and synthesize a resumable error when calling `getMore` for that event. coll.insert_one(doc! {}).await?; let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(43); - let _guard = client.configure_fail_point(fail_point).await?; + let _guard = client.enable_fail_point(fail_point).await?; stream.next().await.transpose()?; #[allow(deprecated)] @@ -577,7 +577,7 @@ async fn resume_uses_resume_after() -> Result<()> { // Create an event, and synthesize a resumable error when calling `getMore` for that event. coll.insert_one(doc! 
{}).await?; let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(43); - let _guard = client.configure_fail_point(fail_point).await?; + let _guard = client.enable_fail_point(fail_point).await?; stream.next().await.transpose()?; #[allow(deprecated)] diff --git a/src/test/client.rs b/src/test/client.rs index 46635348c..ab5ce4284 100644 --- a/src/test/client.rs +++ b/src/test/client.rs @@ -709,7 +709,7 @@ async fn retry_commit_txn_check_out() { // Enable a fail point that clears the connection pools so that commitTransaction will create a // new connection during checkout. let fail_point = FailPoint::new(&["ping"], FailPointMode::Times(1)).error_code(11600); - let _guard = setup_client.configure_fail_point(fail_point).await.unwrap(); + let _guard = setup_client.enable_fail_point(fail_point).await.unwrap(); #[allow(deprecated)] let mut subscriber = buffer.subscribe(); @@ -759,7 +759,7 @@ async fn retry_commit_txn_check_out() { ) .error_code(11600) .app_name("retry_commit_txn_check_out"); - let _guard2 = setup_client.configure_fail_point(fail_point).await.unwrap(); + let _guard2 = setup_client.enable_fail_point(fail_point).await.unwrap(); // finally, attempt the commit. // this should succeed due to retry diff --git a/src/test/spec/gridfs.rs b/src/test/spec/gridfs.rs index b9403a47d..91acf7287 100644 --- a/src/test/spec/gridfs.rs +++ b/src/test/spec/gridfs.rs @@ -232,7 +232,7 @@ async fn upload_stream_errors() { .unwrap(); let fail_point = FailPoint::new(&["insert"], FailPointMode::Times(1)).error_code(1234); - let _guard = client.configure_fail_point(fail_point).await.unwrap(); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); let error = get_mongo_error(upload_stream.write_all(&[11]).await); assert_eq!(error.sdam_code(), Some(1234)); @@ -249,7 +249,7 @@ async fn upload_stream_errors() { upload_stream.write_all(&[11]).await.unwrap(); let fail_point = FailPoint::new(&["insert"], FailPointMode::Times(1)).error_code(1234); - let _guard = client.configure_fail_point(fail_point).await.unwrap(); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); let error = get_mongo_error(upload_stream.close().await); assert_eq!(error.sdam_code(), Some(1234)); diff --git a/src/test/spec/retryable_reads.rs b/src/test/spec/retryable_reads.rs index c0fcdd2fc..5dae20a17 100644 --- a/src/test/spec/retryable_reads.rs +++ b/src/test/spec/retryable_reads.rs @@ -53,7 +53,7 @@ async fn retry_releases_connection() { collection.insert_one(doc! 
{ "x": 1 }).await.unwrap(); let fail_point = FailPoint::new(&["find"], FailPointMode::Times(1)).close_connection(true); - let _guard = client.configure_fail_point(fail_point).await.unwrap(); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); runtime::timeout( Duration::from_secs(1), @@ -99,7 +99,7 @@ async fn retry_read_pool_cleared() { let fail_point = FailPoint::new(&["find"], FailPointMode::Times(1)) .error_code(91) .block_connection(Duration::from_secs(1)); - let _guard = client.configure_fail_point(fail_point).await.unwrap(); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); #[allow(deprecated)] let mut subscriber = buffer.subscribe(); @@ -182,7 +182,7 @@ async fn retry_read_different_mongos() { let fail_point = FailPoint::new(&["find"], FailPointMode::Times(1)) .error_code(6) .close_connection(true); - guards.push(client.configure_fail_point(fail_point).await.unwrap()); + guards.push(client.enable_fail_point(fail_point).await.unwrap()); } #[allow(deprecated)] @@ -243,7 +243,7 @@ async fn retry_read_same_mongos() { let fail_point = FailPoint::new(&["find"], FailPointMode::Times(1)) .error_code(6) .close_connection(true); - client.configure_fail_point(fail_point).await.unwrap() + client.enable_fail_point(fail_point).await.unwrap() }; #[allow(deprecated)] diff --git a/src/test/spec/retryable_writes.rs b/src/test/spec/retryable_writes.rs index 32b7e5bdd..c2484d8d7 100644 --- a/src/test/spec/retryable_writes.rs +++ b/src/test/spec/retryable_writes.rs @@ -86,7 +86,7 @@ async fn run_legacy() { let guard = if let Some(fail_point) = test_case.fail_point { Some( client - .configure_fail_point(fail_point) + .enable_fail_point(fail_point) .await .expect(&test_case.description), ) @@ -425,7 +425,7 @@ async fn retry_write_pool_cleared() { .error_code(91) .block_connection(Duration::from_secs(1)) .error_labels(vec![RETRYABLE_WRITE_ERROR]); - let _guard = client.configure_fail_point(fail_point).await.unwrap(); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); #[allow(deprecated)] let mut subscriber = buffer.subscribe(); @@ -519,7 +519,7 @@ async fn retry_write_retryable_write_error() { client .as_ref() .unwrap() - .configure_fail_point(fail_point) + .enable_fail_point(fail_point) .await .unwrap() }; @@ -548,7 +548,7 @@ async fn retry_write_retryable_write_error() { "code": 91, "errorLabels": ["RetryableWriteError"], }); - let _guard = client.configure_fail_point(fail_point).await.unwrap(); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); let result = client .database("test") @@ -590,7 +590,7 @@ async fn retry_write_different_mongos() { .error_code(6) .error_labels(vec![RETRYABLE_WRITE_ERROR]) .close_connection(true); - guards.push(client.configure_fail_point(fail_point).await.unwrap()); + guards.push(client.enable_fail_point(fail_point).await.unwrap()); } #[allow(deprecated)] @@ -652,7 +652,7 @@ async fn retry_write_same_mongos() { .error_code(6) .error_labels(vec![RETRYABLE_WRITE_ERROR]) .close_connection(true); - client.configure_fail_point(fail_point).await.unwrap() + client.enable_fail_point(fail_point).await.unwrap() }; #[allow(deprecated)] diff --git a/src/test/spec/sdam.rs b/src/test/spec/sdam.rs index c60e35ab0..725f8e2dc 100644 --- a/src/test/spec/sdam.rs +++ b/src/test/spec/sdam.rs @@ -209,7 +209,7 @@ async fn rtt_is_updated() { ) .block_connection(Duration::from_millis(500)) .app_name(app_name); - let _guard = client.configure_fail_point(fail_point).await.unwrap(); + let _guard = 
client.enable_fail_point(fail_point).await.unwrap(); let mut watcher = client.topology().watch(); runtime::timeout(Duration::from_secs(10), async move { diff --git a/src/test/spec/transactions.rs b/src/test/spec/transactions.rs index f0ec76856..18ab3eddf 100644 --- a/src/test/spec/transactions.rs +++ b/src/test/spec/transactions.rs @@ -213,7 +213,7 @@ async fn convenient_api_retry_timeout_commit_unknown() { let fail_point = FailPoint::new(&["commitTransaction"], FailPointMode::Times(1)) .error_code(251) .error_labels(vec![UNKNOWN_TRANSACTION_COMMIT_RESULT]); - let _guard = client.configure_fail_point(fail_point).await.unwrap(); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); let result = session .start_transaction() @@ -258,7 +258,7 @@ async fn convenient_api_retry_timeout_commit_transient() { let fail_point = FailPoint::new(&["commitTransaction"], FailPointMode::Times(1)) .error_code(251) .error_labels(vec![TRANSIENT_TRANSACTION_ERROR]); - let _guard = client.configure_fail_point(fail_point).await.unwrap(); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); let result = session .start_transaction() diff --git a/src/test/spec/unified_runner/operation.rs b/src/test/spec/unified_runner/operation.rs index f1ed154df..795a6495c 100644 --- a/src/test/spec/unified_runner/operation.rs +++ b/src/test/spec/unified_runner/operation.rs @@ -1348,7 +1348,7 @@ impl TestOperation for FailPointCommand { async move { let client = test_runner.get_client(&self.client).await; let guard = client - .configure_fail_point(self.fail_point.clone()) + .enable_fail_point(self.fail_point.clone()) .await .unwrap(); test_runner.fail_point_guards.write().await.push(guard); @@ -1381,7 +1381,7 @@ impl TestOperation for TargetedFailPoint { .await; let guard = test_runner .internal_client - .configure_fail_point( + .enable_fail_point( self.fail_point .clone() .selection_criteria(selection_criteria), diff --git a/src/test/spec/v2_runner.rs b/src/test/spec/v2_runner.rs index 141b83625..d5e230211 100644 --- a/src/test/spec/v2_runner.rs +++ b/src/test/spec/v2_runner.rs @@ -233,10 +233,7 @@ impl TestContext { // Persist fail point guards so they disable post-test. let mut fail_point_guards: Vec = Vec::new(); if let Some(ref fail_point) = test.fail_point { - let guard = client - .configure_fail_point(fail_point.clone()) - .await - .unwrap(); + let guard = client.enable_fail_point(fail_point.clone()).await.unwrap(); fail_point_guards.push(guard); } @@ -419,7 +416,7 @@ impl<'a> OpRunner<'a> { let guard = self .client - .configure_fail_point(fail_point.selection_criteria(selection_criteria)) + .enable_fail_point(fail_point.selection_criteria(selection_criteria)) .await .unwrap(); self.fail_point_guards.push(guard); diff --git a/src/test/util/failpoint.rs b/src/test/util/failpoint.rs index db18ea47d..69d178ded 100644 --- a/src/test/util/failpoint.rs +++ b/src/test/util/failpoint.rs @@ -15,10 +15,7 @@ impl Client { /// #[tokio::test(flavor = "multi_thread")] test annotation. The guard returned from this /// method should remain in scope while the fail point is intended for use. Upon drop, the /// guard will disable the fail point on the server. 
- pub(crate) async fn configure_fail_point( - &self, - fail_point: FailPoint, - ) -> Result { + pub(crate) async fn enable_fail_point(&self, fail_point: FailPoint) -> Result { let command = bson::to_document(&fail_point)?; self.database("admin") .run_command(command) @@ -135,6 +132,7 @@ pub(crate) enum FailPointMode { } #[derive(Debug)] +#[must_use] pub(crate) struct FailPointGuard { client: Client, failure_type: String, From 6f2606772cb65b1763e83cf4c9adf839df6e30c1 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Tue, 9 Apr 2024 10:10:01 -0600 Subject: [PATCH 36/75] empty models test --- .../new-bulk-write/client-bulkWrite-errors.json | 16 ++++++++++++++++ .../new-bulk-write/client-bulkWrite-errors.yml | 9 +++++++++ 2 files changed, 25 insertions(+) diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json index a33dffd50..0c3849973 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json @@ -433,6 +433,22 @@ } } ] + }, + { + "description": "an empty list of write models is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [], + "verboseResults": true + }, + "expectError": { + "isClientError": true + } + } + ] } ] } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml index 7c587f824..97ce17560 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml @@ -229,3 +229,12 @@ tests: writeConcernErrors: - code: *writeConcernErrorCode message: *writeConcernErrorMessage + - description: "an empty list of write models is a client-side error" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: [] + verboseResults: true + expectError: + isClientError: true From 04f06bd38eae59e98552a563b8300c47a4e03f0b Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Tue, 9 Apr 2024 10:22:17 -0600 Subject: [PATCH 37/75] write concern --- src/action/bulk_write.rs | 3 +- src/client/options/bulk_write.rs | 10 ++- .../client-bulkWrite-options.json | 78 +++++++++++++++++++ .../client-bulkWrite-options.yml | 44 ++++++++++- 4 files changed, 131 insertions(+), 4 deletions(-) diff --git a/src/action/bulk_write.rs b/src/action/bulk_write.rs index a35a7f0bc..5d65d47a3 100644 --- a/src/action/bulk_write.rs +++ b/src/action/bulk_write.rs @@ -6,7 +6,7 @@ use crate::{ bson::{Bson, Document}, error::{ClientBulkWriteError, Error, ErrorKind, Result}, operation::bulk_write::BulkWrite as BulkWriteOperation, - options::{BulkWriteOptions, WriteModel}, + options::{BulkWriteOptions, WriteConcern, WriteModel}, results::BulkWriteResult, Client, ClientSession, @@ -53,6 +53,7 @@ impl<'a> BulkWrite<'a> { comment: Bson, let_vars: Document, verbose_results: bool, + write_concern: WriteConcern, ); pub fn session(mut self, session: &'a mut ClientSession) -> BulkWrite<'a> { diff --git a/src/client/options/bulk_write.rs b/src/client/options/bulk_write.rs index c8f7cfdbc..c04e6260a 100644 --- a/src/client/options/bulk_write.rs +++ b/src/client/options/bulk_write.rs @@ -7,7 +7,7 @@ use crate::{ bson::{rawdoc, Array, Bson, Document, RawDocumentBuf}, bson_util::{get_or_prepend_id_field, 
replacement_document_check, update_document_check}, error::Result, - options::UpdateModifications, + options::{UpdateModifications, WriteConcern}, Namespace, }; @@ -22,6 +22,7 @@ pub struct BulkWriteOptions { #[serde(rename = "let")] pub let_vars: Option, pub verbose_results: Option, + pub write_concern: Option, } impl Serialize for BulkWriteOptions { @@ -35,6 +36,7 @@ impl Serialize for BulkWriteOptions { comment, let_vars, verbose_results, + write_concern, } = self; let mut map_serializer = serializer.serialize_map(None)?; @@ -44,7 +46,7 @@ impl Serialize for BulkWriteOptions { if let Some(bypass_document_validation) = bypass_document_validation { map_serializer - .serialize_entry("bypassDocumentValidation", &bypass_document_validation)?; + .serialize_entry("bypassDocumentValidation", bypass_document_validation)?; } if let Some(ref comment) = comment { @@ -58,6 +60,10 @@ impl Serialize for BulkWriteOptions { let errors_only = verbose_results.map(|b| !b).unwrap_or(true); map_serializer.serialize_entry("errorsOnly", &errors_only)?; + if let Some(ref write_concern) = write_concern { + map_serializer.serialize_entry("writeConcern", write_concern)?; + } + map_serializer.end() } } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json index e0e1aa225..fcee08079 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json @@ -54,6 +54,9 @@ "let": { "id1": 1, "id2": 2 + }, + "writeConcern": { + "w": "majority" } }, "tests": [ @@ -474,6 +477,81 @@ ] } ] + }, + { + "description": "client bulkWrite writeConcern", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "writeConcern": { + "w": "majority" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": "majority" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] } ] } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml index a82108c08..655af71b5 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml @@ -25,8 +25,9 @@ initialData: _yamlAnchors: namespace: &namespace "crud-tests.coll0" - comment: &comment { "bulk": "write" } + comment: &comment { bulk: "write" } let: &let { id1: 1, id2: 2 } + writeConcern: &writeConcern { w: "majority" } tests: - description: "client bulkWrite comment" @@ -229,3 +230,44 @@ tests: - { _id: 1, x: 11 } - { _id: 2, x: 22 } - { _id: 3, x: 33 } + - description: "client bulkWrite writeConcern" + operations: + - object: *client0 + name: 
clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + writeConcern: *writeConcern + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + writeConcern: *writeConcern + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + + + From 4a07b61d5f305a7986b975e159f01d4536a9a98b Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Tue, 9 Apr 2024 10:46:13 -0600 Subject: [PATCH 38/75] more write concern tests --- src/action/bulk_write.rs | 6 + .../client-bulkWrite-options.json | 158 ++++++++++++++++++ .../client-bulkWrite-options.yml | 88 +++++++++- 3 files changed, 247 insertions(+), 5 deletions(-) diff --git a/src/action/bulk_write.rs b/src/action/bulk_write.rs index 5d65d47a3..29444a131 100644 --- a/src/action/bulk_write.rs +++ b/src/action/bulk_write.rs @@ -67,6 +67,12 @@ impl<'a> Action for BulkWrite<'a> { type Future = BulkWriteFuture; async fn execute(mut self) -> Result { + resolve_write_concern_with_session!( + self.client, + self.options, + self.session.as_deref_mut() + )?; + let mut total_attempted = 0; let mut execution_status = ExecutionStatus::None; diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json index fcee08079..a1e6af3bf 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json @@ -15,6 +15,17 @@ ] } }, + { + "client": { + "id": "writeConcernClient", + "uriOptions": { + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, { "database": { "id": "database0", @@ -552,6 +563,153 @@ ] } ] + }, + { + "description": "client bulkWrite inherits writeConcern from client", + "operations": [ + { + "object": "writeConcernClient", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "writeConcernClient", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": 1 + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite writeConcern option overrides client writeConcern", + "operations": [ + { + "object": "writeConcernClient", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "writeConcern": { + "w": "majority" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + 
"matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "writeConcernClient", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": "majority" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] } ] } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml index 655af71b5..461944574 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml @@ -7,6 +7,11 @@ createEntities: - client: id: &client0 client0 observeEvents: [ commandStartedEvent ] + - client: + id: &writeConcernClient writeConcernClient + uriOptions: + &clientWriteConcern { w: 1 } + observeEvents: [ commandStartedEvent ] - database: id: &database0 database0 client: *client0 @@ -27,7 +32,7 @@ _yamlAnchors: namespace: &namespace "crud-tests.coll0" comment: &comment { bulk: "write" } let: &let { id1: 1, id2: 2 } - writeConcern: &writeConcern { w: "majority" } + writeConcern: &majorityWriteConcern { w: "majority" } tests: - description: "client bulkWrite comment" @@ -239,7 +244,7 @@ tests: - insertOne: namespace: *namespace document: { _id: 3, x: 33 } - writeConcern: *writeConcern + writeConcern: *majorityWriteConcern verboseResults: true expectResult: insertedCount: 1 @@ -262,12 +267,85 @@ tests: bulkWrite: 1 errorsOnly: false ordered: true - writeConcern: *writeConcern + writeConcern: *majorityWriteConcern + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + - description: "client bulkWrite inherits writeConcern from client" + operations: + - object: *writeConcernClient + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *writeConcernClient + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + writeConcern: { w: 1 } + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + - description: "client bulkWrite writeConcern option overrides client writeConcern" + operations: + - object: *writeConcernClient + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + writeConcern: *majorityWriteConcern + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *writeConcernClient + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + writeConcern: *majorityWriteConcern ops: - insert: 0 document: { _id: 3, x: 33 } nsInfo: - ns: *namespace - - From 
37852df9b120bce7136772e0ab047b2a0e30c650 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Tue, 9 Apr 2024 10:51:05 -0600 Subject: [PATCH 39/75] add retryWrites:false test --- .../client-bulkWrite-serverErrors.json | 94 +++++++++++++++++++ .../unified/client-bulkWrite-serverErrors.yml | 45 +++++++++ 2 files changed, 139 insertions(+) diff --git a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json index fd517adb5..f9812241f 100644 --- a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json +++ b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json @@ -21,6 +21,18 @@ "useMultipleMongoses": false } }, + { + "client": { + "id": "clientRetryWritesFalse", + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, { "database": { "id": "database0", @@ -773,6 +785,88 @@ ] } ] + }, + { + "description": "client bulkWrite with retryWrites: false does not retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "clientRetryWritesFalse", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "clientRetryWritesFalse", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ] + }, + "expectError": { + "errorCode": 189, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "clientRetryWritesFalse", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] } ] } diff --git a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.yml b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.yml index e5022870c..d77e491a9 100644 --- a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.yml +++ b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.yml @@ -12,6 +12,12 @@ createEntities: id: &client0 client0 observeEvents: [ commandStartedEvent ] useMultipleMongoses: false + - client: + id: &clientRetryWritesFalse clientRetryWritesFalse + uriOptions: + retryWrites: false + observeEvents: [ commandStartedEvent ] + useMultipleMongoses: false - database: id: &database0 database0 client: *client0 @@ -365,3 +371,42 @@ tests: multi: true nsInfo: - ns: *namespace + - description: "client bulkWrite with retryWrites: false does not retry" + operations: + - object: testRunner + name: failPoint + arguments: + client: *clientRetryWritesFalse + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorCode: 189 # PrimarySteppedDown + errorLabels: [ RetryableWriteError ] + - object: *clientRetryWritesFalse + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + expectError: + errorCode: 189 + errorLabelsContain: [ 
RetryableWriteError ] + expectEvents: + - client: *clientRetryWritesFalse + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace From 733cfb98acbeaf736ba7bdd504445f53c900c75b Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Tue, 9 Apr 2024 14:24:57 -0600 Subject: [PATCH 40/75] remove OperationResponse --- src/client/executor.rs | 18 ++- src/operation.rs | 32 +----- src/operation/abort_transaction.rs | 11 +- src/operation/aggregate.rs | 20 ++-- src/operation/aggregate/change_stream.rs | 22 ++-- src/operation/bulk_write.rs | 135 +++++++++++------------ src/operation/commit_transaction.rs | 11 +- src/operation/count.rs | 14 +-- src/operation/count_documents.rs | 17 ++- src/operation/create.rs | 14 +-- src/operation/create_indexes.rs | 20 ++-- src/operation/delete.rs | 12 +- src/operation/distinct.rs | 14 +-- src/operation/drop_collection.rs | 14 +-- src/operation/drop_database.rs | 14 +-- src/operation/drop_indexes.rs | 9 +- src/operation/find.rs | 20 ++-- src/operation/find_and_modify.rs | 14 ++- src/operation/get_more.rs | 14 +-- src/operation/insert.rs | 57 +++++----- src/operation/list_collections.rs | 24 ++-- src/operation/list_databases.rs | 14 +-- src/operation/list_indexes.rs | 24 ++-- src/operation/raw_output.rs | 9 +- src/operation/run_command.rs | 12 +- src/operation/run_cursor_command.rs | 21 ++-- src/operation/search_index.rs | 21 ++-- src/operation/update.rs | 11 +- 28 files changed, 308 insertions(+), 310 deletions(-) diff --git a/src/client/executor.rs b/src/client/executor.rs index 553151512..bae4e776b 100644 --- a/src/client/executor.rs +++ b/src/client/executor.rs @@ -54,7 +54,6 @@ use crate::{ CommandErrorBody, CommitTransaction, Operation, - OperationResponse, Retryability, }, options::{ChangeStreamOptions, SelectionCriteria}, @@ -789,15 +788,14 @@ impl Client { } }; - let response_result = match op.handle_response( - response, - connection.stream_description()?, - session.as_deref_mut(), - ) { - OperationResponse::Sync(result) => result, - OperationResponse::Async(future) => future.await, - }; - match response_result { + match op + .handle_response( + response, + connection.stream_description()?, + session.as_deref_mut(), + ) + .await + { Ok(response) => Ok(response), Err(mut err) => { err.add_labels_and_update_pin( diff --git a/src/operation.rs b/src/operation.rs index 7e1bbb012..b02d96d25 100644 --- a/src/operation.rs +++ b/src/operation.rs @@ -75,32 +75,6 @@ const MAX_ENCRYPTED_WRITE_SIZE: usize = 2_097_152; // The amount of overhead bytes to account for when building a document sequence. const COMMAND_OVERHEAD_SIZE: usize = 16_000; -pub(crate) enum OperationResponse<'a, O> { - Sync(Result), - Async(BoxFuture<'a, Result>), -} - -impl<'a, O> OperationResponse<'a, O> { - /// Returns the sync result contained within this `OperationResponse`. Use responsibly, when it - /// is known that the response is not async. - fn as_sync_result(self) -> Result { - match self { - Self::Sync(result) => result, - Self::Async(_) => Err(Error::internal( - "get_sync_result was called on an async response", - )), - } - } -} - -macro_rules! handle_response_sync { - ($result:block) => { - let result = || $result; - OperationResponse::Sync(result()) - }; -} -use handle_response_sync; - /// A trait modeling the behavior of a server side operation. 
/// /// No methods in this trait should have default behaviors to ensure that wrapper operations @@ -129,7 +103,7 @@ pub(crate) trait Operation { response: RawCommandResponse, description: &'a StreamDescription, session: Option<&'a mut ClientSession>, - ) -> OperationResponse<'a, Self::O>; + ) -> BoxFuture<'a, Result>; /// Interpret an error encountered while sending the built command to the server, potentially /// recovering. @@ -443,7 +417,7 @@ pub(crate) trait OperationWithDefaults { response: RawCommandResponse, description: &'a StreamDescription, session: Option<&'a mut ClientSession>, - ) -> OperationResponse<'a, Self::O>; + ) -> BoxFuture<'a, Result>; /// Interpret an error encountered while sending the built command to the server, potentially /// recovering. @@ -510,7 +484,7 @@ impl Operation for T { response: RawCommandResponse, description: &'a StreamDescription, session: Option<&'a mut ClientSession>, - ) -> OperationResponse<'a, Self::O> { + ) -> BoxFuture<'a, Result> { self.handle_response(response, description, session) } fn handle_error(&self, error: Error) -> Result { diff --git a/src/operation/abort_transaction.rs b/src/operation/abort_transaction.rs index 90de0dc50..16e43bae9 100644 --- a/src/operation/abort_transaction.rs +++ b/src/operation/abort_transaction.rs @@ -1,4 +1,5 @@ use bson::Document; +use futures_util::FutureExt; use crate::{ bson::doc, @@ -8,10 +9,11 @@ use crate::{ operation::Retryability, options::WriteConcern, selection_criteria::SelectionCriteria, + BoxFuture, ClientSession, }; -use super::{handle_response_sync, OperationResponse, OperationWithDefaults, WriteConcernOnlyBody}; +use super::{OperationWithDefaults, WriteConcernOnlyBody}; pub(crate) struct AbortTransaction { write_concern: Option, @@ -55,11 +57,12 @@ impl OperationWithDefaults for AbortTransaction { response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ + ) -> BoxFuture<'static, Result> { + async move { let response: WriteConcernOnlyBody = response.body()?; response.validate() - }} + } + .boxed() } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/aggregate.rs b/src/operation/aggregate.rs index c4ef14506..2d136ae4e 100644 --- a/src/operation/aggregate.rs +++ b/src/operation/aggregate.rs @@ -1,5 +1,7 @@ pub(crate) mod change_stream; +use futures_util::FutureExt; + use crate::{ bson::{doc, Bson, Document}, bson_util, @@ -8,14 +10,13 @@ use crate::{ error::Result, operation::{append_options, remove_empty_write_concern, Retryability}, options::{AggregateOptions, SelectionCriteria, WriteConcern}, + BoxFuture, ClientSession, Namespace, }; use super::{ - handle_response_sync, CursorBody, - OperationResponse, OperationWithDefaults, WriteConcernOnlyBody, SERVER_4_2_0_WIRE_VERSION, @@ -82,13 +83,13 @@ impl OperationWithDefaults for Aggregate { CursorBody::extract_at_cluster_time(response) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! 
{{ + description: &'a StreamDescription, + _session: Option<&'a mut ClientSession>, + ) -> BoxFuture<'a, Result> { + async move { let cursor_response: CursorBody = response.body()?; if self.is_out_or_merge() { @@ -110,7 +111,8 @@ impl OperationWithDefaults for Aggregate { self.options.as_ref().and_then(|opts| opts.max_await_time), comment, )) - }} + } + .boxed() } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/aggregate/change_stream.rs b/src/operation/aggregate/change_stream.rs index 4fe63858a..a34dca3d4 100644 --- a/src/operation/aggregate/change_stream.rs +++ b/src/operation/aggregate/change_stream.rs @@ -1,3 +1,5 @@ +use futures_util::FutureExt; + use crate::{ bson::{doc, Document}, change_stream::{event::ResumeToken, ChangeStreamData, WatchArgs}, @@ -6,10 +8,11 @@ use crate::{ error::Result, operation::{append_options, OperationWithDefaults, Retryability}, options::{ChangeStreamOptions, SelectionCriteria, WriteConcern}, + BoxFuture, ClientSession, }; -use super::{handle_response_sync, Aggregate, OperationResponse}; +use super::Aggregate; pub(crate) struct ChangeStreamAggregate { inner: Aggregate, @@ -83,13 +86,13 @@ impl OperationWithDefaults for ChangeStreamAggregate { self.inner.extract_at_cluster_time(response) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - description: &StreamDescription, - session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ + description: &'a StreamDescription, + session: Option<&'a mut ClientSession>, + ) -> BoxFuture<'a, Result> { + async move { let op_time = response .raw_body() .get("operationTime")? @@ -97,7 +100,7 @@ impl OperationWithDefaults for ChangeStreamAggregate { let spec = self .inner .handle_response(response, description, session) - .as_sync_result()?; + .await?; let mut data = ChangeStreamData { resume_token: ResumeToken::initial(self.args.options.as_ref(), &spec), @@ -117,7 +120,8 @@ impl OperationWithDefaults for ChangeStreamAggregate { } Ok((spec, data)) - }} + } + .boxed() } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index 32e4a8cef..f6a6548cb 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -14,6 +14,7 @@ use crate::{ operation::OperationWithDefaults, options::{BulkWriteOptions, OperationType, WriteModel}, results::{BulkWriteResult, DeleteResult, InsertOneResult, UpdateResult}, + BoxFuture, Client, ClientSession, Cursor, @@ -21,13 +22,7 @@ use crate::{ SessionCursor, }; -use super::{ - OperationResponse, - Retryability, - WriteResponseBody, - COMMAND_OVERHEAD_SIZE, - MAX_ENCRYPTED_WRITE_SIZE, -}; +use super::{Retryability, WriteResponseBody, COMMAND_OVERHEAD_SIZE, MAX_ENCRYPTED_WRITE_SIZE}; use server_responses::*; @@ -271,80 +266,78 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { response: RawCommandResponse, description: &'b StreamDescription, session: Option<&'b mut ClientSession>, - ) -> OperationResponse<'b, Self::O> { - OperationResponse::Async( - async move { - let response: WriteResponseBody = response.body()?; - - let mut bulk_write_error = ClientBulkWriteError::default(); - - // A partial result with summary info should only be created if one or more - // operations were successful. 
- if response.summary.n_errors < self.n_attempted as i64 { - bulk_write_error - .partial_result - .get_or_insert_with(|| BulkWriteResult::new(self.is_verbose())) - .populate_summary_info(&response.summary); - } + ) -> BoxFuture<'b, Result> { + async move { + let response: WriteResponseBody = response.body()?; + + let mut bulk_write_error = ClientBulkWriteError::default(); + + // A partial result with summary info should only be created if one or more + // operations were successful. + if response.summary.n_errors < self.n_attempted as i64 { + bulk_write_error + .partial_result + .get_or_insert_with(|| BulkWriteResult::new(self.is_verbose())) + .populate_summary_info(&response.summary); + } - if let Some(write_concern_error) = response.write_concern_error { - bulk_write_error - .write_concern_errors - .push(write_concern_error); - } + if let Some(write_concern_error) = response.write_concern_error { + bulk_write_error + .write_concern_errors + .push(write_concern_error); + } - let specification = CursorSpecification::new( - response.body.cursor, - description.server_address.clone(), - None, - None, - self.options.and_then(|options| options.comment.clone()), - ); - let iteration_result = match session { - Some(session) => { - let mut session_cursor = - SessionCursor::new(self.client.clone(), specification, None); - self.iterate_results_cursor( - session_cursor.stream(session), - &mut bulk_write_error, - ) + let specification = CursorSpecification::new( + response.body.cursor, + description.server_address.clone(), + None, + None, + self.options.and_then(|options| options.comment.clone()), + ); + let iteration_result = match session { + Some(session) => { + let mut session_cursor = + SessionCursor::new(self.client.clone(), specification, None); + self.iterate_results_cursor( + session_cursor.stream(session), + &mut bulk_write_error, + ) + .await + } + None => { + let cursor = Cursor::new(self.client.clone(), specification, None, None); + self.iterate_results_cursor(cursor, &mut bulk_write_error) .await - } - None => { - let cursor = Cursor::new(self.client.clone(), specification, None, None); - self.iterate_results_cursor(cursor, &mut bulk_write_error) - .await - } - }; - - match iteration_result { - Ok(()) => { - if bulk_write_error.write_errors.is_empty() - && bulk_write_error.write_concern_errors.is_empty() - { - Ok(bulk_write_error - .partial_result - .unwrap_or_else(|| BulkWriteResult::new(self.is_verbose()))) - } else { - let error = Error::new( - ErrorKind::ClientBulkWrite(bulk_write_error), - response.labels, - ); - Err(error) - } - } - Err(error) => { + } + }; + + match iteration_result { + Ok(()) => { + if bulk_write_error.write_errors.is_empty() + && bulk_write_error.write_concern_errors.is_empty() + { + Ok(bulk_write_error + .partial_result + .unwrap_or_else(|| BulkWriteResult::new(self.is_verbose()))) + } else { let error = Error::new( ErrorKind::ClientBulkWrite(bulk_write_error), response.labels, - ) - .with_source(error); + ); Err(error) } } + Err(error) => { + let error = Error::new( + ErrorKind::ClientBulkWrite(bulk_write_error), + response.labels, + ) + .with_source(error); + Err(error) + } } - .boxed(), - ) + } + .boxed() } fn retryability(&self) -> Retryability { diff --git a/src/operation/commit_transaction.rs b/src/operation/commit_transaction.rs index f035d673d..d08917b4b 100644 --- a/src/operation/commit_transaction.rs +++ b/src/operation/commit_transaction.rs @@ -1,16 +1,18 @@ use std::time::Duration; use bson::{doc, Document}; +use futures_util::FutureExt; use 
crate::{ cmap::{Command, RawCommandResponse, StreamDescription}, error::Result, operation::{append_options, remove_empty_write_concern, OperationWithDefaults, Retryability}, options::{Acknowledgment, TransactionOptions, WriteConcern}, + BoxFuture, ClientSession, }; -use super::{handle_response_sync, OperationResponse, WriteConcernOnlyBody}; +use super::WriteConcernOnlyBody; pub(crate) struct CommitTransaction { options: Option, @@ -48,11 +50,12 @@ impl OperationWithDefaults for CommitTransaction { response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ + ) -> BoxFuture<'static, Result> { + async move { let response: WriteConcernOnlyBody = response.body()?; response.validate() - }} + } + .boxed() } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/count.rs b/src/operation/count.rs index 76a3fe3e7..37c33590b 100644 --- a/src/operation/count.rs +++ b/src/operation/count.rs @@ -1,18 +1,17 @@ -use bson::Document; +use futures_util::FutureExt; use serde::Deserialize; use crate::{ - bson::doc, + bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, coll::{options::EstimatedDocumentCountOptions, Namespace}, error::{Error, Result}, operation::{append_options, OperationWithDefaults, Retryability}, selection_criteria::SelectionCriteria, + BoxFuture, ClientSession, }; -use super::{handle_response_sync, OperationResponse}; - pub(crate) struct Count { ns: Namespace, options: Option, @@ -50,11 +49,12 @@ impl OperationWithDefaults for Count { response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ + ) -> BoxFuture<'static, Result> { + async move { let response_body: ResponseBody = response.body()?; Ok(response_body.n) - }} + } + .boxed() } fn handle_error(&self, error: Error) -> Result { diff --git a/src/operation/count_documents.rs b/src/operation/count_documents.rs index 515e8e3a8..7afef76f5 100644 --- a/src/operation/count_documents.rs +++ b/src/operation/count_documents.rs @@ -1,5 +1,6 @@ use std::convert::TryInto; +use futures_util::FutureExt; use serde::Deserialize; use crate::{ @@ -9,17 +10,12 @@ use crate::{ operation::aggregate::Aggregate, options::{AggregateOptions, CountOptions}, selection_criteria::SelectionCriteria, + BoxFuture, ClientSession, Namespace, }; -use super::{ - handle_response_sync, - OperationResponse, - OperationWithDefaults, - Retryability, - SingleCursorResult, -}; +use super::{OperationWithDefaults, Retryability, SingleCursorResult}; pub(crate) struct CountDocuments { aggregate: Aggregate, @@ -100,11 +96,12 @@ impl OperationWithDefaults for CountDocuments { response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! 
{{ + ) -> BoxFuture<'static, Result> { + async move { let response: SingleCursorResult = response.body()?; Ok(response.0.map(|r| r.n).unwrap_or(0)) - }} + } + .boxed() } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/create.rs b/src/operation/create.rs index 54f9f411c..6c9a2696a 100644 --- a/src/operation/create.rs +++ b/src/operation/create.rs @@ -1,7 +1,7 @@ -use bson::Document; +use futures_util::FutureExt; use crate::{ - bson::doc, + bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, error::Result, operation::{ @@ -11,12 +11,11 @@ use crate::{ WriteConcernOnlyBody, }, options::{CreateCollectionOptions, WriteConcern}, + BoxFuture, ClientSession, Namespace, }; -use super::{handle_response_sync, OperationResponse}; - #[derive(Debug)] pub(crate) struct Create { ns: Namespace, @@ -55,11 +54,12 @@ impl OperationWithDefaults for Create { response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ + ) -> BoxFuture<'static, Result> { + async move { let response: WriteConcernOnlyBody = response.body()?; response.validate() - }} + } + .boxed() } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/create_indexes.rs b/src/operation/create_indexes.rs index 317ddde0c..f1cbd513c 100644 --- a/src/operation/create_indexes.rs +++ b/src/operation/create_indexes.rs @@ -1,3 +1,5 @@ +use futures_util::FutureExt; + use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -6,11 +8,12 @@ use crate::{ operation::{append_options, remove_empty_write_concern, OperationWithDefaults}, options::{CreateIndexOptions, WriteConcern}, results::CreateIndexesResult, + BoxFuture, ClientSession, Namespace, }; -use super::{handle_response_sync, OperationResponse, WriteConcernOnlyBody}; +use super::WriteConcernOnlyBody; #[derive(Debug)] pub(crate) struct CreateIndexes { @@ -71,18 +74,19 @@ impl OperationWithDefaults for CreateIndexes { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ + _description: &'a StreamDescription, + _session: Option<&'a mut ClientSession>, + ) -> BoxFuture<'a, Result> { + async move { let response: WriteConcernOnlyBody = response.body()?; response.validate()?; let index_names = self.indexes.iter().filter_map(|i| i.get_name()).collect(); Ok(CreateIndexesResult { index_names }) - }} + } + .boxed() } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/delete.rs b/src/operation/delete.rs index 23fa2aaa5..3a5ebdd06 100644 --- a/src/operation/delete.rs +++ b/src/operation/delete.rs @@ -1,3 +1,5 @@ +use futures_util::FutureExt; + use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -13,11 +15,10 @@ use crate::{ }, options::{DeleteOptions, Hint, WriteConcern}, results::DeleteResult, + BoxFuture, ClientSession, }; -use super::{handle_response_sync, OperationResponse}; - #[derive(Debug)] pub(crate) struct Delete { ns: Namespace, @@ -87,15 +88,16 @@ impl OperationWithDefaults for Delete { response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! 
{{ + ) -> BoxFuture<'static, Result> { + async move { let response: WriteResponseBody = response.body()?; response.validate().map_err(convert_bulk_errors)?; Ok(DeleteResult { deleted_count: response.n, }) - }} + } + .boxed() } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/distinct.rs b/src/operation/distinct.rs index ea48b7313..737f8339e 100644 --- a/src/operation/distinct.rs +++ b/src/operation/distinct.rs @@ -1,18 +1,17 @@ -use bson::RawBsonRef; +use futures_util::FutureExt; use serde::Deserialize; use crate::{ - bson::{doc, Bson, Document}, + bson::{doc, Bson, Document, RawBsonRef}, cmap::{Command, RawCommandResponse, StreamDescription}, coll::{options::DistinctOptions, Namespace}, error::Result, operation::{append_options, OperationWithDefaults, Retryability}, selection_criteria::SelectionCriteria, + BoxFuture, ClientSession, }; -use super::{handle_response_sync, OperationResponse}; - pub(crate) struct Distinct { ns: Namespace, field_name: String, @@ -76,11 +75,12 @@ impl OperationWithDefaults for Distinct { response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ + ) -> BoxFuture<'static, Result> { + async move { let response: Response = response.body()?; Ok(response.values) - }} + } + .boxed() } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/drop_collection.rs b/src/operation/drop_collection.rs index 686c28565..d62aa481a 100644 --- a/src/operation/drop_collection.rs +++ b/src/operation/drop_collection.rs @@ -1,7 +1,7 @@ -use bson::Document; +use futures_util::FutureExt; use crate::{ - bson::doc, + bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, error::{Error, Result}, operation::{ @@ -11,12 +11,11 @@ use crate::{ WriteConcernOnlyBody, }, options::{DropCollectionOptions, WriteConcern}, + BoxFuture, ClientSession, Namespace, }; -use super::{handle_response_sync, OperationResponse}; - #[derive(Debug)] pub(crate) struct DropCollection { ns: Namespace, @@ -55,11 +54,12 @@ impl OperationWithDefaults for DropCollection { response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ + ) -> BoxFuture<'static, Result> { + async move { let response: WriteConcernOnlyBody = response.body()?; response.validate() - }} + } + .boxed() } fn handle_error(&self, error: Error) -> Result { diff --git a/src/operation/drop_database.rs b/src/operation/drop_database.rs index 3525345e8..b0fa64917 100644 --- a/src/operation/drop_database.rs +++ b/src/operation/drop_database.rs @@ -1,7 +1,7 @@ -use bson::Document; +use futures_util::FutureExt; use crate::{ - bson::doc, + bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, db::options::DropDatabaseOptions, error::Result, @@ -12,11 +12,10 @@ use crate::{ WriteConcernOnlyBody, }, options::WriteConcern, + BoxFuture, ClientSession, }; -use super::{handle_response_sync, OperationResponse}; - #[derive(Debug)] pub(crate) struct DropDatabase { target_db: String, @@ -55,11 +54,12 @@ impl OperationWithDefaults for DropDatabase { response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! 
{{ + ) -> BoxFuture<'static, Result> { + async move { let response: WriteConcernOnlyBody = response.body()?; response.validate() - }} + } + .boxed() } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/drop_indexes.rs b/src/operation/drop_indexes.rs index 8441f62ed..17236646e 100644 --- a/src/operation/drop_indexes.rs +++ b/src/operation/drop_indexes.rs @@ -1,15 +1,16 @@ +use futures_util::FutureExt; + use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, error::Result, operation::{append_options, remove_empty_write_concern, OperationWithDefaults}, options::{DropIndexOptions, WriteConcern}, + BoxFuture, ClientSession, Namespace, }; -use super::{handle_response_sync, OperationResponse}; - pub(crate) struct DropIndexes { ns: Namespace, name: String, @@ -48,8 +49,8 @@ impl OperationWithDefaults for DropIndexes { _response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ Ok(()) }} + ) -> BoxFuture<'static, Result> { + async move { Ok(()) }.boxed() } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/find.rs b/src/operation/find.rs index 5b5a22f17..190cd1ab6 100644 --- a/src/operation/find.rs +++ b/src/operation/find.rs @@ -1,3 +1,5 @@ +use futures_util::FutureExt; + use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -11,12 +13,11 @@ use crate::{ SERVER_4_4_0_WIRE_VERSION, }, options::{CursorType, FindOptions, SelectionCriteria}, + BoxFuture, ClientSession, Namespace, }; -use super::{handle_response_sync, OperationResponse}; - #[derive(Debug)] pub(crate) struct Find { ns: Namespace, @@ -100,13 +101,13 @@ impl OperationWithDefaults for Find { CursorBody::extract_at_cluster_time(response) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ + description: &'a StreamDescription, + _session: Option<&'a mut ClientSession>, + ) -> BoxFuture<'a, Result> { + async move { let response: CursorBody = response.body()?; // The comment should only be propagated to getMore calls on 4.4+. 
@@ -125,7 +126,8 @@ impl OperationWithDefaults for Find { self.options.as_ref().and_then(|opts| opts.max_await_time), comment, )) - }} + } + .boxed() } fn supports_read_concern(&self, _description: &StreamDescription) -> bool { diff --git a/src/operation/find_and_modify.rs b/src/operation/find_and_modify.rs index 17bfd2f6a..7f33a82f2 100644 --- a/src/operation/find_and_modify.rs +++ b/src/operation/find_and_modify.rs @@ -2,12 +2,12 @@ pub(crate) mod options; use std::{fmt::Debug, marker::PhantomData}; -use bson::{from_slice, RawBson}; +use futures_util::FutureExt; use serde::{de::DeserializeOwned, Deserialize}; use self::options::FindAndModifyOptions; use crate::{ - bson::{doc, rawdoc, Document, RawDocumentBuf}, + bson::{doc, from_slice, rawdoc, Document, RawBson, RawDocumentBuf}, bson_util, cmap::{Command, RawCommandResponse, StreamDescription}, coll::{options::UpdateModifications, Namespace}, @@ -20,10 +20,11 @@ use crate::{ Retryability, }, options::WriteConcern, + BoxFuture, ClientSession, }; -use super::{handle_response_sync, OperationResponse, UpdateOrReplace}; +use super::UpdateOrReplace; pub(crate) struct FindAndModify { ns: Namespace, @@ -102,8 +103,8 @@ impl OperationWithDefaults for FindAndModify { response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ + ) -> BoxFuture<'static, Result> { + async move { #[derive(Debug, Deserialize)] pub(crate) struct Response { value: RawBson, @@ -122,7 +123,8 @@ impl OperationWithDefaults for FindAndModify { } .into()), } - }} + } + .boxed() } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/get_more.rs b/src/operation/get_more.rs index 5876fdad8..a4a075e67 100644 --- a/src/operation/get_more.rs +++ b/src/operation/get_more.rs @@ -1,10 +1,10 @@ use std::{collections::VecDeque, time::Duration}; -use bson::{Document, RawDocumentBuf}; +use futures_util::FutureExt; use serde::Deserialize; use crate::{ - bson::{doc, Bson}, + bson::{doc, Bson, Document, RawDocumentBuf}, change_stream::event::ResumeToken, cmap::{conn::PinnedConnectionHandle, Command, RawCommandResponse, StreamDescription}, cursor::CursorInformation, @@ -12,12 +12,11 @@ use crate::{ operation::OperationWithDefaults, options::SelectionCriteria, results::GetMoreResult, + BoxFuture, ClientSession, Namespace, }; -use super::{handle_response_sync, OperationResponse}; - #[derive(Debug)] pub(crate) struct GetMore<'conn> { ns: Namespace, @@ -92,8 +91,8 @@ impl<'conn> OperationWithDefaults for GetMore<'conn> { response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! 
{{ + ) -> BoxFuture<'static, Result> { + async move { let response: GetMoreResponseBody = response.body()?; Ok(GetMoreResult { @@ -105,7 +104,8 @@ impl<'conn> OperationWithDefaults for GetMore<'conn> { id: response.cursor.id, ns: Namespace::from_str(response.cursor.ns.as_str()).unwrap(), }) - }} + } + .boxed() } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/insert.rs b/src/operation/insert.rs index 44f1d44f1..4791f7ae6 100644 --- a/src/operation/insert.rs +++ b/src/operation/insert.rs @@ -1,5 +1,7 @@ use std::collections::HashMap; +use futures_util::FutureExt; + use crate::{ bson::{rawdoc, Bson, RawDocument, RawDocumentBuf}, bson_util::{ @@ -14,16 +16,12 @@ use crate::{ operation::{OperationWithDefaults, Retryability, WriteResponseBody}, options::{InsertManyOptions, WriteConcern}, results::InsertManyResult, + BoxFuture, ClientSession, Namespace, }; -use super::{ - handle_response_sync, - OperationResponse, - COMMAND_OVERHEAD_SIZE, - MAX_ENCRYPTED_WRITE_SIZE, -}; +use super::{COMMAND_OVERHEAD_SIZE, MAX_ENCRYPTED_WRITE_SIZE}; #[derive(Debug)] pub(crate) struct Insert<'a> { @@ -127,28 +125,28 @@ impl<'a> OperationWithDefaults for Insert<'a> { } } - fn handle_response( - &self, - raw_response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ - let response: WriteResponseBody = raw_response.body_utf8_lossy()?; - let response_n = Checked::::try_from(response.n)?; - - let mut map = HashMap::new(); - if self.options.ordered == Some(true) { - // in ordered inserts, only the first n were attempted. - for (i, id) in self.inserted_ids.iter().enumerate().take(response_n.get()?) { - map.insert(i, id.clone()); - } - } else { - // for unordered, add all the attempted ids and then remove the ones that have - // associated write errors. - for (i, id) in self.inserted_ids.iter().enumerate() { - map.insert(i, id.clone()); - } + fn handle_response<'b>( + &'b self, + response: RawCommandResponse, + _description: &'b StreamDescription, + _session: Option<&'b mut ClientSession>, + ) -> BoxFuture<'b, Result> { + async move { + let response: WriteResponseBody = response.body_utf8_lossy()?; + let response_n = Checked::::try_from(response.n)?; + + let mut map = HashMap::new(); + if self.options.ordered == Some(true) { + // in ordered inserts, only the first n were attempted. + for (i, id) in self.inserted_ids.iter().enumerate().take(response_n.get()?) { + map.insert(i, id.clone()); + } + } else { + // for unordered, add all the attempted ids and then remove the ones that have + // associated write errors. 
+ for (i, id) in self.inserted_ids.iter().enumerate() { + map.insert(i, id.clone()); + } if let Some(write_errors) = response.write_errors.as_ref() { for err in write_errors { @@ -169,7 +167,8 @@ impl<'a> OperationWithDefaults for Insert<'a> { } Ok(InsertManyResult { inserted_ids: map }) - }} + } + .boxed() } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/list_collections.rs b/src/operation/list_collections.rs index 5b82894b3..f1a1b5970 100644 --- a/src/operation/list_collections.rs +++ b/src/operation/list_collections.rs @@ -1,3 +1,5 @@ +use futures_util::FutureExt; + use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -5,11 +7,10 @@ use crate::{ error::Result, operation::{append_options, CursorBody, OperationWithDefaults, Retryability}, options::{ListCollectionsOptions, ReadPreference, SelectionCriteria}, + BoxFuture, ClientSession, }; -use super::{handle_response_sync, OperationResponse}; - #[derive(Debug)] pub(crate) struct ListCollections { db: String, @@ -55,14 +56,14 @@ impl OperationWithDefaults for ListCollections { Ok(Command::new(Self::NAME.to_string(), self.db.clone(), body)) } - fn handle_response( - &self, - raw_response: RawCommandResponse, - description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ - let response: CursorBody = raw_response.body()?; + fn handle_response<'a>( + &'a self, + response: RawCommandResponse, + description: &'a StreamDescription, + _session: Option<&'a mut ClientSession>, + ) -> BoxFuture<'a, Result> { + async move { + let response: CursorBody = response.body()?; Ok(CursorSpecification::new( response.cursor, description.server_address.clone(), @@ -70,7 +71,8 @@ impl OperationWithDefaults for ListCollections { None, None, )) - }} + } + .boxed() } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/list_databases.rs b/src/operation/list_databases.rs index 663d34e62..e6b81179f 100644 --- a/src/operation/list_databases.rs +++ b/src/operation/list_databases.rs @@ -1,18 +1,17 @@ -use bson::RawDocumentBuf; +use futures_util::FutureExt; use serde::Deserialize; use crate::{ - bson::{doc, Document}, + bson::{doc, Document, RawDocumentBuf}, cmap::{Command, RawCommandResponse, StreamDescription}, db::options::ListDatabasesOptions, error::Result, operation::{append_options, OperationWithDefaults, Retryability}, selection_criteria::{ReadPreference, SelectionCriteria}, + BoxFuture, ClientSession, }; -use super::{handle_response_sync, OperationResponse}; - #[derive(Debug)] pub(crate) struct ListDatabases { name_only: bool, @@ -51,11 +50,12 @@ impl OperationWithDefaults for ListDatabases { raw_response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! 
{{ + ) -> BoxFuture<'static, Result> { + async move { let response: Response = raw_response.body()?; Ok(response.databases) - }} + } + .boxed() } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/list_indexes.rs b/src/operation/list_indexes.rs index c5543e493..6d336ce61 100644 --- a/src/operation/list_indexes.rs +++ b/src/operation/list_indexes.rs @@ -1,3 +1,5 @@ +use futures_util::FutureExt; + use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -6,11 +8,12 @@ use crate::{ operation::{append_options, OperationWithDefaults}, options::ListIndexesOptions, selection_criteria::{ReadPreference, SelectionCriteria}, + BoxFuture, ClientSession, Namespace, }; -use super::{handle_response_sync, CursorBody, OperationResponse, Retryability}; +use super::{CursorBody, Retryability}; pub(crate) struct ListIndexes { ns: Namespace, @@ -45,14 +48,14 @@ impl OperationWithDefaults for ListIndexes { )) } - fn handle_response( - &self, - raw_response: RawCommandResponse, - description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ - let response: CursorBody = raw_response.body()?; + fn handle_response<'a>( + &'a self, + response: RawCommandResponse, + description: &'a StreamDescription, + _session: Option<&'a mut ClientSession>, + ) -> BoxFuture<'a, Result> { + async move { + let response: CursorBody = response.body()?; Ok(CursorSpecification::new( response.cursor, description.server_address.clone(), @@ -60,7 +63,8 @@ impl OperationWithDefaults for ListIndexes { self.options.as_ref().and_then(|o| o.max_time), None, )) - }} + } + .boxed() } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/raw_output.rs b/src/operation/raw_output.rs index 0ae35c1d1..0d835f31e 100644 --- a/src/operation/raw_output.rs +++ b/src/operation/raw_output.rs @@ -1,10 +1,13 @@ +use futures_util::FutureExt; + use crate::{ cmap::{Command, RawCommandResponse, StreamDescription}, error::Result, + BoxFuture, ClientSession, }; -use super::{handle_response_sync, Operation, OperationResponse}; +use super::Operation; /// Forwards all implementation to the wrapped `Operation`, but returns the response unparsed and /// unvalidated as a `RawCommandResponse`. @@ -32,8 +35,8 @@ impl Operation for RawOutput { response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! 
{{ Ok(response) }} + ) -> BoxFuture<'static, Result> { + async move { Ok(response) }.boxed() } fn handle_error(&self, error: crate::error::Error) -> Result { diff --git a/src/operation/run_command.rs b/src/operation/run_command.rs index e08cdcc16..2486b6c38 100644 --- a/src/operation/run_command.rs +++ b/src/operation/run_command.rs @@ -1,18 +1,18 @@ use std::convert::TryInto; -use bson::{RawBsonRef, RawDocumentBuf}; +use futures_util::FutureExt; -use super::{CursorBody, OperationWithDefaults}; use crate::{ - bson::Document, + bson::{Document, RawBsonRef, RawDocumentBuf}, client::SESSIONS_UNSUPPORTED_COMMANDS, cmap::{conn::PinnedConnectionHandle, Command, RawCommandResponse, StreamDescription}, error::{ErrorKind, Result}, selection_criteria::SelectionCriteria, + BoxFuture, ClientSession, }; -use super::{handle_response_sync, OperationResponse}; +use super::{CursorBody, OperationWithDefaults}; #[derive(Debug, Clone)] pub(crate) struct RunCommand<'conn> { @@ -99,8 +99,8 @@ impl<'conn> OperationWithDefaults for RunCommand<'conn> { response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ Ok(response.into_raw_document_buf().try_into()?) }} + ) -> BoxFuture<'static, Result> { + async move { Ok(response.into_raw_document_buf().try_into()?) }.boxed() } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/run_cursor_command.rs b/src/operation/run_cursor_command.rs index e5d6093ab..d59170a29 100644 --- a/src/operation/run_cursor_command.rs +++ b/src/operation/run_cursor_command.rs @@ -1,6 +1,7 @@ -use bson::RawDocumentBuf; +use futures_util::FutureExt; use crate::{ + bson::RawDocumentBuf, cmap::{conn::PinnedConnectionHandle, Command, RawCommandResponse, StreamDescription}, concern::WriteConcern, cursor::CursorSpecification, @@ -8,11 +9,10 @@ use crate::{ operation::{run_command::RunCommand, CursorBody, Operation}, options::RunCursorCommandOptions, selection_criteria::SelectionCriteria, + BoxFuture, ClientSession, }; -use super::{handle_response_sync, OperationResponse}; - #[derive(Debug, Clone)] pub(crate) struct RunCursorCommand<'conn> { run_command: RunCommand<'conn>, @@ -88,13 +88,13 @@ impl<'conn> Operation for RunCursorCommand<'conn> { self.run_command.name() } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! 
{{ + description: &'a StreamDescription, + _session: Option<&'a mut ClientSession>, + ) -> BoxFuture<'a, Result> { + async move { let cursor_response: CursorBody = response.body()?; let comment = match &self.options { @@ -109,6 +109,7 @@ impl<'conn> Operation for RunCursorCommand<'conn> { self.options.as_ref().and_then(|opts| opts.max_time), comment, )) - }} + } + .boxed() } } diff --git a/src/operation/search_index.rs b/src/operation/search_index.rs index 439c46e55..5091937c0 100644 --- a/src/operation/search_index.rs +++ b/src/operation/search_index.rs @@ -1,15 +1,17 @@ -use bson::{doc, Document}; +use futures_util::FutureExt; use serde::Deserialize; use crate::{ + bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, error::Result, + BoxFuture, ClientSession, Namespace, SearchIndexModel, }; -use super::{handle_response_sync, OperationResponse, OperationWithDefaults}; +use super::OperationWithDefaults; #[derive(Debug)] pub(crate) struct CreateSearchIndexes { @@ -44,8 +46,8 @@ impl OperationWithDefaults for CreateSearchIndexes { response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ + ) -> BoxFuture<'static, Result> { + async move { #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] struct Response { @@ -65,7 +67,8 @@ impl OperationWithDefaults for CreateSearchIndexes { .into_iter() .map(|ci| ci.name) .collect()) - }} + } + .boxed() } fn supports_sessions(&self) -> bool { @@ -119,8 +122,8 @@ impl OperationWithDefaults for UpdateSearchIndex { _response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ Ok(()) }} + ) -> BoxFuture<'static, Result> { + async move { Ok(()) }.boxed() } fn supports_sessions(&self) -> bool { @@ -168,8 +171,8 @@ impl OperationWithDefaults for DropSearchIndex { _response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! {{ Ok(()) }} + ) -> BoxFuture<'static, Result> { + async move { Ok(()) }.boxed() } fn handle_error(&self, error: crate::error::Error) -> Result { diff --git a/src/operation/update.rs b/src/operation/update.rs index 390081f69..274bd6684 100644 --- a/src/operation/update.rs +++ b/src/operation/update.rs @@ -1,3 +1,4 @@ +use futures_util::FutureExt; use serde::Deserialize; use crate::{ @@ -8,12 +9,11 @@ use crate::{ operation::{OperationWithDefaults, Retryability, WriteResponseBody}, options::{UpdateModifications, UpdateOptions, WriteConcern}, results::UpdateResult, + BoxFuture, ClientSession, Namespace, }; -use super::{handle_response_sync, OperationResponse}; - #[derive(Clone, Debug)] pub(crate) enum UpdateOrReplace { UpdateModifications(UpdateModifications), @@ -165,8 +165,8 @@ impl OperationWithDefaults for Update { raw_response: RawCommandResponse, _description: &StreamDescription, _session: Option<&mut ClientSession>, - ) -> OperationResponse<'static, Self::O> { - handle_response_sync! 
{{ + ) -> BoxFuture<'static, Result> { + async move { let response: WriteResponseBody = raw_response.body_utf8_lossy()?; response.validate().map_err(convert_bulk_errors)?; @@ -189,7 +189,8 @@ impl OperationWithDefaults for Update { modified_count, upserted_id, }) - }} + } + .boxed() } fn write_concern(&self) -> Option<&WriteConcern> { From de46b4a0e78bf7a3bb6cea188e05764dee334cee Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Tue, 9 Apr 2024 15:00:22 -0600 Subject: [PATCH 41/75] use pinned connection for cursor --- src/client/executor.rs | 17 ++++++------- src/operation.rs | 31 +++++++++++++++++------ src/operation/abort_transaction.rs | 12 ++++----- src/operation/aggregate.rs | 7 +++--- src/operation/aggregate/change_stream.rs | 18 +++++++------ src/operation/bulk_write.rs | 24 ++++++++++++------ src/operation/commit_transaction.rs | 12 ++++----- src/operation/count.rs | 10 ++++---- src/operation/count_documents.rs | 12 ++++----- src/operation/create.rs | 12 ++++----- src/operation/create_indexes.rs | 6 ++--- src/operation/delete.rs | 12 ++++----- src/operation/distinct.rs | 12 ++++----- src/operation/drop_collection.rs | 12 ++++----- src/operation/drop_database.rs | 12 ++++----- src/operation/drop_indexes.rs | 12 ++++----- src/operation/find.rs | 8 +++--- src/operation/find_and_modify.rs | 12 ++++----- src/operation/get_more.rs | 12 ++++----- src/operation/insert.rs | 6 ++--- src/operation/list_collections.rs | 8 +++--- src/operation/list_databases.rs | 16 ++++++------ src/operation/list_indexes.rs | 8 +++--- src/operation/raw_output.rs | 12 ++++----- src/operation/run_command.rs | 12 ++++----- src/operation/run_cursor_command.rs | 8 +++--- src/operation/search_index.rs | 32 +++++++++++------------- src/operation/update.rs | 16 ++++++------ src/test/bulk_write.rs | 16 +++++++----- 29 files changed, 198 insertions(+), 189 deletions(-) diff --git a/src/client/executor.rs b/src/client/executor.rs index bae4e776b..815400dff 100644 --- a/src/client/executor.rs +++ b/src/client/executor.rs @@ -53,6 +53,7 @@ use crate::{ AbortTransaction, CommandErrorBody, CommitTransaction, + ExecutionContext, Operation, Retryability, }, @@ -189,7 +190,7 @@ impl Client { self.inner.options.load_balanced.unwrap_or(false) } - fn pin_connection_for_cursor( + pub(crate) fn pin_connection_for_cursor( &self, spec: &CursorSpecification, conn: &mut Connection, @@ -788,14 +789,12 @@ impl Client { } }; - match op - .handle_response( - response, - connection.stream_description()?, - session.as_deref_mut(), - ) - .await - { + let context = ExecutionContext { + connection, + session: session.as_deref_mut(), + }; + + match op.handle_response(response, context).await { Ok(response) => Ok(response), Err(mut err) => { err.add_labels_and_update_pin( diff --git a/src/operation.rs b/src/operation.rs index b02d96d25..ee5f9a831 100644 --- a/src/operation.rs +++ b/src/operation.rs @@ -33,7 +33,13 @@ use crate::{ bson::{self, Bson, Document}, bson_util::{self, extend_raw_document_buf}, client::{ClusterTime, HELLO_COMMAND_NAMES, REDACTED_COMMANDS}, - cmap::{conn::PinnedConnectionHandle, Command, RawCommandResponse, StreamDescription}, + cmap::{ + conn::PinnedConnectionHandle, + Command, + Connection, + RawCommandResponse, + StreamDescription, + }, error::{ BulkWriteError, BulkWriteFailure, @@ -75,6 +81,18 @@ const MAX_ENCRYPTED_WRITE_SIZE: usize = 2_097_152; // The amount of overhead bytes to account for when building a document sequence. 
const COMMAND_OVERHEAD_SIZE: usize = 16_000; +/// Context about the execution of the operation. +pub(crate) struct ExecutionContext<'a> { + pub(crate) connection: &'a mut Connection, + pub(crate) session: Option<&'a mut ClientSession>, +} + +impl<'a> ExecutionContext<'a> { + pub(crate) fn stream_description(&'a self) -> Result<&'a StreamDescription> { + self.connection.stream_description() + } +} + /// A trait modeling the behavior of a server side operation. /// /// No methods in this trait should have default behaviors to ensure that wrapper operations @@ -101,8 +119,7 @@ pub(crate) trait Operation { fn handle_response<'a>( &'a self, response: RawCommandResponse, - description: &'a StreamDescription, - session: Option<&'a mut ClientSession>, + context: ExecutionContext<'a>, ) -> BoxFuture<'a, Result>; /// Interpret an error encountered while sending the built command to the server, potentially @@ -415,8 +432,7 @@ pub(crate) trait OperationWithDefaults { fn handle_response<'a>( &'a self, response: RawCommandResponse, - description: &'a StreamDescription, - session: Option<&'a mut ClientSession>, + context: ExecutionContext<'a>, ) -> BoxFuture<'a, Result>; /// Interpret an error encountered while sending the built command to the server, potentially @@ -482,10 +498,9 @@ impl Operation for T { fn handle_response<'a>( &'a self, response: RawCommandResponse, - description: &'a StreamDescription, - session: Option<&'a mut ClientSession>, + context: ExecutionContext<'a>, ) -> BoxFuture<'a, Result> { - self.handle_response(response, description, session) + self.handle_response(response, context) } fn handle_error(&self, error: Error) -> Result { self.handle_error(error) diff --git a/src/operation/abort_transaction.rs b/src/operation/abort_transaction.rs index 16e43bae9..f9fd57441 100644 --- a/src/operation/abort_transaction.rs +++ b/src/operation/abort_transaction.rs @@ -10,10 +10,9 @@ use crate::{ options::WriteConcern, selection_criteria::SelectionCriteria, BoxFuture, - ClientSession, }; -use super::{OperationWithDefaults, WriteConcernOnlyBody}; +use super::{ExecutionContext, OperationWithDefaults, WriteConcernOnlyBody}; pub(crate) struct AbortTransaction { write_concern: Option, @@ -52,12 +51,11 @@ impl OperationWithDefaults for AbortTransaction { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { let response: WriteConcernOnlyBody = response.body()?; response.validate() diff --git a/src/operation/aggregate.rs b/src/operation/aggregate.rs index 2d136ae4e..aaa589c04 100644 --- a/src/operation/aggregate.rs +++ b/src/operation/aggregate.rs @@ -11,12 +11,12 @@ use crate::{ operation::{append_options, remove_empty_write_concern, Retryability}, options::{AggregateOptions, SelectionCriteria, WriteConcern}, BoxFuture, - ClientSession, Namespace, }; use super::{ CursorBody, + ExecutionContext, OperationWithDefaults, WriteConcernOnlyBody, SERVER_4_2_0_WIRE_VERSION, @@ -86,8 +86,7 @@ impl OperationWithDefaults for Aggregate { fn handle_response<'a>( &'a self, response: RawCommandResponse, - description: &'a StreamDescription, - _session: Option<&'a mut ClientSession>, + context: ExecutionContext<'a>, ) -> BoxFuture<'a, Result> { async move { let cursor_response: CursorBody = response.body()?; @@ -97,6 +96,8 @@ impl OperationWithDefaults for Aggregate { 
wc_error_info.validate()?; }; + let description = context.stream_description()?; + // The comment should only be propagated to getMore calls on 4.4+. let comment = if description.max_wire_version.unwrap_or(0) < SERVER_4_4_0_WIRE_VERSION { None diff --git a/src/operation/aggregate/change_stream.rs b/src/operation/aggregate/change_stream.rs index a34dca3d4..062dc6b30 100644 --- a/src/operation/aggregate/change_stream.rs +++ b/src/operation/aggregate/change_stream.rs @@ -6,10 +6,9 @@ use crate::{ cmap::{Command, RawCommandResponse, StreamDescription}, cursor::CursorSpecification, error::Result, - operation::{append_options, OperationWithDefaults, Retryability}, + operation::{append_options, ExecutionContext, OperationWithDefaults, Retryability}, options::{ChangeStreamOptions, SelectionCriteria, WriteConcern}, BoxFuture, - ClientSession, }; use super::Aggregate; @@ -89,18 +88,19 @@ impl OperationWithDefaults for ChangeStreamAggregate { fn handle_response<'a>( &'a self, response: RawCommandResponse, - description: &'a StreamDescription, - session: Option<&'a mut ClientSession>, + mut context: ExecutionContext<'a>, ) -> BoxFuture<'a, Result> { async move { let op_time = response .raw_body() .get("operationTime")? .and_then(bson::RawBsonRef::as_timestamp); - let spec = self - .inner - .handle_response(response, description, session) - .await?; + + let inner_context = ExecutionContext { + connection: context.connection, + session: context.session.as_deref_mut(), + }; + let spec = self.inner.handle_response(response, inner_context).await?; let mut data = ChangeStreamData { resume_token: ResumeToken::initial(self.args.options.as_ref(), &spec), @@ -111,6 +111,8 @@ impl OperationWithDefaults for ChangeStreamAggregate { && o.resume_after.is_none() && o.start_after.is_none() }; + + let description = context.stream_description()?; if self.args.options.as_ref().map_or(true, has_no_time) && description.max_wire_version.map_or(false, |v| v >= 7) && spec.initial_buffer.is_empty() diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index f6a6548cb..f476f557e 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -16,13 +16,18 @@ use crate::{ results::{BulkWriteResult, DeleteResult, InsertOneResult, UpdateResult}, BoxFuture, Client, - ClientSession, Cursor, Namespace, SessionCursor, }; -use super::{Retryability, WriteResponseBody, COMMAND_OVERHEAD_SIZE, MAX_ENCRYPTED_WRITE_SIZE}; +use super::{ + ExecutionContext, + Retryability, + WriteResponseBody, + COMMAND_OVERHEAD_SIZE, + MAX_ENCRYPTED_WRITE_SIZE, +}; use server_responses::*; @@ -264,8 +269,7 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { fn handle_response<'b>( &'b self, response: RawCommandResponse, - description: &'b StreamDescription, - session: Option<&'b mut ClientSession>, + context: ExecutionContext<'b>, ) -> BoxFuture<'b, Result> { async move { let response: WriteResponseBody = response.body()?; @@ -289,15 +293,18 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { let specification = CursorSpecification::new( response.body.cursor, - description.server_address.clone(), + context.stream_description()?.server_address.clone(), None, None, self.options.and_then(|options| options.comment.clone()), ); - let iteration_result = match session { + let pinned_connection = self + .client + .pin_connection_for_cursor(&specification, context.connection)?; + let iteration_result = match context.session { Some(session) => { let mut session_cursor = - SessionCursor::new(self.client.clone(), specification, None); + 
SessionCursor::new(self.client.clone(), specification, pinned_connection); self.iterate_results_cursor( session_cursor.stream(session), &mut bulk_write_error, @@ -305,7 +312,8 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { .await } None => { - let cursor = Cursor::new(self.client.clone(), specification, None, None); + let cursor = + Cursor::new(self.client.clone(), specification, None, pinned_connection); self.iterate_results_cursor(cursor, &mut bulk_write_error) .await } diff --git a/src/operation/commit_transaction.rs b/src/operation/commit_transaction.rs index d08917b4b..6c621cbf3 100644 --- a/src/operation/commit_transaction.rs +++ b/src/operation/commit_transaction.rs @@ -9,10 +9,9 @@ use crate::{ operation::{append_options, remove_empty_write_concern, OperationWithDefaults, Retryability}, options::{Acknowledgment, TransactionOptions, WriteConcern}, BoxFuture, - ClientSession, }; -use super::WriteConcernOnlyBody; +use super::{ExecutionContext, WriteConcernOnlyBody}; pub(crate) struct CommitTransaction { options: Option, @@ -45,12 +44,11 @@ impl OperationWithDefaults for CommitTransaction { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { let response: WriteConcernOnlyBody = response.body()?; response.validate() diff --git a/src/operation/count.rs b/src/operation/count.rs index 37c33590b..201e9f79d 100644 --- a/src/operation/count.rs +++ b/src/operation/count.rs @@ -9,9 +9,10 @@ use crate::{ operation::{append_options, OperationWithDefaults, Retryability}, selection_criteria::SelectionCriteria, BoxFuture, - ClientSession, }; +use super::ExecutionContext; + pub(crate) struct Count { ns: Namespace, options: Option, @@ -44,11 +45,10 @@ impl OperationWithDefaults for Count { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, + _context: ExecutionContext<'a>, ) -> BoxFuture<'static, Result> { async move { let response_body: ResponseBody = response.body()?; diff --git a/src/operation/count_documents.rs b/src/operation/count_documents.rs index 7afef76f5..97a90409c 100644 --- a/src/operation/count_documents.rs +++ b/src/operation/count_documents.rs @@ -11,11 +11,10 @@ use crate::{ options::{AggregateOptions, CountOptions}, selection_criteria::SelectionCriteria, BoxFuture, - ClientSession, Namespace, }; -use super::{OperationWithDefaults, Retryability, SingleCursorResult}; +use super::{ExecutionContext, OperationWithDefaults, Retryability, SingleCursorResult}; pub(crate) struct CountDocuments { aggregate: Aggregate, @@ -91,12 +90,11 @@ impl OperationWithDefaults for CountDocuments { self.aggregate.extract_at_cluster_time(response) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { let response: SingleCursorResult = response.body()?; Ok(response.0.map(|r| r.n).unwrap_or(0)) diff --git a/src/operation/create.rs b/src/operation/create.rs index 6c9a2696a..8d375c4fa 100644 --- a/src/operation/create.rs +++ b/src/operation/create.rs @@ -12,10 +12,11 @@ use crate::{ }, 
options::{CreateCollectionOptions, WriteConcern}, BoxFuture, - ClientSession, Namespace, }; +use super::ExecutionContext; + #[derive(Debug)] pub(crate) struct Create { ns: Namespace, @@ -49,12 +50,11 @@ impl OperationWithDefaults for Create { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { let response: WriteConcernOnlyBody = response.body()?; response.validate() diff --git a/src/operation/create_indexes.rs b/src/operation/create_indexes.rs index f1cbd513c..b17ea13de 100644 --- a/src/operation/create_indexes.rs +++ b/src/operation/create_indexes.rs @@ -9,11 +9,10 @@ use crate::{ options::{CreateIndexOptions, WriteConcern}, results::CreateIndexesResult, BoxFuture, - ClientSession, Namespace, }; -use super::WriteConcernOnlyBody; +use super::{ExecutionContext, WriteConcernOnlyBody}; #[derive(Debug)] pub(crate) struct CreateIndexes { @@ -77,8 +76,7 @@ impl OperationWithDefaults for CreateIndexes { fn handle_response<'a>( &'a self, response: RawCommandResponse, - _description: &'a StreamDescription, - _session: Option<&'a mut ClientSession>, + _context: ExecutionContext<'a>, ) -> BoxFuture<'a, Result> { async move { let response: WriteConcernOnlyBody = response.body()?; diff --git a/src/operation/delete.rs b/src/operation/delete.rs index 3a5ebdd06..1a13184c2 100644 --- a/src/operation/delete.rs +++ b/src/operation/delete.rs @@ -16,9 +16,10 @@ use crate::{ options::{DeleteOptions, Hint, WriteConcern}, results::DeleteResult, BoxFuture, - ClientSession, }; +use super::ExecutionContext; + #[derive(Debug)] pub(crate) struct Delete { ns: Namespace, @@ -83,12 +84,11 @@ impl OperationWithDefaults for Delete { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { let response: WriteResponseBody = response.body()?; response.validate().map_err(convert_bulk_errors)?; diff --git a/src/operation/distinct.rs b/src/operation/distinct.rs index 737f8339e..addb093b0 100644 --- a/src/operation/distinct.rs +++ b/src/operation/distinct.rs @@ -9,9 +9,10 @@ use crate::{ operation::{append_options, OperationWithDefaults, Retryability}, selection_criteria::SelectionCriteria, BoxFuture, - ClientSession, }; +use super::ExecutionContext; + pub(crate) struct Distinct { ns: Namespace, field_name: String, @@ -70,12 +71,11 @@ impl OperationWithDefaults for Distinct { .and_then(RawBsonRef::as_timestamp)) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { let response: Response = response.body()?; Ok(response.values) diff --git a/src/operation/drop_collection.rs b/src/operation/drop_collection.rs index d62aa481a..d59f64a18 100644 --- a/src/operation/drop_collection.rs +++ b/src/operation/drop_collection.rs @@ -12,10 +12,11 @@ use crate::{ }, options::{DropCollectionOptions, WriteConcern}, BoxFuture, - ClientSession, Namespace, }; +use super::ExecutionContext; + #[derive(Debug)] pub(crate) struct DropCollection { ns: 
Namespace, @@ -49,12 +50,11 @@ impl OperationWithDefaults for DropCollection { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { let response: WriteConcernOnlyBody = response.body()?; response.validate() diff --git a/src/operation/drop_database.rs b/src/operation/drop_database.rs index b0fa64917..f74612524 100644 --- a/src/operation/drop_database.rs +++ b/src/operation/drop_database.rs @@ -13,9 +13,10 @@ use crate::{ }, options::WriteConcern, BoxFuture, - ClientSession, }; +use super::ExecutionContext; + #[derive(Debug)] pub(crate) struct DropDatabase { target_db: String, @@ -49,12 +50,11 @@ impl OperationWithDefaults for DropDatabase { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { let response: WriteConcernOnlyBody = response.body()?; response.validate() diff --git a/src/operation/drop_indexes.rs b/src/operation/drop_indexes.rs index 17236646e..98051818e 100644 --- a/src/operation/drop_indexes.rs +++ b/src/operation/drop_indexes.rs @@ -7,10 +7,11 @@ use crate::{ operation::{append_options, remove_empty_write_concern, OperationWithDefaults}, options::{DropIndexOptions, WriteConcern}, BoxFuture, - ClientSession, Namespace, }; +use super::ExecutionContext; + pub(crate) struct DropIndexes { ns: Namespace, name: String, @@ -44,12 +45,11 @@ impl OperationWithDefaults for DropIndexes { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, _response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { Ok(()) }.boxed() } diff --git a/src/operation/find.rs b/src/operation/find.rs index 190cd1ab6..3384fe554 100644 --- a/src/operation/find.rs +++ b/src/operation/find.rs @@ -14,10 +14,11 @@ use crate::{ }, options::{CursorType, FindOptions, SelectionCriteria}, BoxFuture, - ClientSession, Namespace, }; +use super::ExecutionContext; + #[derive(Debug)] pub(crate) struct Find { ns: Namespace, @@ -104,12 +105,13 @@ impl OperationWithDefaults for Find { fn handle_response<'a>( &'a self, response: RawCommandResponse, - description: &'a StreamDescription, - _session: Option<&'a mut ClientSession>, + context: ExecutionContext<'a>, ) -> BoxFuture<'a, Result> { async move { let response: CursorBody = response.body()?; + let description = context.stream_description()?; + // The comment should only be propagated to getMore calls on 4.4+. 
let comment = if description.max_wire_version.unwrap_or(0) < SERVER_4_4_0_WIRE_VERSION { None diff --git a/src/operation/find_and_modify.rs b/src/operation/find_and_modify.rs index 7f33a82f2..52d75fb5c 100644 --- a/src/operation/find_and_modify.rs +++ b/src/operation/find_and_modify.rs @@ -21,10 +21,9 @@ use crate::{ }, options::WriteConcern, BoxFuture, - ClientSession, }; -use super::UpdateOrReplace; +use super::{ExecutionContext, UpdateOrReplace}; pub(crate) struct FindAndModify { ns: Namespace, @@ -98,12 +97,11 @@ impl OperationWithDefaults for FindAndModify { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { #[derive(Debug, Deserialize)] pub(crate) struct Response { diff --git a/src/operation/get_more.rs b/src/operation/get_more.rs index a4a075e67..55915e65e 100644 --- a/src/operation/get_more.rs +++ b/src/operation/get_more.rs @@ -13,10 +13,11 @@ use crate::{ options::SelectionCriteria, results::GetMoreResult, BoxFuture, - ClientSession, Namespace, }; +use super::ExecutionContext; + #[derive(Debug)] pub(crate) struct GetMore<'conn> { ns: Namespace, @@ -86,12 +87,11 @@ impl<'conn> OperationWithDefaults for GetMore<'conn> { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { let response: GetMoreResponseBody = response.body()?; diff --git a/src/operation/insert.rs b/src/operation/insert.rs index 4791f7ae6..fd58b8f27 100644 --- a/src/operation/insert.rs +++ b/src/operation/insert.rs @@ -17,11 +17,10 @@ use crate::{ options::{InsertManyOptions, WriteConcern}, results::InsertManyResult, BoxFuture, - ClientSession, Namespace, }; -use super::{COMMAND_OVERHEAD_SIZE, MAX_ENCRYPTED_WRITE_SIZE}; +use super::{ExecutionContext, COMMAND_OVERHEAD_SIZE, MAX_ENCRYPTED_WRITE_SIZE}; #[derive(Debug)] pub(crate) struct Insert<'a> { @@ -128,8 +127,7 @@ impl<'a> OperationWithDefaults for Insert<'a> { fn handle_response<'b>( &'b self, response: RawCommandResponse, - _description: &'b StreamDescription, - _session: Option<&'b mut ClientSession>, + _context: ExecutionContext<'b>, ) -> BoxFuture<'b, Result> { async move { let response: WriteResponseBody = response.body_utf8_lossy()?; diff --git a/src/operation/list_collections.rs b/src/operation/list_collections.rs index f1a1b5970..2e445b777 100644 --- a/src/operation/list_collections.rs +++ b/src/operation/list_collections.rs @@ -8,9 +8,10 @@ use crate::{ operation::{append_options, CursorBody, OperationWithDefaults, Retryability}, options::{ListCollectionsOptions, ReadPreference, SelectionCriteria}, BoxFuture, - ClientSession, }; +use super::ExecutionContext; + #[derive(Debug)] pub(crate) struct ListCollections { db: String, @@ -59,14 +60,13 @@ impl OperationWithDefaults for ListCollections { fn handle_response<'a>( &'a self, response: RawCommandResponse, - description: &'a StreamDescription, - _session: Option<&'a mut ClientSession>, + context: ExecutionContext<'a>, ) -> BoxFuture<'a, Result> { async move { let response: CursorBody = response.body()?; Ok(CursorSpecification::new( response.cursor, - description.server_address.clone(), + 
context.stream_description()?.server_address.clone(), self.options.as_ref().and_then(|opts| opts.batch_size), None, None, diff --git a/src/operation/list_databases.rs b/src/operation/list_databases.rs index e6b81179f..d2dcf646f 100644 --- a/src/operation/list_databases.rs +++ b/src/operation/list_databases.rs @@ -9,9 +9,10 @@ use crate::{ operation::{append_options, OperationWithDefaults, Retryability}, selection_criteria::{ReadPreference, SelectionCriteria}, BoxFuture, - ClientSession, }; +use super::ExecutionContext; + #[derive(Debug)] pub(crate) struct ListDatabases { name_only: bool, @@ -45,14 +46,13 @@ impl OperationWithDefaults for ListDatabases { )) } - fn handle_response( - &self, - raw_response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + fn handle_response<'a>( + &'a self, + response: RawCommandResponse, + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { - let response: Response = raw_response.body()?; + let response: Response = response.body()?; Ok(response.databases) } .boxed() diff --git a/src/operation/list_indexes.rs b/src/operation/list_indexes.rs index 6d336ce61..f87af0cd4 100644 --- a/src/operation/list_indexes.rs +++ b/src/operation/list_indexes.rs @@ -9,11 +9,10 @@ use crate::{ options::ListIndexesOptions, selection_criteria::{ReadPreference, SelectionCriteria}, BoxFuture, - ClientSession, Namespace, }; -use super::{CursorBody, Retryability}; +use super::{CursorBody, ExecutionContext, Retryability}; pub(crate) struct ListIndexes { ns: Namespace, @@ -51,14 +50,13 @@ impl OperationWithDefaults for ListIndexes { fn handle_response<'a>( &'a self, response: RawCommandResponse, - description: &'a StreamDescription, - _session: Option<&'a mut ClientSession>, + context: ExecutionContext<'a>, ) -> BoxFuture<'a, Result> { async move { let response: CursorBody = response.body()?; Ok(CursorSpecification::new( response.cursor, - description.server_address.clone(), + context.stream_description()?.server_address.clone(), self.options.as_ref().and_then(|o| o.batch_size), self.options.as_ref().and_then(|o| o.max_time), None, diff --git a/src/operation/raw_output.rs b/src/operation/raw_output.rs index 0d835f31e..15f6791a5 100644 --- a/src/operation/raw_output.rs +++ b/src/operation/raw_output.rs @@ -4,10 +4,9 @@ use crate::{ cmap::{Command, RawCommandResponse, StreamDescription}, error::Result, BoxFuture, - ClientSession, }; -use super::Operation; +use super::{ExecutionContext, Operation}; /// Forwards all implementation to the wrapped `Operation`, but returns the response unparsed and /// unvalidated as a `RawCommandResponse`. 
@@ -30,12 +29,11 @@ impl Operation for RawOutput { self.0.extract_at_cluster_time(response) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { Ok(response) }.boxed() } diff --git a/src/operation/run_command.rs b/src/operation/run_command.rs index 2486b6c38..48552de96 100644 --- a/src/operation/run_command.rs +++ b/src/operation/run_command.rs @@ -9,10 +9,9 @@ use crate::{ error::{ErrorKind, Result}, selection_criteria::SelectionCriteria, BoxFuture, - ClientSession, }; -use super::{CursorBody, OperationWithDefaults}; +use super::{CursorBody, ExecutionContext, OperationWithDefaults}; #[derive(Debug, Clone)] pub(crate) struct RunCommand<'conn> { @@ -94,12 +93,11 @@ impl<'conn> OperationWithDefaults for RunCommand<'conn> { } } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { Ok(response.into_raw_document_buf().try_into()?) }.boxed() } diff --git a/src/operation/run_cursor_command.rs b/src/operation/run_cursor_command.rs index d59170a29..2343ad93f 100644 --- a/src/operation/run_cursor_command.rs +++ b/src/operation/run_cursor_command.rs @@ -10,9 +10,10 @@ use crate::{ options::RunCursorCommandOptions, selection_criteria::SelectionCriteria, BoxFuture, - ClientSession, }; +use super::ExecutionContext; + #[derive(Debug, Clone)] pub(crate) struct RunCursorCommand<'conn> { run_command: RunCommand<'conn>, @@ -91,8 +92,7 @@ impl<'conn> Operation for RunCursorCommand<'conn> { fn handle_response<'a>( &'a self, response: RawCommandResponse, - description: &'a StreamDescription, - _session: Option<&'a mut ClientSession>, + context: ExecutionContext<'a>, ) -> BoxFuture<'a, Result> { async move { let cursor_response: CursorBody = response.body()?; @@ -104,7 +104,7 @@ impl<'conn> Operation for RunCursorCommand<'conn> { Ok(CursorSpecification::new( cursor_response.cursor, - description.server_address.clone(), + context.stream_description()?.server_address.clone(), self.options.as_ref().and_then(|opts| opts.batch_size), self.options.as_ref().and_then(|opts| opts.max_time), comment, diff --git a/src/operation/search_index.rs b/src/operation/search_index.rs index 5091937c0..819a7e0bf 100644 --- a/src/operation/search_index.rs +++ b/src/operation/search_index.rs @@ -3,15 +3,14 @@ use serde::Deserialize; use crate::{ bson::{doc, Document}, - cmap::{Command, RawCommandResponse, StreamDescription}, + cmap::{Command, RawCommandResponse}, error::Result, BoxFuture, - ClientSession, Namespace, SearchIndexModel, }; -use super::OperationWithDefaults; +use super::{ExecutionContext, OperationWithDefaults}; #[derive(Debug)] pub(crate) struct CreateSearchIndexes { @@ -41,12 +40,11 @@ impl OperationWithDefaults for CreateSearchIndexes { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] @@ -117,12 +115,11 @@ impl OperationWithDefaults for 
UpdateSearchIndex { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, _response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { Ok(()) }.boxed() } @@ -166,12 +163,11 @@ impl OperationWithDefaults for DropSearchIndex { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, _response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { Ok(()) }.boxed() } diff --git a/src/operation/update.rs b/src/operation/update.rs index 274bd6684..e9e0cb87e 100644 --- a/src/operation/update.rs +++ b/src/operation/update.rs @@ -10,10 +10,11 @@ use crate::{ options::{UpdateModifications, UpdateOptions, WriteConcern}, results::UpdateResult, BoxFuture, - ClientSession, Namespace, }; +use super::ExecutionContext; + #[derive(Clone, Debug)] pub(crate) enum UpdateOrReplace { UpdateModifications(UpdateModifications), @@ -160,14 +161,13 @@ impl OperationWithDefaults for Update { )) } - fn handle_response( - &self, - raw_response: RawCommandResponse, - _description: &StreamDescription, - _session: Option<&mut ClientSession>, - ) -> BoxFuture<'static, Result> { + fn handle_response<'a>( + &'a self, + response: RawCommandResponse, + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { async move { - let response: WriteResponseBody = raw_response.body_utf8_lossy()?; + let response: WriteResponseBody = response.body_utf8_lossy()?; response.validate().map_err(convert_bulk_errors)?; let modified_count = response.n_modified; diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 1c7650f17..151b4540d 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -43,16 +43,18 @@ async fn max_write_batch_size_batching() { let result = client.bulk_write(models).await.unwrap(); assert_eq!(result.inserted_count as usize, max_write_batch_size + 1); - let command_started_events = event_buffer.get_command_started_events(&["bulkWrite"]); + let mut command_started_events = event_buffer + .get_command_started_events(&["bulkWrite"]) + .into_iter(); let first_event = command_started_events - .get(0) + .next() .expect("no first event observed"); let first_len = first_event.command.get_array("ops").unwrap().len(); assert_eq!(first_len, max_write_batch_size); let second_event = command_started_events - .get(1) + .next() .expect("no second event observed"); let second_len = second_event.command.get_array("ops").unwrap().len(); assert_eq!(second_len, 1); @@ -85,16 +87,18 @@ async fn max_message_size_bytes_batching() { let result = client.bulk_write(models).await.unwrap(); assert_eq!(result.inserted_count as usize, num_models); - let command_started_events = event_buffer.get_command_started_events(&["bulkWrite"]); + let mut command_started_events = event_buffer + .get_command_started_events(&["bulkWrite"]) + .into_iter(); let first_event = command_started_events - .get(0) + .next() .expect("no first event observed"); let first_len = first_event.command.get_array("ops").unwrap().len(); assert_eq!(first_len, num_models - 1); let second_event = command_started_events - .get(1) + .next() .expect("no second event observed"); let second_len = second_event.command.get_array("ops").unwrap().len(); assert_eq!(second_len, 1); From 
acbc534f08784d02319bf04c4293d38993d85425 Mon Sep 17 00:00:00 2001
From: Isabel Atkinson
Date: Tue, 9 Apr 2024 15:54:23 -0600
Subject: [PATCH 42/75] reduce _id size

---
 src/test/bulk_write.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs
index 151b4540d..84c53a1f7 100644
--- a/src/test/bulk_write.rs
+++ b/src/test/bulk_write.rs
@@ -224,7 +224,7 @@ async fn successful_cursor_iteration() {
 
     let max_bson_object_size = client.server_info.max_bson_object_size as usize;
 
-    let document = doc! { "_id": "a".repeat(max_bson_object_size - 500) };
+    let document = doc! { "_id": "a".repeat(max_bson_object_size / 2) };
 
     let collection = client.database("db").collection("coll");
     collection.drop().await.unwrap();
@@ -268,7 +268,7 @@ async fn failed_cursor_iteration() {
     let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(8);
     let _guard = client.enable_fail_point(fail_point).await.unwrap();
 
-    let document = doc! { "_id": "a".repeat(max_bson_object_size - 500) };
+    let document = doc! { "_id": "a".repeat(max_bson_object_size / 2) };
 
     let collection = client.database("db").collection("coll");
     collection.drop().await.unwrap();

From 20ce39dcf0e091fe58fdffc86ee0036480a49ebf Mon Sep 17 00:00:00 2001
From: Isabel Atkinson
Date: Wed, 10 Apr 2024 14:08:53 -0600
Subject: [PATCH 43/75] fix fle

---
 src/test/csfle.rs | 30 +++++++-----------------------
 1 file changed, 7 insertions(+), 23 deletions(-)

diff --git a/src/test/csfle.rs b/src/test/csfle.rs
index 5435f57f7..63589960b 100644
--- a/src/test/csfle.rs
+++ b/src/test/csfle.rs
@@ -53,14 +53,7 @@ use crate::{
 
 #[allow(deprecated)]
 use super::EventClient;
-use super::{
-    get_client_options,
-    log_uncaptured,
-    FailCommandOptions,
-    FailPoint,
-    FailPointMode,
-    TestClient,
-};
+use super::{get_client_options, log_uncaptured, FailPoint, FailPointMode, TestClient};
 
 type Result<T> = anyhow::Result<T>;
 
@@ -2414,12 +2407,8 @@ async fn decryption_events_command_error() -> Result<()> {
         None => return Ok(()),
     };
 
-    let fp = FailPoint::fail_command(
-        &["aggregate"],
-        FailPointMode::Times(1),
-        FailCommandOptions::builder().error_code(123).build(),
-    );
-    let _guard = fp.enable(&td.setup_client, None).await?;
+    let fail_point = FailPoint::new(&["aggregate"], FailPointMode::Times(1)).error_code(123);
+    let _guard = td.setup_client.enable_fail_point(fail_point).await.unwrap();
     let err = td
         .decryption_events
         .aggregate(vec![doc! { "$count": "total" }])
@@ -2443,15 +2432,10 @@ async fn decryption_events_network_error() -> Result<()> {
         None => return Ok(()),
     };
 
-    let fp = FailPoint::fail_command(
-        &["aggregate"],
-        FailPointMode::Times(1),
-        FailCommandOptions::builder()
-            .error_code(123)
-            .close_connection(true)
-            .build(),
-    );
-    let _guard = fp.enable(&td.setup_client, None).await?;
+    let fail_point = FailPoint::new(&["aggregate"], FailPointMode::Times(1))
+        .error_code(123)
+        .close_connection(true);
+    let _guard = td.setup_client.enable_fail_point(fail_point).await.unwrap();
     let err = td
         .decryption_events
         .aggregate(vec![doc!
{ "$count": "total" }]) From cfd0c3e006cb76a919a5f3c28514b12cdf09e740 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Wed, 10 Apr 2024 14:50:50 -0600 Subject: [PATCH 44/75] strip extra mongoses --- src/test/bulk_write.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 84c53a1f7..b84b191ec 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -14,6 +14,8 @@ use crate::{ Namespace, }; +use super::TestClient; + #[tokio::test(flavor = "multi_thread")] async fn run_unified() { run_unified_tests(&["crud", "unified", "new-bulk-write"]).await; @@ -108,6 +110,9 @@ async fn max_message_size_bytes_batching() { async fn write_concern_error_batches() { let mut options = get_client_options().await.clone(); options.retry_writes = Some(false); + if TestClient::new().await.is_sharded() { + options.hosts.drain(1..); + } let event_buffer = EventBuffer::new(); let client = Client::test_builder() @@ -252,8 +257,14 @@ async fn successful_cursor_iteration() { #[tokio::test(flavor = "multi_thread")] async fn failed_cursor_iteration() { + let mut options = get_client_options().await.clone(); + if TestClient::new().await.is_sharded() { + options.hosts.drain(1..); + } + let event_buffer = EventBuffer::new(); let client = Client::test_builder() + .options(options) .event_buffer(event_buffer.clone()) .build() .await; From dbe435211a46d3b493da1d0330390b4e8570124f Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 11 Apr 2024 09:57:00 -0600 Subject: [PATCH 45/75] rework iteration tests --- src/test/bulk_write.rs | 153 ++++++++++++++++++++++++++++++++--------- 1 file changed, 119 insertions(+), 34 deletions(-) diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index b84b191ec..dee967931 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -1,6 +1,6 @@ use crate::{ bson::doc, - error::ErrorKind, + error::{ClientBulkWriteError, ErrorKind}, options::WriteModel, test::{ get_client_options, @@ -229,27 +229,36 @@ async fn successful_cursor_iteration() { let max_bson_object_size = client.server_info.max_bson_object_size as usize; - let document = doc! { "_id": "a".repeat(max_bson_object_size / 2) }; - - let collection = client.database("db").collection("coll"); + let collection = client.database("db").collection::("coll"); collection.drop().await.unwrap(); - collection.insert_one(document.clone()).await.unwrap(); let models = vec![ - WriteModel::InsertOne { + WriteModel::UpdateOne { namespace: collection.namespace(), - document - }; - 2 + filter: doc! { "_id": "a".repeat(max_bson_object_size / 2) }, + update: doc! { "$set": { "x": 1 } }.into(), + array_filters: None, + collation: None, + hint: None, + upsert: Some(true), + }, + WriteModel::UpdateOne { + namespace: collection.namespace(), + filter: doc! { "_id": "b".repeat(max_bson_object_size / 2) }, + update: doc! 
{ "$set": { "x": 1 } }.into(), + array_filters: None, + collation: None, + hint: None, + upsert: Some(true), + }, ]; - let error = client.bulk_write(models).ordered(false).await.unwrap_err(); - - let ErrorKind::ClientBulkWrite(bulk_write_error) = *error.kind else { - panic!("Expected bulk write error, got {:?}", error); - }; - let write_errors = bulk_write_error.write_errors; - assert_eq!(write_errors.len(), 2); + let result = client + .bulk_write(models) + .verbose_results(true) + .await + .unwrap(); + assert_eq!(result.upserted_count, 2); let command_started_events = event_buffer.get_command_started_events(&["getMore"]); assert_eq!(command_started_events.len(), 1); @@ -257,14 +266,8 @@ async fn successful_cursor_iteration() { #[tokio::test(flavor = "multi_thread")] async fn failed_cursor_iteration() { - let mut options = get_client_options().await.clone(); - if TestClient::new().await.is_sharded() { - options.hosts.drain(1..); - } - let event_buffer = EventBuffer::new(); let client = Client::test_builder() - .options(options) .event_buffer(event_buffer.clone()) .build() .await; @@ -279,28 +282,110 @@ async fn failed_cursor_iteration() { let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(8); let _guard = client.enable_fail_point(fail_point).await.unwrap(); - let document = doc! { "_id": "a".repeat(max_bson_object_size / 2) }; - - let collection = client.database("db").collection("coll"); + let collection = client.database("db").collection::("coll"); collection.drop().await.unwrap(); - collection.insert_one(document.clone()).await.unwrap(); let models = vec![ - WriteModel::InsertOne { + WriteModel::UpdateOne { namespace: collection.namespace(), - document - }; - 2 + filter: doc! { "_id": "a".repeat(max_bson_object_size / 2) }, + update: doc! { "$set": { "x": 1 } }.into(), + array_filters: None, + collation: None, + hint: None, + upsert: Some(true), + }, + WriteModel::UpdateOne { + namespace: collection.namespace(), + filter: doc! { "_id": "b".repeat(max_bson_object_size / 2) }, + update: doc! { "$set": { "x": 1 } }.into(), + array_filters: None, + collation: None, + hint: None, + upsert: Some(true), + }, ]; - let error = client.bulk_write(models).ordered(false).await.unwrap_err(); + + let error = client + .bulk_write(models) + .verbose_results(true) + .await + .unwrap_err(); let Some(ref source) = error.source else { panic!("Expected error to contain source"); }; assert_eq!(source.code(), Some(8)); - let ErrorKind::ClientBulkWrite(bulk_write_error) = *error.kind else { - panic!("Expected bulk write error, got {:?}", error); + let ErrorKind::ClientBulkWrite(ClientBulkWriteError { + partial_result: Some(partial_result), + .. 
+ }) = *error.kind + else { + panic!( + "Expected bulk write error with partial result, got {:?}", + error + ); }; - assert_eq!(bulk_write_error.write_errors.len(), 1); + assert_eq!(partial_result.upserted_count, 2); + + let get_more_events = event_buffer.get_command_started_events(&["getMore"]); + assert_eq!(get_more_events.len(), 1); + + let kill_cursors_events = event_buffer.get_command_started_events(&["killCursors"]); + assert_eq!(kill_cursors_events.len(), 1); +} + +#[tokio::test] +async fn cursor_iteration_in_a_transaction() { + let event_buffer = EventBuffer::new(); + let client = Client::test_builder() + .event_buffer(event_buffer.clone()) + .build() + .await; + + if client.server_version_lt(8, 0) { + log_uncaptured("skipping cursor_iteration_in_a_transaction: bulkWrite requires 8.0+"); + return; + } + + let max_bson_object_size = client.server_info.max_bson_object_size as usize; + + let collection = client.database("db").collection::("coll"); + collection.drop().await.unwrap(); + + let mut session = client.start_session().await.unwrap(); + session.start_transaction().await.unwrap(); + + let models = vec![ + WriteModel::UpdateOne { + namespace: collection.namespace(), + filter: doc! { "_id": "a".repeat(max_bson_object_size / 2) }, + update: doc! { "$set": { "x": 1 } }.into(), + array_filters: None, + collation: None, + hint: None, + upsert: Some(true), + }, + WriteModel::UpdateOne { + namespace: collection.namespace(), + filter: doc! { "_id": "b".repeat(max_bson_object_size / 2) }, + update: doc! { "$set": { "x": 1 } }.into(), + array_filters: None, + collation: None, + hint: None, + upsert: Some(true), + }, + ]; + + let result = client + .bulk_write(models) + .verbose_results(true) + .session(&mut session) + .await + .unwrap(); + assert_eq!(result.upserted_count, 2); + + let command_started_events = event_buffer.get_command_started_events(&["getMore"]); + assert_eq!(command_started_events.len(), 1); } From a5612cd2b353d8cdf52456190fe7aed677aedb29 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 11 Apr 2024 10:50:36 -0600 Subject: [PATCH 46/75] more assertions --- src/test/bulk_write.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index dee967931..17c2069a2 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -259,6 +259,7 @@ async fn successful_cursor_iteration() { .await .unwrap(); assert_eq!(result.upserted_count, 2); + assert_eq!(result.update_results.unwrap().len(), 2); let command_started_events = event_buffer.get_command_started_events(&["getMore"]); assert_eq!(command_started_events.len(), 1); @@ -328,6 +329,7 @@ async fn failed_cursor_iteration() { ); }; assert_eq!(partial_result.upserted_count, 2); + assert_eq!(partial_result.update_results.unwrap().len(), 1); let get_more_events = event_buffer.get_command_started_events(&["getMore"]); assert_eq!(get_more_events.len(), 1); @@ -385,6 +387,7 @@ async fn cursor_iteration_in_a_transaction() { .await .unwrap(); assert_eq!(result.upserted_count, 2); + assert_eq!(result.update_results.unwrap().len(), 2); let command_started_events = event_buffer.get_command_started_events(&["getMore"]); assert_eq!(command_started_events.len(), 1); From fe6de0a3f4597b714d4599e5aeab74e3dedc9928 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 11 Apr 2024 11:35:10 -0600 Subject: [PATCH 47/75] skip and sync files --- src/test/bulk_write.rs | 5 +- .../client-bulkWrite-errorResponse.json | 68 +++++++++++++++++++ .../client-bulkWrite-errorResponse.yml | 37 
++++++++++ .../client-bulkWrite-errors.yml | 2 +- .../client-bulkWrite-options.yml | 1 - 5 files changed, 110 insertions(+), 3 deletions(-) create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errorResponse.json create mode 100644 src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errorResponse.yml diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 17c2069a2..8ff532105 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -18,7 +18,10 @@ use super::TestClient; #[tokio::test(flavor = "multi_thread")] async fn run_unified() { - run_unified_tests(&["crud", "unified", "new-bulk-write"]).await; + run_unified_tests(&["crud", "unified", "new-bulk-write"]) + // TODO RUST-1405: unskip this test + .skip_files(&["client-bulkWrite-errorResponse.json"]) + .await; } #[tokio::test] diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errorResponse.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errorResponse.json new file mode 100644 index 000000000..edf2339d8 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errorResponse.json @@ -0,0 +1,68 @@ +{ + "description": "client bulkWrite errorResponse", + "schemaVersion": "1.12", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite operations support errorResponse assertions", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 8 + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errorResponse.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errorResponse.yml new file mode 100644 index 000000000..45e53171e --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errorResponse.yml @@ -0,0 +1,37 @@ +description: "client bulkWrite errorResponse" +schemaVersion: "1.12" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + useMultipleMongoses: false # Avoid setting fail points with multiple mongoses + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite operations support errorResponse assertions" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ bulkWrite ] + errorCode: &errorCode 8 # UnknownError + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 1 } + expectError: + errorCode: *errorCode + errorResponse: + code: *errorCode diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml index 
97ce17560..6d5012229 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml @@ -9,7 +9,7 @@ createEntities: observeEvents: [ commandStartedEvent ] uriOptions: retryWrites: false - useMultipleMongoses: false + useMultipleMongoses: false # Target a single mongos with failpoint - database: id: &database0 database0 client: *client0 diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml index 461944574..fdcf78879 100644 --- a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml @@ -348,4 +348,3 @@ tests: document: { _id: 3, x: 33 } nsInfo: - ns: *namespace - From 356248a654b14f10ac986dd4bdd105f62b92d434 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 11 Apr 2024 11:48:06 -0600 Subject: [PATCH 48/75] remove method --- src/operation.rs | 6 ------ src/operation/aggregate.rs | 2 +- src/operation/aggregate/change_stream.rs | 2 +- src/operation/bulk_write.rs | 6 +++++- src/operation/find.rs | 2 +- src/operation/list_collections.rs | 6 +++++- src/operation/list_indexes.rs | 6 +++++- src/operation/run_cursor_command.rs | 6 +++++- 8 files changed, 23 insertions(+), 13 deletions(-) diff --git a/src/operation.rs b/src/operation.rs index ee5f9a831..252a64c43 100644 --- a/src/operation.rs +++ b/src/operation.rs @@ -87,12 +87,6 @@ pub(crate) struct ExecutionContext<'a> { pub(crate) session: Option<&'a mut ClientSession>, } -impl<'a> ExecutionContext<'a> { - pub(crate) fn stream_description(&'a self) -> Result<&'a StreamDescription> { - self.connection.stream_description() - } -} - /// A trait modeling the behavior of a server side operation. /// /// No methods in this trait should have default behaviors to ensure that wrapper operations diff --git a/src/operation/aggregate.rs b/src/operation/aggregate.rs index aaa589c04..8b8e8ecfc 100644 --- a/src/operation/aggregate.rs +++ b/src/operation/aggregate.rs @@ -96,7 +96,7 @@ impl OperationWithDefaults for Aggregate { wc_error_info.validate()?; }; - let description = context.stream_description()?; + let description = context.connection.stream_description()?; // The comment should only be propagated to getMore calls on 4.4+. let comment = if description.max_wire_version.unwrap_or(0) < SERVER_4_4_0_WIRE_VERSION { diff --git a/src/operation/aggregate/change_stream.rs b/src/operation/aggregate/change_stream.rs index 062dc6b30..3fe6a888b 100644 --- a/src/operation/aggregate/change_stream.rs +++ b/src/operation/aggregate/change_stream.rs @@ -112,7 +112,7 @@ impl OperationWithDefaults for ChangeStreamAggregate { && o.start_after.is_none() }; - let description = context.stream_description()?; + let description = context.connection.stream_description()?; if self.args.options.as_ref().map_or(true, has_no_time) && description.max_wire_version.map_or(false, |v| v >= 7) && spec.initial_buffer.is_empty() diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index f476f557e..49eead7d1 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -293,7 +293,11 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { let specification = CursorSpecification::new( response.body.cursor, - context.stream_description()?.server_address.clone(), + context + .connection + .stream_description()? 
+ .server_address + .clone(), None, None, self.options.and_then(|options| options.comment.clone()), diff --git a/src/operation/find.rs b/src/operation/find.rs index e094e3944..d3da31ed3 100644 --- a/src/operation/find.rs +++ b/src/operation/find.rs @@ -102,7 +102,7 @@ impl OperationWithDefaults for Find { async move { let response: CursorBody = response.body()?; - let description = context.stream_description()?; + let description = context.connection.stream_description()?; // The comment should only be propagated to getMore calls on 4.4+. let comment = if description.max_wire_version.unwrap_or(0) < SERVER_4_4_0_WIRE_VERSION { diff --git a/src/operation/list_collections.rs b/src/operation/list_collections.rs index 2e445b777..0b199e9f2 100644 --- a/src/operation/list_collections.rs +++ b/src/operation/list_collections.rs @@ -66,7 +66,11 @@ impl OperationWithDefaults for ListCollections { let response: CursorBody = response.body()?; Ok(CursorSpecification::new( response.cursor, - context.stream_description()?.server_address.clone(), + context + .connection + .stream_description()? + .server_address + .clone(), self.options.as_ref().and_then(|opts| opts.batch_size), None, None, diff --git a/src/operation/list_indexes.rs b/src/operation/list_indexes.rs index f87af0cd4..240b81b3b 100644 --- a/src/operation/list_indexes.rs +++ b/src/operation/list_indexes.rs @@ -56,7 +56,11 @@ impl OperationWithDefaults for ListIndexes { let response: CursorBody = response.body()?; Ok(CursorSpecification::new( response.cursor, - context.stream_description()?.server_address.clone(), + context + .connection + .stream_description()? + .server_address + .clone(), self.options.as_ref().and_then(|o| o.batch_size), self.options.as_ref().and_then(|o| o.max_time), None, diff --git a/src/operation/run_cursor_command.rs b/src/operation/run_cursor_command.rs index 2343ad93f..575930a9e 100644 --- a/src/operation/run_cursor_command.rs +++ b/src/operation/run_cursor_command.rs @@ -104,7 +104,11 @@ impl<'conn> Operation for RunCursorCommand<'conn> { Ok(CursorSpecification::new( cursor_response.cursor, - context.stream_description()?.server_address.clone(), + context + .connection + .stream_description()? 
+ .server_address + .clone(), self.options.as_ref().and_then(|opts| opts.batch_size), self.options.as_ref().and_then(|opts| opts.max_time), comment, From ecb8973b4d3dedc31753223174246387a3da0f70 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 11 Apr 2024 11:59:38 -0600 Subject: [PATCH 49/75] fix integer casts --- src/bson_util.rs | 4 ++-- src/operation/bulk_write.rs | 17 +++++++++++------ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/src/bson_util.rs b/src/bson_util.rs index 1db40e87a..06c11f062 100644 --- a/src/bson_util.rs +++ b/src/bson_util.rs @@ -183,8 +183,8 @@ pub(crate) fn get_or_prepend_id_field(doc: &mut RawDocumentBuf) -> Result new_bytes.pop(); new_bytes.extend(&doc.as_bytes()[4..]); - let new_length = (new_bytes.len() as i32).to_le_bytes(); - new_bytes[0..4].copy_from_slice(&new_length); + let new_length: i32 = Checked::new(new_bytes.len()).try_into()?; + new_bytes[0..4].copy_from_slice(&new_length.to_le_bytes()); *doc = RawDocumentBuf::from_bytes(new_bytes)?; diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index 49eead7d1..1952aa72f 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -8,6 +8,7 @@ use futures_util::{FutureExt, TryStreamExt}; use crate::{ bson::{rawdoc, Bson, RawDocumentBuf}, bson_util::{self, array_entry_size_bytes, extend_raw_document_buf, vec_to_raw_array_buf}, + checked::Checked, cmap::{Command, RawCommandResponse, StreamDescription}, cursor::CursorSpecification, error::{ClientBulkWriteError, Error, ErrorKind, Result}, @@ -187,17 +188,20 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { const NAME: &'static str = "bulkWrite"; fn build(&mut self, description: &StreamDescription) -> Result> { - let max_operations = description.max_write_batch_size; - let max_doc_size = description.max_bson_object_size as usize; - let max_message_size = description.max_message_size_bytes as usize - COMMAND_OVERHEAD_SIZE; + let max_operations: usize = Checked::new(description.max_write_batch_size).try_into()?; + let max_doc_size: usize = Checked::new(description.max_bson_object_size).try_into()?; + let max_message_size = Checked::new(description.max_message_size_bytes) + .try_into::()? + - COMMAND_OVERHEAD_SIZE; let mut namespace_info = NamespaceInfo::new(); let mut ops = Vec::new(); let mut size = 0; - for (i, model) in self.models.iter().take(max_operations as usize).enumerate() { + for (i, model) in self.models.iter().take(max_operations).enumerate() { let (namespace_index, namespace_size) = namespace_info.get_index(model.namespace()); - let mut operation = rawdoc! { model.operation_name(): namespace_index as i32 }; + let operation_namespace_index: i32 = Checked::new(namespace_index).try_into()?; + let mut operation = rawdoc! { model.operation_name(): operation_namespace_index }; let (model_doc, inserted_id) = model.get_ops_document_contents()?; extend_raw_document_buf(&mut operation, model_doc)?; @@ -278,7 +282,8 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { // A partial result with summary info should only be created if one or more // operations were successful. 
- if response.summary.n_errors < self.n_attempted as i64 { + let n_errors: usize = Checked::new(response.summary.n_errors).try_into()?; + if n_errors < self.n_attempted { bulk_write_error .partial_result .get_or_insert_with(|| BulkWriteResult::new(self.is_verbose())) From 36ba52c9a6aabce2417059c9d39184f8b55682f6 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 11 Apr 2024 12:07:04 -0600 Subject: [PATCH 50/75] skip transaction test on standalone --- src/test/bulk_write.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 8ff532105..4d373b164 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -349,8 +349,11 @@ async fn cursor_iteration_in_a_transaction() { .build() .await; - if client.server_version_lt(8, 0) { - log_uncaptured("skipping cursor_iteration_in_a_transaction: bulkWrite requires 8.0+"); + if client.server_version_lt(8, 0) || client.is_standalone() { + log_uncaptured( + "skipping cursor_iteration_in_a_transaction: bulkWrite requires 8.0+, transactions \ + require a non-standalone topology", + ); return; } From 41194ab2652553c643255b6cbad97a94f0f8ef36 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Wed, 17 Apr 2024 09:55:41 -0600 Subject: [PATCH 51/75] don't use multiple mongoses --- src/test/bulk_write.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 4d373b164..fe63e57c6 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -270,8 +270,12 @@ async fn successful_cursor_iteration() { #[tokio::test(flavor = "multi_thread")] async fn failed_cursor_iteration() { + let mut options = get_client_options().await.clone(); + options.hosts.drain(1..); + let event_buffer = EventBuffer::new(); let client = Client::test_builder() + .options(options) .event_buffer(event_buffer.clone()) .build() .await; From 0f2a52342d28511cf67666f62412f7f9b705688a Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Wed, 17 Apr 2024 14:21:14 -0600 Subject: [PATCH 52/75] update transactions tests --- .../unified/client-bulkWrite.json | 178 +++++++++++++++++- src/test/spec/unified_runner/operation.rs | 23 ++- 2 files changed, 192 insertions(+), 9 deletions(-) diff --git a/src/test/spec/json/transactions/unified/client-bulkWrite.json b/src/test/spec/json/transactions/unified/client-bulkWrite.json index 4fbb4e1f2..b13702799 100644 --- a/src/test/spec/json/transactions/unified/client-bulkWrite.json +++ b/src/test/spec/json/transactions/unified/client-bulkWrite.json @@ -1,6 +1,6 @@ { "description": "client bulkWrite transactions", - "schemaVersion": "1.0", + "schemaVersion": "1.3", "runOnRequirements": [ { "minServerVersion": "8.0", @@ -39,6 +39,23 @@ "id": "session0", "client": "client0" } + }, + { + "client": { + "id": "client_with_wmajority", + "uriOptions": { + "w": "majority" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "session": { + "id": "session_with_wmajority", + "client": "client_with_wmajority" + } } ], "_yamlAnchors": { @@ -193,12 +210,16 @@ "1": { "matchedCount": 1, "modifiedCount": 1, - "upsertedId": null + "upsertedId": { + "$$exists": false + } }, "2": { "matchedCount": 2, "modifiedCount": 2, - "upsertedId": null + "upsertedId": { + "$$exists": false + } }, "3": { "matchedCount": 1, @@ -381,6 +402,157 @@ ] } ] + }, + { + "description": "client writeConcern ignored for client bulkWrite in transaction", + "operations": [ + { + "object": "session_with_wmajority", + "name": "startTransaction", + 
"arguments": { + "writeConcern": { + "w": 1 + } + } + }, + { + "object": "client_with_wmajority", + "name": "clientBulkWrite", + "arguments": { + "session": "session_with_wmajority", + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + }, + { + "object": "session_with_wmajority", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client_with_wmajority", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "lsid": { + "$$sessionLsid": "session_with_wmajority" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + } + ], + "nsInfo": [ + { + "ns": "transaction-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session_with_wmajority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": 1 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] } ] } diff --git a/src/test/spec/unified_runner/operation.rs b/src/test/spec/unified_runner/operation.rs index c5abda3a2..88181928e 100644 --- a/src/test/spec/unified_runner/operation.rs +++ b/src/test/spec/unified_runner/operation.rs @@ -44,14 +44,11 @@ use crate::{ action::Action, bson::{doc, to_bson, Bson, Document}, client::session::TransactionState, - coll::options::Hint, - collation::Collation, - db::options::{ListCollectionsOptions, RunCursorCommandOptions}, error::{ErrorKind, Result}, - gridfs::options::{GridFsDownloadByNameOptions, GridFsUploadOptions}, options::{ AggregateOptions, ChangeStreamOptions, + Collation, CountOptions, CreateCollectionOptions, DeleteOptions, @@ -63,13 +60,19 @@ use crate::{ FindOneAndUpdateOptions, FindOneOptions, FindOptions, + GridFsDownloadByNameOptions, + GridFsUploadOptions, + Hint, IndexOptions, InsertManyOptions, InsertOneOptions, + ListCollectionsOptions, ListIndexesOptions, ReadConcern, ReplaceOptions, + RunCursorCommandOptions, SelectionCriteria, + TransactionOptions, UpdateModifications, UpdateOptions, }, @@ -1829,7 +1832,10 @@ impl TestOperation for AssertSessionNotDirty { #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase", deny_unknown_fields)] -pub(super) struct StartTransaction {} +pub(super) struct StartTransaction { + #[serde(flatten)] + options: TransactionOptions, +} impl TestOperation for StartTransaction { fn execute_entity_operation<'a>( @@ -1839,7 +1845,12 @@ impl TestOperation for StartTransaction { ) -> BoxFuture<'a, Result>> { async move { with_mut_session!(test_runner, id, |session| { - async 
move { session.start_transaction().await } + async move { + session + .start_transaction() + .with_options(self.options.clone()) + .await + } }) .await?; Ok(None) From 956729e713bef7abf7a50b650309c8ea5dc9001f Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Wed, 17 Apr 2024 14:21:26 -0600 Subject: [PATCH 53/75] add file --- .../transactions/unified/client-bulkWrite.yml | 245 ++++++++++++++++++ 1 file changed, 245 insertions(+) create mode 100644 src/test/spec/json/transactions/unified/client-bulkWrite.yml diff --git a/src/test/spec/json/transactions/unified/client-bulkWrite.yml b/src/test/spec/json/transactions/unified/client-bulkWrite.yml new file mode 100644 index 000000000..d2234ed30 --- /dev/null +++ b/src/test/spec/json/transactions/unified/client-bulkWrite.yml @@ -0,0 +1,245 @@ +description: "client bulkWrite transactions" +schemaVersion: "1.3" +runOnRequirements: + - minServerVersion: "8.0" + topologies: + - replicaset + - sharded + - load-balanced + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name transaction-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + - session: + id: &session0 session0 + client: *client0 + - client: + id: &client_with_wmajority client_with_wmajority + uriOptions: + w: majority + observeEvents: + - commandStartedEvent + - session: + id: &session_with_wmajority session_with_wmajority + client: *client_with_wmajority + +_yamlAnchors: + namespace: &namespace "transaction-tests.coll0" + +initialData: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 5, x: 55 } + - { _id: 6, x: 66 } + - { _id: 7, x: 77 } + +tests: + - description: "client bulkWrite in a transaction" + operations: + - object: *session0 + name: startTransaction + - object: *client0 + name: clientBulkWrite + arguments: + session: *session0 + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $inc: { x: 2 } } + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { x: 44 } + upsert: true + - deleteOne: + namespace: *namespace + filter: { _id: 5 } + - deleteMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 1 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 3 + insertResults: + 0: + insertedId: 8 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { $$exists: false } + 3: + matchedCount: 1 + modifiedCount: 0 + upsertedId: 4 + deleteResults: + 4: + deletedCount: 1 + 5: + deletedCount: 2 + - object: *session0 + name: commitTransaction + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + lsid: { $$sessionLsid: *session0 } + txnNumber: 1 + startTransaction: true + autocommit: false + writeConcern: { $$exists: false } + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + - update: 0 + filter: { _id: 1 } + 
updateMods: { $inc: { x: 1 } } + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $inc: { x: 2 } } + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { x: 44 } + upsert: true + multi: false + - delete: 0 + filter: { _id: 5 } + multi: false + - delete: 0 + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + multi: true + nsInfo: + - ns: *namespace + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + lsid: { $$sessionLsid: *session0 } + txnNumber: 1 + startTransaction: { $$exists: false } + autocommit: false + writeConcern: { $$exists: false } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 24 } + - { _id: 3, x: 35 } + - { _id: 4, x: 44 } + - { _id: 8, x: 88 } + - description: 'client writeConcern ignored for client bulkWrite in transaction' + operations: + - object: *session_with_wmajority + name: startTransaction + arguments: + writeConcern: + w: 1 + - object: *client_with_wmajority + name: clientBulkWrite + arguments: + session: *session_with_wmajority + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + - object: *session_with_wmajority + name: commitTransaction + expectEvents: + - + client: *client_with_wmajority + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + lsid: { $$sessionLsid: *session_with_wmajority } + txnNumber: 1 + startTransaction: true + autocommit: false + writeConcern: { $$exists: false } + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + nsInfo: + - ns: *namespace + - + commandStartedEvent: + command: + commitTransaction: 1 + lsid: { $$sessionLsid: *session_with_wmajority } + txnNumber: { $numberLong: '1' } + startTransaction: { $$exists: false } + autocommit: false + writeConcern: + w: 1 + commandName: commitTransaction + databaseName: admin + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 5, x: 55 } + - { _id: 6, x: 66 } + - { _id: 7, x: 77 } + - { _id: 8, x: 88 } From c943b89a6e929684d7624ba31d11bf0a77d8c135 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Wed, 17 Apr 2024 15:00:40 -0600 Subject: [PATCH 54/75] transaction test, change error expectations --- .../unified/client-bulkWrite.json | 34 ++++ .../transactions/unified/client-bulkWrite.yml | 17 ++ src/test/spec/unified_runner/operation.rs | 11 +- src/test/spec/unified_runner/test_file.rs | 163 ++++++------------ 4 files changed, 112 insertions(+), 113 deletions(-) diff --git a/src/test/spec/json/transactions/unified/client-bulkWrite.json b/src/test/spec/json/transactions/unified/client-bulkWrite.json index b13702799..f8f1d9716 100644 --- a/src/test/spec/json/transactions/unified/client-bulkWrite.json +++ b/src/test/spec/json/transactions/unified/client-bulkWrite.json @@ -553,6 +553,40 @@ ] } ] + }, + { + "description": "client bulkWrite with writeConcern in a transaction causes a transaction error", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "client0", + "name": 
"clientBulkWrite", + "arguments": { + "session": "session0", + "writeConcern": { + "w": 1 + }, + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot set write concern after starting a transaction" + } + } + ] } ] } diff --git a/src/test/spec/json/transactions/unified/client-bulkWrite.yml b/src/test/spec/json/transactions/unified/client-bulkWrite.yml index d2234ed30..eda2babbe 100644 --- a/src/test/spec/json/transactions/unified/client-bulkWrite.yml +++ b/src/test/spec/json/transactions/unified/client-bulkWrite.yml @@ -243,3 +243,20 @@ tests: - { _id: 6, x: 66 } - { _id: 7, x: 77 } - { _id: 8, x: 88 } + - description: "client bulkWrite with writeConcern in a transaction causes a transaction error" + operations: + - object: *session0 + name: startTransaction + - object: *client0 + name: clientBulkWrite + arguments: + session: *session0 + writeConcern: + w: 1 + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + expectError: + isClientError: true + errorContains: "Cannot set write concern after starting a transaction" diff --git a/src/test/spec/unified_runner/operation.rs b/src/test/spec/unified_runner/operation.rs index 88181928e..16df95733 100644 --- a/src/test/spec/unified_runner/operation.rs +++ b/src/test/spec/unified_runner/operation.rs @@ -8,6 +8,7 @@ use std::{ convert::TryInto, fmt::Debug, ops::{Deref, DerefMut}, + panic::{catch_unwind, AssertUnwindSafe}, sync::{ atomic::{AtomicBool, Ordering}, Arc, @@ -226,7 +227,7 @@ impl Operation { "{}: {} should return an error", description, self.name )); - expect_error.verify_result(&error, description).unwrap(); + expect_error.verify_result(&error, description); } Expectation::Ignore => (), } @@ -2444,12 +2445,14 @@ impl TestOperation for Loop { self.report_success(&mut entities); } (Err(error), Expectation::Error(ref expected_error)) => { - match expected_error.verify_result(&error, operation.name.as_str()) { + match catch_unwind(AssertUnwindSafe(|| { + expected_error.verify_result(&error, operation.name.as_str()) + })) { Ok(_) => self.report_success(&mut entities), - Err(e) => report_error_or_failure!( + Err(_) => report_error_or_failure!( self.store_failures_as_entity, self.store_errors_as_entity, - e, + format!("expected {:?}, got {:?}", expected_error, error), &mut entities ), } diff --git a/src/test/spec/unified_runner/test_file.rs b/src/test/spec/unified_runner/test_file.rs index 395bde66a..26d0f8038 100644 --- a/src/test/spec/unified_runner/test_file.rs +++ b/src/test/spec/unified_runner/test_file.rs @@ -1,6 +1,7 @@ use std::{borrow::Cow, collections::HashMap, fmt::Write, sync::Arc, time::Duration}; use percent_encoding::NON_ALPHANUMERIC; +use pretty_assertions::assert_eq; use regex::Regex; use semver::{Version, VersionReq}; use serde::{Deserialize, Deserializer}; @@ -493,95 +494,47 @@ pub(crate) struct ExpectError { } impl ExpectError { - pub(crate) fn verify_result( - &self, - error: &Error, - description: impl AsRef, - ) -> std::result::Result<(), String> { - let description = description.as_ref(); + pub(crate) fn verify_result(&self, error: &Error, description: impl AsRef) { + let context = format!( + "test description: {}\nerror: {:?}\n", + description.as_ref(), + error + ); if let Some(is_client_error) = self.is_client_error { - if is_client_error != !error.is_server_error() { - return Err(format!( - "{}: expected client error but got {:?}", - 
description, error - )); - } + assert_eq!(!error.is_server_error(), is_client_error, "{context}"); } if let Some(error_contains) = &self.error_contains { - match &error.message() { - Some(msg) if msg.contains(error_contains) => (), - _ => { - return Err(format!( - "{}: \"{}\" should include message field", - description, error - )) - } - } + let Some(message) = error.message() else { + panic!("{context}expected error to have message"); + }; + assert!(message.contains(error_contains), "{context}"); } if let Some(error_code) = self.error_code { - match &error.code() { - Some(code) => { - if code != &error_code { - return Err(format!( - "{}: error code {} ({:?}) did not match expected error code {}", - description, - code, - error.code_name(), - error_code - )); - } - } - None => { - return Err(format!( - "{}: {:?} was expected to include code {} but had no code", - description, error, error_code - )) - } - } + let Some(actual_code) = error.code() else { + panic!("{context}expected error to have code"); + }; + assert_eq!(actual_code, error_code, "{context}"); } if let Some(expected_code_name) = &self.error_code_name { - match error.code_name() { - Some(name) => { - if name != expected_code_name { - return Err(format!( - "{}: error code name \"{}\" did not match expected error code name \ - \"{}\"", - description, name, expected_code_name, - )); - } - } - None => { - return Err(format!( - "{}: {:?} was expected to include code name \"{}\" but had no code name", - description, error, expected_code_name - )) - } - } + let Some(actual_code_name) = error.code_name() else { + panic!("{}: expected error to have code name", context); + }; + assert_eq!(actual_code_name, expected_code_name, "{}", context); } if let Some(error_labels_contain) = &self.error_labels_contain { for label in error_labels_contain { - if !error.contains_label(label) { - return Err(format!( - "{}: expected {:?} to contain label \"{}\"", - description, error, label - )); - } + assert!(error.contains_label(label), "{}", context); } } if let Some(error_labels_omit) = &self.error_labels_omit { for label in error_labels_omit { - if error.contains_label(label) { - return Err(format!( - "{}: expected {:?} to omit label \"{}\"", - description, error, label - )); - } + assert!(!error.contains_label(label), "{}", context); } } @@ -590,64 +543,56 @@ impl ExpectError { ErrorKind::ClientBulkWrite(ClientBulkWriteError { partial_result: Some(ref partial_result), .. - }) => Some(bson::to_bson(partial_result).map_err(|e| e.to_string())?), + }) => Some( + bson::to_bson(partial_result) + .map_err(|e| e.to_string()) + .unwrap(), + ), _ => None, }; - results_match(actual_result.as_ref(), expected_result, false, None)?; + results_match(actual_result.as_ref(), expected_result, false, None).expect(&context); } if let Some(ref write_errors) = self.write_errors { - let actual_write_errors = match *error.kind { - ErrorKind::ClientBulkWrite(ref bulk_write_error) => &bulk_write_error.write_errors, - ref other => { - return Err(format!( - "{}: expected bulk write error, got {:?}", - description, other - )) - } + let ErrorKind::ClientBulkWrite(ClientBulkWriteError { + write_errors: ref actual_write_errors, + .. 
+ }) = *error.kind + else { + panic!("{context}expected client bulk write error"); }; for (expected_index, expected_error) in write_errors { - let actual_error = actual_write_errors.get(expected_index).ok_or_else(|| { - format!( - "{}: expected error for operation at index {}", - description, expected_index - ) - })?; - let actual_error = bson::to_bson(&actual_error).map_err(|e| e.to_string())?; - results_match(Some(&actual_error), expected_error, true, None)?; + let actual_error = actual_write_errors.get(expected_index).expect(&context); + let actual_error = bson::to_bson(&actual_error) + .map_err(|e| e.to_string()) + .expect(&context); + results_match(Some(&actual_error), expected_error, true, None).expect(&context); } } if let Some(ref write_concern_errors) = self.write_concern_errors { - let actual_write_concern_errors = match *error.kind { - ErrorKind::ClientBulkWrite(ref bulk_write_error) => { - &bulk_write_error.write_concern_errors - } - ref other => { - return Err(format!( - "{}: expected bulk write error, got {:?}", - description, other - )) - } + let ErrorKind::ClientBulkWrite(ClientBulkWriteError { + write_concern_errors: ref actual_write_concern_errors, + .. + }) = *error.kind + else { + panic!("{context}expected client bulk write error"); }; - if actual_write_concern_errors.len() != write_concern_errors.len() { - return Err(format!( - "{}: got {} write errors, expected {}", - description, - actual_write_concern_errors.len(), - write_concern_errors.len() - )); - } + assert_eq!( + actual_write_concern_errors.len(), + write_concern_errors.len(), + "{context}" + ); for (actual, expected) in actual_write_concern_errors.iter().zip(write_concern_errors) { - let actual = bson::to_bson(&actual).map_err(|e| e.to_string())?; - results_match(Some(&actual), expected, true, None)?; + let actual = bson::to_bson(&actual) + .map_err(|e| e.to_string()) + .expect(&context); + results_match(Some(&actual), expected, true, None).expect(&context); } } - - Ok(()) } } From 0cb96764e16cd48c3aef84b8c1f61365e25ff7d9 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 18 Apr 2024 11:08:17 -0600 Subject: [PATCH 55/75] fail point improvements --- src/cmap/test/integration.rs | 11 +- .../server_selection/test/in_window.rs | 9 +- src/sdam/description/topology/test/sdam.rs | 9 +- src/sdam/test.rs | 11 +- src/test.rs | 2 - src/test/bulk_write.rs | 11 +- src/test/change_stream.rs | 17 +- src/test/client.rs | 12 +- src/test/csfle.rs | 15 +- src/test/spec/gridfs.rs | 7 +- src/test/spec/retryable_reads.rs | 16 +- src/test/spec/retryable_writes.rs | 31 ++-- src/test/spec/retryable_writes/test_file.rs | 2 +- src/test/spec/sdam.rs | 9 +- src/test/spec/transactions.rs | 7 +- src/test/spec/unified_runner/operation.rs | 2 +- src/test/spec/unified_runner/test_runner.rs | 2 +- src/test/spec/v2_runner.rs | 6 +- src/test/spec/v2_runner/operation.rs | 2 +- src/test/spec/v2_runner/test_file.rs | 3 +- src/test/util.rs | 3 +- src/test/util/failpoint.rs | 165 ------------------ 22 files changed, 101 insertions(+), 251 deletions(-) delete mode 100644 src/test/util/failpoint.rs diff --git a/src/cmap/test/integration.rs b/src/cmap/test/integration.rs index 6afe61130..7d7a0a997 100644 --- a/src/cmap/test/integration.rs +++ b/src/cmap/test/integration.rs @@ -18,9 +18,10 @@ use crate::{ test::{ get_client_options, log_uncaptured, - util::event_buffer::EventBuffer, - FailPoint, - FailPointMode, + util::{ + event_buffer::EventBuffer, + fail_point::{FailPoint, FailPointMode}, + }, TestClient, }, }; @@ -190,7 +191,7 @@ async fn 
connection_error_during_establishment() { let _guard = client .enable_fail_point( - FailPoint::new( + FailPoint::fail_command( &[LEGACY_HELLO_COMMAND_NAME, "hello"], FailPointMode::Times(10), ) @@ -249,7 +250,7 @@ async fn connection_error_during_operation() { let _guard = client .enable_fail_point( - FailPoint::new(&["ping"], FailPointMode::Times(10)).close_connection(true), + FailPoint::fail_command(&["ping"], FailPointMode::Times(10)).close_connection(true), ) .await .unwrap(); diff --git a/src/sdam/description/topology/server_selection/test/in_window.rs b/src/sdam/description/topology/server_selection/test/in_window.rs index 352c05355..b7f572ef3 100644 --- a/src/sdam/description/topology/server_selection/test/in_window.rs +++ b/src/sdam/description/topology/server_selection/test/in_window.rs @@ -18,10 +18,11 @@ use crate::{ get_client_options, log_uncaptured, run_spec_test, - util::event_buffer::EventBuffer, + util::{ + event_buffer::EventBuffer, + fail_point::{FailPoint, FailPointMode}, + }, Event, - FailPoint, - FailPointMode, TestClient, }, Client, @@ -260,7 +261,7 @@ async fn load_balancing_test() { let slow_host = get_client_options().await.hosts[0].clone(); let slow_host_criteria = SelectionCriteria::Predicate(Arc::new(move |si| si.address() == &slow_host)); - let fail_point = FailPoint::new(&["find"], FailPointMode::AlwaysOn) + let fail_point = FailPoint::fail_command(&["find"], FailPointMode::AlwaysOn) .block_connection(Duration::from_millis(500)) .selection_criteria(slow_host_criteria); let guard = setup_client.enable_fail_point(fail_point).await.unwrap(); diff --git a/src/sdam/description/topology/test/sdam.rs b/src/sdam/description/topology/test/sdam.rs index 1b40b5f63..eff5ea6c2 100644 --- a/src/sdam/description/topology/test/sdam.rs +++ b/src/sdam/description/topology/test/sdam.rs @@ -30,10 +30,11 @@ use crate::{ get_client_options, log_uncaptured, run_spec_test, - util::event_buffer::EventBuffer, + util::{ + event_buffer::EventBuffer, + fail_point::{FailPoint, FailPointMode}, + }, Event, - FailPoint, - FailPointMode, TestClient, }, }; @@ -678,7 +679,7 @@ async fn heartbeat_events() { options.heartbeat_freq = None; let fp_client = TestClient::with_options(Some(options)).await; - let fail_point = FailPoint::new( + let fail_point = FailPoint::fail_command( &[LEGACY_HELLO_COMMAND_NAME, "hello"], FailPointMode::AlwaysOn, ) diff --git a/src/sdam/test.rs b/src/sdam/test.rs index e6684e527..3adbbde95 100644 --- a/src/sdam/test.rs +++ b/src/sdam/test.rs @@ -18,10 +18,11 @@ use crate::{ test::{ get_client_options, log_uncaptured, - util::event_buffer::EventBuffer, + util::{ + event_buffer::EventBuffer, + fail_point::{FailPoint, FailPointMode}, + }, Event, - FailPoint, - FailPointMode, TestClient, }, Client, @@ -49,7 +50,7 @@ async fn min_heartbeat_frequency() { let _guard = setup_client .enable_fail_point( - FailPoint::new( + FailPoint::fail_command( &[LEGACY_HELLO_COMMAND_NAME, "hello"], FailPointMode::Times(5), ) @@ -135,7 +136,7 @@ async fn sdam_pool_management() { let _guard = client .enable_fail_point( - FailPoint::new( + FailPoint::fail_command( &[LEGACY_HELLO_COMMAND_NAME, "hello"], FailPointMode::Times(4), ) diff --git a/src/test.rs b/src/test.rs index 817ead68b..7f580e0f7 100644 --- a/src/test.rs +++ b/src/test.rs @@ -39,8 +39,6 @@ pub(crate) use self::{ file_level_log, log_uncaptured, Event, - FailPoint, - FailPointMode, MatchErrExt, Matchable, TestClient, diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index fe63e57c6..dbefdbc38 100644 --- 
a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -6,9 +6,10 @@ use crate::{ get_client_options, log_uncaptured, spec::unified_runner::run_unified_tests, - util::event_buffer::EventBuffer, - FailPoint, - FailPointMode, + util::{ + event_buffer::EventBuffer, + fail_point::{FailPoint, FailPointMode}, + }, }, Client, Namespace, @@ -131,7 +132,7 @@ async fn write_concern_error_batches() { let max_write_batch_size = client.server_info.max_write_batch_size.unwrap() as usize; - let fail_point = FailPoint::new(&["bulkWrite"], FailPointMode::Times(2)) + let fail_point = FailPoint::fail_command(&["bulkWrite"], FailPointMode::Times(2)) .write_concern_error(doc! { "code": 91, "errmsg": "Replication is being shut down" }); let _guard = client.enable_fail_point(fail_point).await.unwrap(); @@ -287,7 +288,7 @@ async fn failed_cursor_iteration() { let max_bson_object_size = client.server_info.max_bson_object_size as usize; - let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(8); + let fail_point = FailPoint::fail_command(&["getMore"], FailPointMode::Times(1)).error_code(8); let _guard = client.enable_fail_point(fail_point).await.unwrap(); let collection = client.database("db").collection::("coll"); diff --git a/src/test/change_stream.rs b/src/test/change_stream.rs index 30ece4161..28562713a 100644 --- a/src/test/change_stream.rs +++ b/src/test/change_stream.rs @@ -12,7 +12,7 @@ use crate::{ db::options::ChangeStreamPreAndPostImages, event::command::{CommandEvent, CommandStartedEvent, CommandSucceededEvent}, options::{Acknowledgment, WriteConcern}, - test::{FailPoint, FailPointMode}, + test::util::fail_point::{FailPoint, FailPointMode}, Client, Collection, }; @@ -171,7 +171,7 @@ async fn resumes_on_error() -> Result<()> { }) if key == doc! { "_id": 1 } )); - let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(43); + let fail_point = FailPoint::fail_command(&["getMore"], FailPointMode::Times(1)).error_code(43); let _guard = client.enable_fail_point(fail_point).await?; coll.insert_one(doc! { "_id": 2 }).await?; @@ -200,7 +200,8 @@ async fn does_not_resume_aggregate() -> Result<()> { None => return Ok(()), }; - let fail_point = FailPoint::new(&["aggregate"], FailPointMode::Times(1)).error_code(43); + let fail_point = + FailPoint::fail_command(&["aggregate"], FailPointMode::Times(1)).error_code(43); let _guard = client.enable_fail_point(fail_point).await?; assert!(coll.watch().await.is_err()); @@ -267,8 +268,8 @@ async fn resume_kill_cursor_error_suppressed() -> Result<()> { }) if key == doc! { "_id": 1 } )); - let fail_point = - FailPoint::new(&["getMore", "killCursors"], FailPointMode::Times(1)).error_code(43); + let fail_point = FailPoint::fail_command(&["getMore", "killCursors"], FailPointMode::Times(1)) + .error_code(43); let _guard = client.enable_fail_point(fail_point).await?; coll.insert_one(doc! { "_id": 2 }).await?; @@ -309,7 +310,7 @@ async fn resume_start_at_operation_time() -> Result<()> { return Ok(()); } - let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(43); + let fail_point = FailPoint::fail_command(&["getMore"], FailPointMode::Times(1)).error_code(43); let _guard = client.enable_fail_point(fail_point).await?; coll.insert_one(doc! { "_id": 2 }).await?; @@ -520,7 +521,7 @@ async fn resume_uses_start_after() -> Result<()> { // Create an event, and synthesize a resumable error when calling `getMore` for that event. coll.insert_one(doc! 
{}).await?; - let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(43); + let fail_point = FailPoint::fail_command(&["getMore"], FailPointMode::Times(1)).error_code(43); let _guard = client.enable_fail_point(fail_point).await?; stream.next().await.transpose()?; @@ -576,7 +577,7 @@ async fn resume_uses_resume_after() -> Result<()> { // Create an event, and synthesize a resumable error when calling `getMore` for that event. coll.insert_one(doc! {}).await?; - let fail_point = FailPoint::new(&["getMore"], FailPointMode::Times(1)).error_code(43); + let fail_point = FailPoint::fail_command(&["getMore"], FailPointMode::Times(1)).error_code(43); let _guard = client.enable_fail_point(fail_point).await?; stream.next().await.transpose()?; diff --git a/src/test/client.rs b/src/test/client.rs index ab5ce4284..6eb8b5103 100644 --- a/src/test/client.rs +++ b/src/test/client.rs @@ -14,10 +14,12 @@ use crate::{ test::{ get_client_options, log_uncaptured, - util::{event_buffer::EventBuffer, TestClient}, + util::{ + event_buffer::EventBuffer, + fail_point::{FailPoint, FailPointMode}, + TestClient, + }, Event, - FailPoint, - FailPointMode, SERVER_API, }, Client, @@ -708,7 +710,7 @@ async fn retry_commit_txn_check_out() { // Enable a fail point that clears the connection pools so that commitTransaction will create a // new connection during checkout. - let fail_point = FailPoint::new(&["ping"], FailPointMode::Times(1)).error_code(11600); + let fail_point = FailPoint::fail_command(&["ping"], FailPointMode::Times(1)).error_code(11600); let _guard = setup_client.enable_fail_point(fail_point).await.unwrap(); #[allow(deprecated)] @@ -753,7 +755,7 @@ async fn retry_commit_txn_check_out() { .await .expect("should see mark available event"); - let fail_point = FailPoint::new( + let fail_point = FailPoint::fail_command( &[LEGACY_HELLO_COMMAND_NAME, "hello"], FailPointMode::Times(1), ) diff --git a/src/test/csfle.rs b/src/test/csfle.rs index 63589960b..d0fb87ed1 100644 --- a/src/test/csfle.rs +++ b/src/test/csfle.rs @@ -44,7 +44,13 @@ use crate::{ WriteConcern, }, runtime, - test::{util::event_buffer::EventBuffer, Event}, + test::{ + util::{ + event_buffer::EventBuffer, + fail_point::{FailPoint, FailPointMode}, + }, + Event, + }, Client, Collection, IndexModel, @@ -53,7 +59,7 @@ use crate::{ #[allow(deprecated)] use super::EventClient; -use super::{get_client_options, log_uncaptured, FailPoint, FailPointMode, TestClient}; +use super::{get_client_options, log_uncaptured, TestClient}; type Result = anyhow::Result; @@ -2407,7 +2413,8 @@ async fn decryption_events_command_error() -> Result<()> { None => return Ok(()), }; - let fail_point = FailPoint::new(&["aggregate"], FailPointMode::Times(1)).error_code(123); + let fail_point = + FailPoint::fail_command(&["aggregate"], FailPointMode::Times(1)).error_code(123); let _guard = td.setup_client.enable_fail_point(fail_point).await.unwrap(); let err = td .decryption_events @@ -2432,7 +2439,7 @@ async fn decryption_events_network_error() -> Result<()> { None => return Ok(()), }; - let fail_point = FailPoint::new(&["aggregate"], FailPointMode::Times(1)) + let fail_point = FailPoint::fail_command(&["aggregate"], FailPointMode::Times(1)) .error_code(123) .close_connection(true); let _guard = td.setup_client.enable_fail_point(fail_point).await.unwrap(); diff --git a/src/test/spec/gridfs.rs b/src/test/spec/gridfs.rs index 91acf7287..5fd5403d0 100644 --- a/src/test/spec/gridfs.rs +++ b/src/test/spec/gridfs.rs @@ -11,8 +11,7 @@ use crate::{ test::{ 
get_client_options, spec::unified_runner::run_unified_tests, - FailPoint, - FailPointMode, + util::fail_point::{FailPoint, FailPointMode}, TestClient, }, }; @@ -231,7 +230,7 @@ async fn upload_stream_errors() { .await .unwrap(); - let fail_point = FailPoint::new(&["insert"], FailPointMode::Times(1)).error_code(1234); + let fail_point = FailPoint::fail_command(&["insert"], FailPointMode::Times(1)).error_code(1234); let _guard = client.enable_fail_point(fail_point).await.unwrap(); let error = get_mongo_error(upload_stream.write_all(&[11]).await); @@ -248,7 +247,7 @@ async fn upload_stream_errors() { upload_stream.write_all(&[11]).await.unwrap(); - let fail_point = FailPoint::new(&["insert"], FailPointMode::Times(1)).error_code(1234); + let fail_point = FailPoint::fail_command(&["insert"], FailPointMode::Times(1)).error_code(1234); let _guard = client.enable_fail_point(fail_point).await.unwrap(); let error = get_mongo_error(upload_stream.close().await); diff --git a/src/test/spec/retryable_reads.rs b/src/test/spec/retryable_reads.rs index 5dae20a17..dc2bab0ba 100644 --- a/src/test/spec/retryable_reads.rs +++ b/src/test/spec/retryable_reads.rs @@ -13,10 +13,11 @@ use crate::{ get_client_options, log_uncaptured, spec::{unified_runner::run_unified_tests, v2_runner::run_v2_tests}, - util::event_buffer::EventBuffer, + util::{ + event_buffer::EventBuffer, + fail_point::{FailPoint, FailPointMode}, + }, Event, - FailPoint, - FailPointMode, TestClient, }, Client, @@ -52,7 +53,8 @@ async fn retry_releases_connection() { .collection("retry_releases_connection"); collection.insert_one(doc! { "x": 1 }).await.unwrap(); - let fail_point = FailPoint::new(&["find"], FailPointMode::Times(1)).close_connection(true); + let fail_point = + FailPoint::fail_command(&["find"], FailPointMode::Times(1)).close_connection(true); let _guard = client.enable_fail_point(fail_point).await.unwrap(); runtime::timeout( @@ -96,7 +98,7 @@ async fn retry_read_pool_cleared() { .collection("retry_read_pool_cleared"); collection.insert_one(doc! 
{ "x": 1 }).await.unwrap(); - let fail_point = FailPoint::new(&["find"], FailPointMode::Times(1)) + let fail_point = FailPoint::fail_command(&["find"], FailPointMode::Times(1)) .error_code(91) .block_connection(Duration::from_secs(1)); let _guard = client.enable_fail_point(fail_point).await.unwrap(); @@ -179,7 +181,7 @@ async fn retry_read_different_mongos() { return; } - let fail_point = FailPoint::new(&["find"], FailPointMode::Times(1)) + let fail_point = FailPoint::fail_command(&["find"], FailPointMode::Times(1)) .error_code(6) .close_connection(true); guards.push(client.enable_fail_point(fail_point).await.unwrap()); @@ -240,7 +242,7 @@ async fn retry_read_same_mongos() { client_options.direct_connection = Some(true); let client = Client::test_builder().options(client_options).build().await; - let fail_point = FailPoint::new(&["find"], FailPointMode::Times(1)) + let fail_point = FailPoint::fail_command(&["find"], FailPointMode::Times(1)) .error_code(6) .close_connection(true); client.enable_fail_point(fail_point).await.unwrap() diff --git a/src/test/spec/retryable_writes.rs b/src/test/spec/retryable_writes.rs index c2484d8d7..f5b6ad66f 100644 --- a/src/test/spec/retryable_writes.rs +++ b/src/test/spec/retryable_writes.rs @@ -28,10 +28,12 @@ use crate::{ log_uncaptured, run_spec_test, spec::unified_runner::run_unified_tests, - util::{event_buffer::EventBuffer, get_default_name}, + util::{ + event_buffer::EventBuffer, + fail_point::{FailPoint, FailPointMode}, + get_default_name, + }, Event, - FailPoint, - FailPointMode, TestClient, }, Client, @@ -421,7 +423,7 @@ async fn retry_write_pool_cleared() { .database("retry_write_pool_cleared") .collection("retry_write_pool_cleared"); - let fail_point = FailPoint::new(&["insert"], FailPointMode::Times(1)) + let fail_point = FailPoint::fail_command(&["insert"], FailPointMode::Times(1)) .error_code(91) .block_connection(Duration::from_secs(1)) .error_labels(vec![RETRYABLE_WRITE_ERROR]); @@ -509,13 +511,12 @@ async fn retry_write_retryable_write_error() { // Enable the failpoint. let fp_guard = { let client = client.lock().await; - let fail_point = - FailPoint::new(&["insert"], FailPointMode::Times(1)) - .error_code(10107) - .error_labels(vec![ - "RetryableWriteError", - "NoWritesPerformed", - ]); + let fail_point = FailPoint::fail_command( + &["insert"], + FailPointMode::Times(1), + ) + .error_code(10107) + .error_labels(vec!["RetryableWriteError", "NoWritesPerformed"]); client .as_ref() .unwrap() @@ -543,8 +544,8 @@ async fn retry_write_retryable_write_error() { return; } - let fail_point = - FailPoint::new(&["insert"], FailPointMode::Times(1)).write_concern_error(doc! { + let fail_point = FailPoint::fail_command(&["insert"], FailPointMode::Times(1)) + .write_concern_error(doc! 
{ "code": 91, "errorLabels": ["RetryableWriteError"], }); @@ -586,7 +587,7 @@ async fn retry_write_different_mongos() { return; } - let fail_point = FailPoint::new(&["insert"], FailPointMode::Times(1)) + let fail_point = FailPoint::fail_command(&["insert"], FailPointMode::Times(1)) .error_code(6) .error_labels(vec![RETRYABLE_WRITE_ERROR]) .close_connection(true); @@ -648,7 +649,7 @@ async fn retry_write_same_mongos() { client_options.direct_connection = Some(true); let client = Client::test_builder().options(client_options).build().await; - let fail_point = FailPoint::new(&["insert"], FailPointMode::Times(1)) + let fail_point = FailPoint::fail_command(&["insert"], FailPointMode::Times(1)) .error_code(6) .error_labels(vec![RETRYABLE_WRITE_ERROR]) .close_connection(true); diff --git a/src/test/spec/retryable_writes/test_file.rs b/src/test/spec/retryable_writes/test_file.rs index b3c72b520..75aa5fc03 100644 --- a/src/test/spec/retryable_writes/test_file.rs +++ b/src/test/spec/retryable_writes/test_file.rs @@ -4,7 +4,7 @@ use super::super::{Operation, RunOn}; use crate::{ bson::{Bson, Document}, options::ClientOptions, - test::FailPoint, + test::util::fail_point::FailPoint, }; #[derive(Debug, Deserialize)] diff --git a/src/test/spec/sdam.rs b/src/test/spec/sdam.rs index 725f8e2dc..41c4ce06d 100644 --- a/src/test/spec/sdam.rs +++ b/src/test/spec/sdam.rs @@ -10,10 +10,11 @@ use crate::{ get_client_options, log_uncaptured, spec::unified_runner::run_unified_tests, - util::event_buffer::EventBuffer, + util::{ + event_buffer::EventBuffer, + fail_point::{FailPoint, FailPointMode}, + }, Event, - FailPoint, - FailPointMode, TestClient, }, Client, @@ -203,7 +204,7 @@ async fn rtt_is_updated() { assert!(events.len() > 2); // configure a failpoint that blocks hello commands - let fail_point = FailPoint::new( + let fail_point = FailPoint::fail_command( &["hello", LEGACY_HELLO_COMMAND_NAME], FailPointMode::Times(1000), ) diff --git a/src/test/spec/transactions.rs b/src/test/spec/transactions.rs index 18ab3eddf..41a39d632 100644 --- a/src/test/spec/transactions.rs +++ b/src/test/spec/transactions.rs @@ -10,8 +10,7 @@ use crate::{ get_client_options, log_uncaptured, spec::{unified_runner::run_unified_tests, v2_runner::run_v2_tests}, - FailPoint, - FailPointMode, + util::fail_point::{FailPoint, FailPointMode}, TestClient, }, Client, @@ -210,7 +209,7 @@ async fn convenient_api_retry_timeout_commit_unknown() { .database("test_convenient") .collection::("test_convenient"); - let fail_point = FailPoint::new(&["commitTransaction"], FailPointMode::Times(1)) + let fail_point = FailPoint::fail_command(&["commitTransaction"], FailPointMode::Times(1)) .error_code(251) .error_labels(vec![UNKNOWN_TRANSACTION_COMMIT_RESULT]); let _guard = client.enable_fail_point(fail_point).await.unwrap(); @@ -255,7 +254,7 @@ async fn convenient_api_retry_timeout_commit_transient() { .database("test_convenient") .collection::("test_convenient"); - let fail_point = FailPoint::new(&["commitTransaction"], FailPointMode::Times(1)) + let fail_point = FailPoint::fail_command(&["commitTransaction"], FailPointMode::Times(1)) .error_code(251) .error_labels(vec![TRANSIENT_TRANSACTION_ERROR]); let _guard = client.enable_fail_point(fail_point).await.unwrap(); diff --git a/src/test/spec/unified_runner/operation.rs b/src/test/spec/unified_runner/operation.rs index 16df95733..8df981f8f 100644 --- a/src/test/spec/unified_runner/operation.rs +++ b/src/test/spec/unified_runner/operation.rs @@ -79,7 +79,7 @@ use crate::{ }, runtime, serde_util, - 
test::FailPoint, + test::util::fail_point::FailPoint, Collection, Database, IndexModel, diff --git a/src/test/spec/unified_runner/test_runner.rs b/src/test/spec/unified_runner/test_runner.rs index 6dd8818de..b1a256fda 100644 --- a/src/test/spec/unified_runner/test_runner.rs +++ b/src/test/spec/unified_runner/test_runner.rs @@ -21,7 +21,7 @@ use crate::{ test_file::{ExpectedEventType, TestFile}, }, update_options_for_testing, - util::FailPointGuard, + util::fail_point::FailPointGuard, TestClient, DEFAULT_URI, LOAD_BALANCED_MULTIPLE_URI, diff --git a/src/test/spec/v2_runner.rs b/src/test/spec/v2_runner.rs index ccf800319..d33b2f6de 100644 --- a/src/test/spec/v2_runner.rs +++ b/src/test/spec/v2_runner.rs @@ -23,8 +23,10 @@ use crate::{ get_client_options, log_uncaptured, spec::deserialize_spec_tests, - util::{get_default_name, FailPointGuard}, - FailPoint, + util::{ + fail_point::{FailPoint, FailPointGuard}, + get_default_name, + }, TestClient, SERVERLESS, }, diff --git a/src/test/spec/v2_runner/operation.rs b/src/test/spec/v2_runner/operation.rs index b0acbe791..cb41a2085 100644 --- a/src/test/spec/v2_runner/operation.rs +++ b/src/test/spec/v2_runner/operation.rs @@ -36,7 +36,7 @@ use crate::{ UpdateOptions, }, selection_criteria::{ReadPreference, SelectionCriteria}, - test::{assert_matches, log_uncaptured, FailPoint, TestClient}, + test::{assert_matches, log_uncaptured, util::fail_point::FailPoint, TestClient}, ClientSession, Collection, Database, diff --git a/src/test/spec/v2_runner/test_file.rs b/src/test/spec/v2_runner/test_file.rs index 0f1c1c988..3b684e252 100644 --- a/src/test/spec/v2_runner/test_file.rs +++ b/src/test/spec/v2_runner/test_file.rs @@ -11,8 +11,7 @@ use crate::{ test::{ log_uncaptured, spec::merge_uri_options, - util::is_expected_type, - FailPoint, + util::{fail_point::FailPoint, is_expected_type}, Serverless, TestClient, DEFAULT_URI, diff --git a/src/test/util.rs b/src/test/util.rs index 26f837ba8..fe33b7f48 100644 --- a/src/test/util.rs +++ b/src/test/util.rs @@ -1,6 +1,6 @@ mod event; pub(crate) mod event_buffer; -mod failpoint; +pub(crate) mod fail_point; mod matchable; #[cfg(feature = "tracing-unstable")] mod trace; @@ -9,7 +9,6 @@ mod trace; pub(crate) use self::event::EventClient; pub(crate) use self::{ event::Event, - failpoint::{FailPoint, FailPointGuard, FailPointMode}, matchable::{assert_matches, eq_matches, is_expected_type, MatchErrExt, Matchable}, }; diff --git a/src/test/util/failpoint.rs b/src/test/util/failpoint.rs deleted file mode 100644 index 69d178ded..000000000 --- a/src/test/util/failpoint.rs +++ /dev/null @@ -1,165 +0,0 @@ -use std::time::Duration; - -use serde::{Deserialize, Serialize}; - -use crate::{ - bson::{doc, Document}, - error::Result, - selection_criteria::{ReadPreference, SelectionCriteria}, - test::log_uncaptured, - Client, -}; - -impl Client { - /// Configure a fail point on this client. Any test that calls this method must use the - /// #[tokio::test(flavor = "multi_thread")] test annotation. The guard returned from this - /// method should remain in scope while the fail point is intended for use. Upon drop, the - /// guard will disable the fail point on the server. 
- pub(crate) async fn enable_fail_point(&self, fail_point: FailPoint) -> Result { - let command = bson::to_document(&fail_point)?; - self.database("admin") - .run_command(command) - .selection_criteria(fail_point.selection_criteria.clone()) - .await?; - - Ok(FailPointGuard { - client: self.clone(), - failure_type: fail_point.failure_type, - selection_criteria: fail_point.selection_criteria, - }) - } -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub(crate) struct FailPoint { - /// The type of failure to configure. The current valid values are "failCommand" and - /// "failGetMoreAfterCursorCheckout". - #[serde(rename = "configureFailPoint")] - failure_type: String, - - /// The fail point's mode. - mode: FailPointMode, - - /// The data associated with the fail point. This includes the commands that should fail and - /// the error information that should be returned. - #[serde(default)] - data: Document, - - /// The selection criteria to use when configuring this fail point. - #[serde(skip, default = "primary_selection_criteria")] - selection_criteria: SelectionCriteria, -} - -fn primary_selection_criteria() -> SelectionCriteria { - ReadPreference::Primary.into() -} - -impl FailPoint { - /// Creates a new failCommand FailPoint. Call the various builder methods on the returned - /// FailPoint to configure the type of failure that should occur. - pub(crate) fn new(command_names: &[&str], mode: FailPointMode) -> Self { - let data = doc! { "failCommands": command_names }; - Self { - failure_type: "failCommand".to_string(), - mode, - data, - selection_criteria: ReadPreference::Primary.into(), - } - } - - /// The appName that a client must use to hit this fail point. - pub(crate) fn app_name(mut self, app_name: impl Into) -> Self { - self.data.insert("appName", app_name.into()); - self - } - - /// How long the server should block the affected commands. Only available on 4.2.9+ servers. - pub(crate) fn block_connection(mut self, block_connection_duration: Duration) -> Self { - self.data.insert("blockConnection", true); - self.data - .insert("blockTimeMS", block_connection_duration.as_millis() as i64); - self - } - - /// Whether the server should close the connection when the client sends an affected command. - /// Defaults to false. - pub(crate) fn close_connection(mut self, close_connection: bool) -> Self { - self.data.insert("closeConnection", close_connection); - self - } - - /// The error code to include in the server's reply to an affected command. - pub(crate) fn error_code(mut self, error_code: i64) -> Self { - self.data.insert("errorCode", error_code); - self - } - - /// The error labels to include in the server's reply to an affected command. Note that the - /// value passed to this method will completely override the labels that the server would - /// otherwise return. Only available on 4.4+ servers. - pub(crate) fn error_labels( - mut self, - error_labels: impl IntoIterator>, - ) -> Self { - let error_labels: Vec = error_labels.into_iter().map(Into::into).collect(); - self.data.insert("errorLabels", error_labels); - self - } - - /// The write concern error to include in the server's reply to an affected command. - pub(crate) fn write_concern_error(mut self, write_concern_error: Document) -> Self { - self.data.insert("writeConcernError", write_concern_error); - self - } - - /// The selection criteria to use when enabling this fail point. Defaults to a primary read - /// preference if unspecified. 
- pub(crate) fn selection_criteria(mut self, selection_criteria: SelectionCriteria) -> Self { - self.selection_criteria = selection_criteria; - self - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[allow(unused)] -pub(crate) enum FailPointMode { - AlwaysOn, - Times(i32), - Skip(i32), - Off, - ActivationProbability(f32), -} - -#[derive(Debug)] -#[must_use] -pub(crate) struct FailPointGuard { - client: Client, - failure_type: String, - selection_criteria: SelectionCriteria, -} - -impl Drop for FailPointGuard { - fn drop(&mut self) { - let client = self.client.clone(); - - // This forces the Tokio runtime to not finish shutdown until this future has completed. - // Unfortunately, this also means that tests using FailPointGuards have to use the - // multi-threaded runtime. - let result = tokio::task::block_in_place(|| { - futures::executor::block_on(async move { - client - .database("admin") - .run_command( - doc! { "configureFailPoint": self.failure_type.clone(), "mode": "off" }, - ) - .selection_criteria(self.selection_criteria.clone()) - .await - }) - }); - - if let Err(error) = result { - log_uncaptured(format!("failed disabling failpoint: {:?}", error)); - } - } -} From a2c52e8586f9b40b34c9f6fa00c3487c8157abdc Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 18 Apr 2024 11:08:24 -0600 Subject: [PATCH 56/75] add file --- src/test/util/fail_point.rs | 165 ++++++++++++++++++++++++++++++++++++ 1 file changed, 165 insertions(+) create mode 100644 src/test/util/fail_point.rs diff --git a/src/test/util/fail_point.rs b/src/test/util/fail_point.rs new file mode 100644 index 000000000..5eae96a6a --- /dev/null +++ b/src/test/util/fail_point.rs @@ -0,0 +1,165 @@ +use std::time::Duration; + +use serde::{Deserialize, Serialize}; + +use crate::{ + bson::{doc, Document}, + error::Result, + selection_criteria::{ReadPreference, SelectionCriteria}, + test::log_uncaptured, + Client, +}; + +impl Client { + /// Configure a fail point on this client. Any test that calls this method must use the + /// #[tokio::test(flavor = "multi_thread")] test annotation. The guard returned from this + /// method should remain in scope while the fail point is intended for use. Upon drop, the + /// guard will disable the fail point on the server. + pub(crate) async fn enable_fail_point(&self, fail_point: FailPoint) -> Result { + let command = bson::to_document(&fail_point)?; + self.database("admin") + .run_command(command) + .selection_criteria(fail_point.selection_criteria.clone()) + .await?; + + Ok(FailPointGuard { + client: self.clone(), + failure_type: fail_point.failure_type, + selection_criteria: fail_point.selection_criteria, + }) + } +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub(crate) struct FailPoint { + /// The type of failure to configure. The current valid values are "failCommand" and + /// "failGetMoreAfterCursorCheckout". + #[serde(rename = "configureFailPoint")] + failure_type: String, + + /// The fail point's mode. + mode: FailPointMode, + + /// The data associated with the fail point. This includes the commands that should fail and + /// the error information that should be returned. + #[serde(default)] + data: Document, + + /// The selection criteria to use when configuring this fail point. 
+ #[serde(skip, default = "primary_selection_criteria")] + selection_criteria: SelectionCriteria, +} + +fn primary_selection_criteria() -> SelectionCriteria { + ReadPreference::Primary.into() +} + +impl FailPoint { + /// Creates a new failCommand FailPoint. Call the various builder methods on the returned + /// FailPoint to configure the type of failure that should occur. + pub(crate) fn fail_command(command_names: &[&str], mode: FailPointMode) -> Self { + let data = doc! { "failCommands": command_names }; + Self { + failure_type: "failCommand".to_string(), + mode, + data, + selection_criteria: ReadPreference::Primary.into(), + } + } + + /// The appName that a client must use to hit this fail point. + pub(crate) fn app_name(mut self, app_name: impl Into) -> Self { + self.data.insert("appName", app_name.into()); + self + } + + /// How long the server should block the affected commands. Only available on 4.2.9+ servers. + pub(crate) fn block_connection(mut self, block_connection_duration: Duration) -> Self { + self.data.insert("blockConnection", true); + self.data + .insert("blockTimeMS", block_connection_duration.as_millis() as i64); + self + } + + /// Whether the server should close the connection when the client sends an affected command. + /// Defaults to false. + pub(crate) fn close_connection(mut self, close_connection: bool) -> Self { + self.data.insert("closeConnection", close_connection); + self + } + + /// The error code to include in the server's reply to an affected command. + pub(crate) fn error_code(mut self, error_code: i64) -> Self { + self.data.insert("errorCode", error_code); + self + } + + /// The error labels to include in the server's reply to an affected command. Note that the + /// value passed to this method will completely override the labels that the server would + /// otherwise return. Only available on 4.4+ servers. + pub(crate) fn error_labels( + mut self, + error_labels: impl IntoIterator>, + ) -> Self { + let error_labels: Vec = error_labels.into_iter().map(Into::into).collect(); + self.data.insert("errorLabels", error_labels); + self + } + + /// The write concern error to include in the server's reply to an affected command. + pub(crate) fn write_concern_error(mut self, write_concern_error: Document) -> Self { + self.data.insert("writeConcernError", write_concern_error); + self + } + + /// The selection criteria to use when enabling this fail point. Defaults to a primary read + /// preference if unspecified. + pub(crate) fn selection_criteria(mut self, selection_criteria: SelectionCriteria) -> Self { + self.selection_criteria = selection_criteria; + self + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(unused)] +pub(crate) enum FailPointMode { + AlwaysOn, + Times(i32), + Skip(i32), + Off, + ActivationProbability(f32), +} + +#[derive(Debug)] +#[must_use] +pub(crate) struct FailPointGuard { + client: Client, + failure_type: String, + selection_criteria: SelectionCriteria, +} + +impl Drop for FailPointGuard { + fn drop(&mut self) { + let client = self.client.clone(); + + // This forces the Tokio runtime to not finish shutdown until this future has completed. + // Unfortunately, this also means that tests using FailPointGuards have to use the + // multi-threaded runtime. + let result = tokio::task::block_in_place(|| { + futures::executor::block_on(async move { + client + .database("admin") + .run_command( + doc! 
{ "configureFailPoint": self.failure_type.clone(), "mode": "off" }, + ) + .selection_criteria(self.selection_criteria.clone()) + .await + }) + }); + + if let Err(error) = result { + log_uncaptured(format!("failed disabling failpoint: {:?}", error)); + } + } +} From cef01ede902682542597f7ec8e85a5c54cf38011 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 19 Apr 2024 12:20:24 -0600 Subject: [PATCH 57/75] small cleanup --- src/results.rs | 9 ++++----- src/test/util/fail_point.rs | 1 - 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/src/results.rs b/src/results.rs index 0d63df542..4e8a82e84 100644 --- a/src/results.rs +++ b/src/results.rs @@ -4,19 +4,18 @@ mod bulk_write; use std::collections::{HashMap, VecDeque}; +use serde::{Deserialize, Serialize}; +use serde_with::skip_serializing_none; + use crate::{ - bson::{serde_helpers, Bson, Document}, + bson::{serde_helpers, Binary, Bson, Document, RawDocumentBuf}, change_stream::event::ResumeToken, db::options::CreateCollectionOptions, serde_util, Namespace, }; -use bson::{Binary, RawDocumentBuf}; -use serde::{Deserialize, Serialize}; - pub use bulk_write::*; -use serde_with::skip_serializing_none; /// The result of a [`Collection::insert_one`](../struct.Collection.html#method.insert_one) /// operation. diff --git a/src/test/util/fail_point.rs b/src/test/util/fail_point.rs index 5eae96a6a..d1d88a056 100644 --- a/src/test/util/fail_point.rs +++ b/src/test/util/fail_point.rs @@ -122,7 +122,6 @@ impl FailPoint { #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -#[allow(unused)] pub(crate) enum FailPointMode { AlwaysOn, Times(i32), From 5ee0bd6c7eb801d410a3eba337aaebfcdd7aa459 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Mon, 22 Apr 2024 09:53:53 -0600 Subject: [PATCH 58/75] add namespace batching test --- src/test/bulk_write.rs | 53 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 646759c53..3d2eec069 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -383,3 +383,56 @@ async fn cursor_iteration_in_a_transaction() { let command_started_events = client.events.get_command_started_events(&["getMore"]); assert_eq!(command_started_events.len(), 1); } + +#[tokio::test] +async fn namespace_batching() { + let client = Client::test_builder().monitor_events().build().await; + + if client.server_version_lt(8, 0) { + log_uncaptured("skipping namespace_batching: bulkWrite requires 8.0+"); + return; + } + + let max_write_batch_size = client.server_info.max_write_batch_size.unwrap(); + + let mut models = vec![ + WriteModel::InsertOne { + namespace: Namespace::new("db", "coll"), + document: doc! { "a": "b" } + }; + max_write_batch_size as usize + ]; + models.push(WriteModel::InsertOne { + namespace: Namespace::new("db", "coll1"), + document: doc! 
{ "a": "b" }, + }); + + let result = client.bulk_write(models).await.unwrap(); + assert_eq!(result.inserted_count, max_write_batch_size + 1); + + let command_started_events = client.events.get_command_started_events(&["bulkWrite"]); + + let first_ns_info = command_started_events[0] + .command + .get_array("nsInfo") + .unwrap(); + assert_eq!(first_ns_info.len(), 1); + let first_ns = first_ns_info[0] + .as_document() + .unwrap() + .get_str("ns") + .unwrap(); + assert_eq!(first_ns, "db.coll"); + + let second_ns_info = command_started_events[1] + .command + .get_array("nsInfo") + .unwrap(); + assert_eq!(second_ns_info.len(), 1); + let second_ns = second_ns_info[0] + .as_document() + .unwrap() + .get_str("ns") + .unwrap(); + assert_eq!(second_ns, "db.coll1"); +} From 9ad9e886b731d68999ec7cf7afaa4402251e0d44 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Wed, 24 Apr 2024 09:44:13 -0600 Subject: [PATCH 59/75] retry getMore --- src/client/executor.rs | 2 +- src/client/session/test/causal_consistency.rs | 2 +- src/operation/bulk_write.rs | 11 +- src/test/bulk_write.rs | 103 +++++++++++++++++- src/test/change_stream.rs | 8 +- src/test/cursor.rs | 2 +- src/test/spec/retryable_reads.rs | 4 +- src/test/spec/retryable_writes.rs | 4 +- src/test/util/event.rs | 9 +- src/test/util/event_buffer.rs | 20 +++- 10 files changed, 146 insertions(+), 19 deletions(-) diff --git a/src/client/executor.rs b/src/client/executor.rs index 815400dff..35beb2040 100644 --- a/src/client/executor.rs +++ b/src/client/executor.rs @@ -1043,7 +1043,7 @@ impl Error { } } - if let Some(ref mut session) = session { + if let Some(session) = session { if self.contains_label(TRANSIENT_TRANSACTION_ERROR) || self.contains_label(UNKNOWN_TRANSACTION_COMMIT_RESULT) { diff --git a/src/client/session/test/causal_consistency.rs b/src/client/session/test/causal_consistency.rs index f36fbda17..4592e168f 100644 --- a/src/client/session/test/causal_consistency.rs +++ b/src/client/session/test/causal_consistency.rs @@ -201,7 +201,7 @@ async fn first_op_update_op_time() { let event = { let mut events = client.events.clone(); events - .get_command_events(&[name]) + .get_command_events_mut(&[name]) .into_iter() .find(|e| matches!(e, CommandEvent::Succeeded(_) | CommandEvent::Failed(_))) .unwrap_or_else(|| panic!("no event found for {}", name)) diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index 1952aa72f..f34e28578 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -11,7 +11,7 @@ use crate::{ checked::Checked, cmap::{Command, RawCommandResponse, StreamDescription}, cursor::CursorSpecification, - error::{ClientBulkWriteError, Error, ErrorKind, Result}, + error::{ClientBulkWriteError, Error, ErrorKind, Result, RETRYABLE_WRITE_ERROR}, operation::OperationWithDefaults, options::{BulkWriteOptions, OperationType, WriteModel}, results::{BulkWriteResult, DeleteResult, InsertOneResult, UpdateResult}, @@ -276,7 +276,7 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { context: ExecutionContext<'b>, ) -> BoxFuture<'b, Result> { async move { - let response: WriteResponseBody = response.body()?; + let mut response: WriteResponseBody = response.body()?; let mut bulk_write_error = ClientBulkWriteError::default(); @@ -345,6 +345,13 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { } } Err(error) => { + // Retry the entire bulkWrite command if cursor iteration fails. 
+ let labels = response.labels.get_or_insert_with(Default::default); + let retryable_write_error = RETRYABLE_WRITE_ERROR.to_string(); + if !labels.contains(&retryable_write_error) { + labels.push(retryable_write_error); + } + let error = Error::new( ErrorKind::ClientBulkWrite(bulk_write_error), response.labels, diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 3d2eec069..38582874c 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -1,5 +1,5 @@ use crate::{ - bson::doc, + bson::{doc, Document}, error::{ClientBulkWriteError, ErrorKind}, options::WriteModel, test::{ @@ -91,6 +91,11 @@ async fn max_message_size_bytes_batching() { let first_event = command_started_events .next() .expect("no first event observed"); + + let mut command = first_event.command.clone(); + command.remove("ops"); + dbg!("{}", command); + let first_len = first_event.command.get_array("ops").unwrap().len(); assert_eq!(first_len, num_models - 1); @@ -217,7 +222,7 @@ async fn successful_cursor_iteration() { let max_bson_object_size = client.server_info.max_bson_object_size as usize; - let collection = client.database("db").collection::("coll"); + let collection = client.database("db").collection::("coll"); collection.drop().await.unwrap(); let models = vec![ @@ -256,6 +261,7 @@ async fn successful_cursor_iteration() { #[tokio::test(flavor = "multi_thread")] async fn failed_cursor_iteration() { let mut options = get_client_options().await.clone(); + options.retry_writes = Some(false); if TestClient::new().await.is_sharded() { options.hosts.drain(1..); } @@ -275,7 +281,7 @@ async fn failed_cursor_iteration() { let fail_point = FailPoint::fail_command(&["getMore"], FailPointMode::Times(1)).error_code(8); let _guard = client.enable_fail_point(fail_point).await.unwrap(); - let collection = client.database("db").collection::("coll"); + let collection = client.database("db").collection::("coll"); collection.drop().await.unwrap(); let models = vec![ @@ -344,7 +350,7 @@ async fn cursor_iteration_in_a_transaction() { let max_bson_object_size = client.server_info.max_bson_object_size as usize; - let collection = client.database("db").collection::("coll"); + let collection = client.database("db").collection::("coll"); collection.drop().await.unwrap(); let mut session = client.start_session().await.unwrap(); @@ -436,3 +442,92 @@ async fn namespace_batching() { .unwrap(); assert_eq!(second_ns, "db.coll1"); } + +#[tokio::test(flavor = "multi_thread")] +async fn get_more_is_retried() { + let mut options = get_client_options().await.clone(); + if TestClient::new().await.is_sharded() { + options.hosts.drain(1..); + } + let client = Client::test_builder() + .options(options) + .monitor_events() + .build() + .await; + + if client.server_version_lt(8, 0) || client.is_standalone() { + log_uncaptured( + "skipping get_more_is_retried: bulkWrite requires 8.0+, retryable writes require \ + non-standalone", + ); + return; + } + + let max_bson_object_size = client.server_info.max_bson_object_size as usize; + + let fail_point = FailPoint::fail_command(&["getMore"], FailPointMode::Times(1)).error_code(6); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); + + let collection = client.database("db").collection::("coll"); + collection.drop().await.unwrap(); + + let models = vec![ + WriteModel::ReplaceOne { + namespace: collection.namespace(), + filter: doc! { "_id": "a".repeat(max_bson_object_size / 2) }, + replacement: doc! 
{ "x": 1 }, + array_filters: None, + collation: None, + hint: None, + upsert: Some(true), + }, + WriteModel::ReplaceOne { + namespace: collection.namespace(), + filter: doc! { "_id": "b".repeat(max_bson_object_size / 2) }, + replacement: doc! { "x": 1 }, + array_filters: None, + collation: None, + hint: None, + upsert: Some(true), + }, + ]; + + let _ = client.bulk_write(models).verbose_results(true).await; + + let mut command_events = client + .events + .get_command_events(&["bulkWrite", "getMore"]) + .into_iter(); + + let bulk_write_event = command_events.next().unwrap(); + let started_event = bulk_write_event.as_command_started().unwrap(); + assert_eq!(started_event.command_name, "bulkWrite"); + + let bulk_write_event = command_events.next().unwrap(); + let succeeded_event = bulk_write_event.as_command_succeeded().unwrap(); + assert_eq!(succeeded_event.command_name, "bulkWrite"); + + let get_more_event = command_events.next().unwrap(); + let started_event = get_more_event.as_command_started().unwrap(); + assert_eq!(started_event.command_name, "getMore"); + + let get_more_event = command_events.next().unwrap(); + let failed_event = get_more_event.as_command_failed().unwrap(); + assert_eq!(failed_event.command_name, "getMore"); + + let bulk_write_event = command_events.next().unwrap(); + let started_event = bulk_write_event.as_command_started().unwrap(); + assert_eq!(started_event.command_name, "bulkWrite"); + + let bulk_write_event = command_events.next().unwrap(); + let succeeded_event = bulk_write_event.as_command_succeeded().unwrap(); + assert_eq!(succeeded_event.command_name, "bulkWrite"); + + let get_more_event = command_events.next().unwrap(); + let started_event = get_more_event.as_command_started().unwrap(); + assert_eq!(started_event.command_name, "getMore"); + + let get_more_event = command_events.next().unwrap(); + let succeeded_event = get_more_event.as_command_succeeded().unwrap(); + assert_eq!(succeeded_event.command_name, "getMore"); +} diff --git a/src/test/change_stream.rs b/src/test/change_stream.rs index 19bea9e30..f8040892b 100644 --- a/src/test/change_stream.rs +++ b/src/test/change_stream.rs @@ -86,7 +86,7 @@ async fn tracks_resume_token() -> Result<()> { let events: Vec<_> = { let mut events = client.events.clone(); events - .get_command_events(&["aggregate", "getMore"]) + .get_command_events_mut(&["aggregate", "getMore"]) .into_iter() .filter_map(|ev| match ev { CommandEvent::Succeeded(s) => Some(s), @@ -232,7 +232,7 @@ async fn empty_batch_not_closed() -> Result<()> { #[allow(deprecated)] let events = { let mut events = client.events.clone(); - events.get_command_events(&["aggregate", "getMore"]) + events.get_command_events_mut(&["aggregate", "getMore"]) }; let cursor_id = match &events[1] { CommandEvent::Succeeded(CommandSucceededEvent { reply, .. 
}) => { @@ -318,7 +318,7 @@ async fn resume_start_at_operation_time() -> Result<()> { #[allow(deprecated)] let events = { let mut events = client.events.clone(); - events.get_command_events(&["aggregate"]) + events.get_command_events_mut(&["aggregate"]) }; assert_eq!(events.len(), 4); @@ -365,7 +365,7 @@ async fn batch_end_resume_token() -> Result<()> { #[allow(deprecated)] let commands = { let mut events = client.events.clone(); - events.get_command_events(&["aggregate", "getMore"]) + events.get_command_events_mut(&["aggregate", "getMore"]) }; assert!(matches!(commands.last(), Some( CommandEvent::Succeeded(CommandSucceededEvent { diff --git a/src/test/cursor.rs b/src/test/cursor.rs index f8cd61c66..f1b5f3b96 100644 --- a/src/test/cursor.rs +++ b/src/test/cursor.rs @@ -142,7 +142,7 @@ async fn batch_exhaustion() { let replies: Vec<_> = { let mut events = client.events.clone(); events - .get_command_events(&["getMore"]) + .get_command_events_mut(&["getMore"]) .into_iter() .filter_map(|e| e.as_command_succeeded().map(|e| e.reply.clone())) .collect() diff --git a/src/test/spec/retryable_reads.rs b/src/test/spec/retryable_reads.rs index b36ece236..135ab5fcb 100644 --- a/src/test/spec/retryable_reads.rs +++ b/src/test/spec/retryable_reads.rs @@ -202,7 +202,7 @@ async fn retry_read_different_mongos() { #[allow(deprecated)] let events = { let mut events = client.events.clone(); - events.get_command_events(&["find"]) + events.get_command_events_mut(&["find"]) }; assert!( matches!( @@ -263,7 +263,7 @@ async fn retry_read_same_mongos() { #[allow(deprecated)] let events = { let mut events = client.events.clone(); - events.get_command_events(&["find"]) + events.get_command_events_mut(&["find"]) }; assert!( matches!( diff --git a/src/test/spec/retryable_writes.rs b/src/test/spec/retryable_writes.rs index 9ab860288..0ddaa9b10 100644 --- a/src/test/spec/retryable_writes.rs +++ b/src/test/spec/retryable_writes.rs @@ -603,7 +603,7 @@ async fn retry_write_different_mongos() { #[allow(deprecated)] let events = { let mut events = client.events.clone(); - events.get_command_events(&["insert"]) + events.get_command_events_mut(&["insert"]) }; assert!( matches!( @@ -665,7 +665,7 @@ async fn retry_write_same_mongos() { #[allow(deprecated)] let events = { let mut events = client.events.clone(); - events.get_command_events(&["insert"]) + events.get_command_events_mut(&["insert"]) }; assert!( matches!( diff --git a/src/test/util/event.rs b/src/test/util/event.rs index 1198f4ce1..fa2d7e9ed 100644 --- a/src/test/util/event.rs +++ b/src/test/util/event.rs @@ -6,7 +6,7 @@ use crate::{ bson::doc, event::{ cmap::CmapEvent, - command::{CommandEvent, CommandStartedEvent, CommandSucceededEvent}, + command::{CommandEvent, CommandFailedEvent, CommandStartedEvent, CommandSucceededEvent}, sdam::SdamEvent, }, test::get_client_options, @@ -110,6 +110,13 @@ impl CommandEvent { _ => None, } } + + pub(crate) fn as_command_failed(&self) -> Option<&CommandFailedEvent> { + match self { + CommandEvent::Failed(e) => Some(e), + _ => None, + } + } } #[derive(Clone, Debug)] diff --git a/src/test/util/event_buffer.rs b/src/test/util/event_buffer.rs index 4da7b9f0f..fdefa861d 100644 --- a/src/test/util/event_buffer.rs +++ b/src/test/util/event_buffer.rs @@ -232,10 +232,28 @@ impl EventBuffer { .collect() } + pub(crate) fn get_command_events(&self, command_names: &[&str]) -> Vec { + self.inner + .events + .lock() + .unwrap() + .data + .iter() + .filter_map(|(event, _)| match event { + Event::Command(command_event) + if 
command_names.contains(&command_event.command_name()) => + { + Some(command_event.clone()) + } + _ => None, + }) + .collect() + } + /// Remove all command events from the buffer, returning those matching any of the command /// names. #[deprecated = "use immutable methods"] - pub(crate) fn get_command_events(&mut self, command_names: &[&str]) -> Vec { + pub(crate) fn get_command_events_mut(&mut self, command_names: &[&str]) -> Vec { let mut out = vec![]; self.retain(|ev| match ev { Event::Command(cev) => { From f99b736d47a8d5d60ad6bc2dd0914a13c47a08d7 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 25 Apr 2024 10:08:27 -0600 Subject: [PATCH 60/75] rework batch splitting sizes --- src/operation.rs | 5 ++-- src/operation/bulk_write.rs | 54 +++++++++++++++++++------------------ src/operation/insert.rs | 37 ++++++++++++------------- 3 files changed, 50 insertions(+), 46 deletions(-) diff --git a/src/operation.rs b/src/operation.rs index 252a64c43..0e08a1d34 100644 --- a/src/operation.rs +++ b/src/operation.rs @@ -78,8 +78,9 @@ const SERVER_4_4_0_WIRE_VERSION: i32 = 9; // The maximum number of bytes that may be included in a write payload when auto-encryption is // enabled. const MAX_ENCRYPTED_WRITE_SIZE: usize = 2_097_152; -// The amount of overhead bytes to account for when building a document sequence. -const COMMAND_OVERHEAD_SIZE: usize = 16_000; +// The amount of message overhead (OP_MSG bytes and command-agnostic fields) to account for when +// building a multi-write operation using document sequences. +const OP_MSG_OVERHEAD_BYTES: usize = 1_000; /// Context about the execution of the operation. pub(crate) struct ExecutionContext<'a> { diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index f34e28578..c3eafa881 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -26,8 +26,8 @@ use super::{ ExecutionContext, Retryability, WriteResponseBody, - COMMAND_OVERHEAD_SIZE, MAX_ENCRYPTED_WRITE_SIZE, + OP_MSG_OVERHEAD_BYTES, }; use server_responses::*; @@ -188,15 +188,24 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { const NAME: &'static str = "bulkWrite"; fn build(&mut self, description: &StreamDescription) -> Result> { - let max_operations: usize = Checked::new(description.max_write_batch_size).try_into()?; let max_doc_size: usize = Checked::new(description.max_bson_object_size).try_into()?; - let max_message_size = Checked::new(description.max_message_size_bytes) - .try_into::()? - - COMMAND_OVERHEAD_SIZE; + let max_message_size: usize = + Checked::new(description.max_message_size_bytes).try_into()?; + let max_operations: usize = Checked::new(description.max_write_batch_size).try_into()?; + + let mut command_body = rawdoc! 
{ Self::NAME: 1 }; + let options = match self.options { + Some(options) => bson::to_raw_document_buf(options), + None => bson::to_raw_document_buf(&BulkWriteOptions::default()), + }?; + bson_util::extend_raw_document_buf(&mut command_body, options)?; + + let max_document_sequences_size = + max_message_size - OP_MSG_OVERHEAD_BYTES - command_body.as_bytes().len(); let mut namespace_info = NamespaceInfo::new(); let mut ops = Vec::new(); - let mut size = 0; + let mut current_size = 0; for (i, model) in self.models.iter().take(max_operations).enumerate() { let (namespace_index, namespace_size) = namespace_info.get_index(model.namespace()); @@ -217,10 +226,6 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { .into()); } - if let Some(inserted_id) = inserted_id { - self.inserted_ids.insert(i, inserted_id); - } - let mut split = false; if self.encrypted && i != 0 { let model_entry_size = array_entry_size_bytes(i, operation_size)?; @@ -229,10 +234,11 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { } else { 0 }; - if size + model_entry_size + namespace_entry_size > MAX_ENCRYPTED_WRITE_SIZE { + if current_size + namespace_entry_size + model_entry_size > MAX_ENCRYPTED_WRITE_SIZE + { split = true; } - } else if size + namespace_size + operation_size > max_message_size { + } else if current_size + namespace_size + operation_size > max_document_sequences_size { split = true; } @@ -243,27 +249,23 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { namespace_info.namespaces.remove(last_index); } break; - } else { - size += namespace_size + operation_size; - ops.push(operation); } - } - let mut body = rawdoc! { Self::NAME: 1 }; - let options = match self.options { - Some(options) => bson::to_raw_document_buf(options), - None => bson::to_raw_document_buf(&BulkWriteOptions::default()), - }?; - bson_util::extend_raw_document_buf(&mut body, options)?; + if let Some(inserted_id) = inserted_id { + self.inserted_ids.insert(i, inserted_id); + } + current_size += namespace_size + operation_size; + ops.push(operation); + } self.n_attempted = ops.len(); if self.encrypted { - body.append("nsInfo", vec_to_raw_array_buf(namespace_info.namespaces)); - body.append("ops", vec_to_raw_array_buf(ops)); - Ok(Command::new(Self::NAME, "admin", body)) + command_body.append("nsInfo", vec_to_raw_array_buf(namespace_info.namespaces)); + command_body.append("ops", vec_to_raw_array_buf(ops)); + Ok(Command::new(Self::NAME, "admin", command_body)) } else { - let mut command = Command::new(Self::NAME, "admin", body); + let mut command = Command::new(Self::NAME, "admin", command_body); command.add_document_sequence("nsInfo", namespace_info.namespaces); command.add_document_sequence("ops", ops); Ok(command) diff --git a/src/operation/insert.rs b/src/operation/insert.rs index fd58b8f27..7390b20cf 100644 --- a/src/operation/insert.rs +++ b/src/operation/insert.rs @@ -20,7 +20,7 @@ use crate::{ Namespace, }; -use super::{ExecutionContext, COMMAND_OVERHEAD_SIZE, MAX_ENCRYPTED_WRITE_SIZE}; +use super::{ExecutionContext, MAX_ENCRYPTED_WRITE_SIZE, OP_MSG_OVERHEAD_BYTES}; #[derive(Debug)] pub(crate) struct Insert<'a> { @@ -60,25 +60,26 @@ impl<'a> OperationWithDefaults for Insert<'a> { const NAME: &'static str = "insert"; fn build(&mut self, description: &StreamDescription) -> Result> { + let max_doc_size: usize = Checked::new(description.max_bson_object_size).try_into()?; + let max_message_size: usize = + Checked::new(description.max_message_size_bytes).try_into()?; + let max_operations: usize = 
Checked::new(description.max_write_batch_size).try_into()?; + + let mut command_body = rawdoc! { Self::NAME: self.ns.coll.clone() }; + let options = bson::to_raw_document_buf(&self.options)?; + extend_raw_document_buf(&mut command_body, options)?; + + let max_document_sequence_size = + max_message_size - OP_MSG_OVERHEAD_BYTES - command_body.as_bytes().len(); + let mut docs = Vec::new(); - let mut size = 0; - - let max_doc_size = Checked::::try_from(description.max_bson_object_size)?; - let max_doc_sequence_size = - Checked::::try_from(description.max_message_size_bytes)? - COMMAND_OVERHEAD_SIZE; - let max_write_batch_size = Checked::::try_from(description.max_write_batch_size)?; - - for (i, document) in self - .documents - .iter() - .take(max_write_batch_size.get()?) - .enumerate() - { + let mut current_size = 0; + for (i, document) in self.documents.iter().take(max_operations).enumerate() { let mut document = bson::to_raw_document_buf(document)?; let id = get_or_prepend_id_field(&mut document)?; let doc_size = document.as_bytes().len(); - if doc_size > max_doc_size.get()? { + if doc_size > max_doc_size { return Err(ErrorKind::InvalidArgument { message: format!( "insert document must be within {} bytes, but document provided is {} \ @@ -94,16 +95,16 @@ impl<'a> OperationWithDefaults for Insert<'a> { // than `maxBsonObjectSize`) proceed with automatic encryption. if self.encrypted && i != 0 { let doc_entry_size = array_entry_size_bytes(i, document.as_bytes().len())?; - if (Checked::new(size) + doc_entry_size).get()? >= MAX_ENCRYPTED_WRITE_SIZE { + if current_size + doc_entry_size >= MAX_ENCRYPTED_WRITE_SIZE { break; } - } else if (Checked::new(size) + doc_size).get()? > max_doc_sequence_size.get()? { + } else if current_size + doc_size > max_document_sequence_size { break; } self.inserted_ids.push(id); docs.push(document); - size += doc_size; + current_size += doc_size; } let mut body = rawdoc! { From 0e8937838dc43807b6c57eb80fae861a13c3876b Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 25 Apr 2024 10:13:22 -0600 Subject: [PATCH 61/75] Revert "retry getMore" This reverts commit 9ad9e886b731d68999ec7cf7afaa4402251e0d44. 
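For reference, the approach being reverted tagged cursor-iteration failures with the retryable write label so that the executor would re-send the entire bulkWrite command. A rough sketch of that handling, lifted from the reverted diff (it assumes the surrounding handle_response scope, where `response` is the mutable WriteResponseBody):

    // Retry the entire bulkWrite command if iterating the results cursor fails:
    // attach the RetryableWriteError label before constructing the final error.
    let labels = response.labels.get_or_insert_with(Default::default);
    let retryable_write_error = RETRYABLE_WRITE_ERROR.to_string();
    if !labels.contains(&retryable_write_error) {
        labels.push(retryable_write_error);
    }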
--- src/client/executor.rs | 2 +- src/client/session/test/causal_consistency.rs | 2 +- src/operation/bulk_write.rs | 11 +- src/test/bulk_write.rs | 103 +----------------- src/test/change_stream.rs | 8 +- src/test/cursor.rs | 2 +- src/test/spec/retryable_reads.rs | 4 +- src/test/spec/retryable_writes.rs | 4 +- src/test/util/event.rs | 9 +- src/test/util/event_buffer.rs | 20 +--- 10 files changed, 19 insertions(+), 146 deletions(-) diff --git a/src/client/executor.rs b/src/client/executor.rs index 35beb2040..815400dff 100644 --- a/src/client/executor.rs +++ b/src/client/executor.rs @@ -1043,7 +1043,7 @@ impl Error { } } - if let Some(session) = session { + if let Some(ref mut session) = session { if self.contains_label(TRANSIENT_TRANSACTION_ERROR) || self.contains_label(UNKNOWN_TRANSACTION_COMMIT_RESULT) { diff --git a/src/client/session/test/causal_consistency.rs b/src/client/session/test/causal_consistency.rs index 4592e168f..f36fbda17 100644 --- a/src/client/session/test/causal_consistency.rs +++ b/src/client/session/test/causal_consistency.rs @@ -201,7 +201,7 @@ async fn first_op_update_op_time() { let event = { let mut events = client.events.clone(); events - .get_command_events_mut(&[name]) + .get_command_events(&[name]) .into_iter() .find(|e| matches!(e, CommandEvent::Succeeded(_) | CommandEvent::Failed(_))) .unwrap_or_else(|| panic!("no event found for {}", name)) diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index c3eafa881..131922811 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -11,7 +11,7 @@ use crate::{ checked::Checked, cmap::{Command, RawCommandResponse, StreamDescription}, cursor::CursorSpecification, - error::{ClientBulkWriteError, Error, ErrorKind, Result, RETRYABLE_WRITE_ERROR}, + error::{ClientBulkWriteError, Error, ErrorKind, Result}, operation::OperationWithDefaults, options::{BulkWriteOptions, OperationType, WriteModel}, results::{BulkWriteResult, DeleteResult, InsertOneResult, UpdateResult}, @@ -278,7 +278,7 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { context: ExecutionContext<'b>, ) -> BoxFuture<'b, Result> { async move { - let mut response: WriteResponseBody = response.body()?; + let response: WriteResponseBody = response.body()?; let mut bulk_write_error = ClientBulkWriteError::default(); @@ -347,13 +347,6 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { } } Err(error) => { - // Retry the entire bulkWrite command if cursor iteration fails. 
- let labels = response.labels.get_or_insert_with(Default::default); - let retryable_write_error = RETRYABLE_WRITE_ERROR.to_string(); - if !labels.contains(&retryable_write_error) { - labels.push(retryable_write_error); - } - let error = Error::new( ErrorKind::ClientBulkWrite(bulk_write_error), response.labels, diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 38582874c..3d2eec069 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -1,5 +1,5 @@ use crate::{ - bson::{doc, Document}, + bson::doc, error::{ClientBulkWriteError, ErrorKind}, options::WriteModel, test::{ @@ -91,11 +91,6 @@ async fn max_message_size_bytes_batching() { let first_event = command_started_events .next() .expect("no first event observed"); - - let mut command = first_event.command.clone(); - command.remove("ops"); - dbg!("{}", command); - let first_len = first_event.command.get_array("ops").unwrap().len(); assert_eq!(first_len, num_models - 1); @@ -222,7 +217,7 @@ async fn successful_cursor_iteration() { let max_bson_object_size = client.server_info.max_bson_object_size as usize; - let collection = client.database("db").collection::("coll"); + let collection = client.database("db").collection::("coll"); collection.drop().await.unwrap(); let models = vec![ @@ -261,7 +256,6 @@ async fn successful_cursor_iteration() { #[tokio::test(flavor = "multi_thread")] async fn failed_cursor_iteration() { let mut options = get_client_options().await.clone(); - options.retry_writes = Some(false); if TestClient::new().await.is_sharded() { options.hosts.drain(1..); } @@ -281,7 +275,7 @@ async fn failed_cursor_iteration() { let fail_point = FailPoint::fail_command(&["getMore"], FailPointMode::Times(1)).error_code(8); let _guard = client.enable_fail_point(fail_point).await.unwrap(); - let collection = client.database("db").collection::("coll"); + let collection = client.database("db").collection::("coll"); collection.drop().await.unwrap(); let models = vec![ @@ -350,7 +344,7 @@ async fn cursor_iteration_in_a_transaction() { let max_bson_object_size = client.server_info.max_bson_object_size as usize; - let collection = client.database("db").collection::("coll"); + let collection = client.database("db").collection::("coll"); collection.drop().await.unwrap(); let mut session = client.start_session().await.unwrap(); @@ -442,92 +436,3 @@ async fn namespace_batching() { .unwrap(); assert_eq!(second_ns, "db.coll1"); } - -#[tokio::test(flavor = "multi_thread")] -async fn get_more_is_retried() { - let mut options = get_client_options().await.clone(); - if TestClient::new().await.is_sharded() { - options.hosts.drain(1..); - } - let client = Client::test_builder() - .options(options) - .monitor_events() - .build() - .await; - - if client.server_version_lt(8, 0) || client.is_standalone() { - log_uncaptured( - "skipping get_more_is_retried: bulkWrite requires 8.0+, retryable writes require \ - non-standalone", - ); - return; - } - - let max_bson_object_size = client.server_info.max_bson_object_size as usize; - - let fail_point = FailPoint::fail_command(&["getMore"], FailPointMode::Times(1)).error_code(6); - let _guard = client.enable_fail_point(fail_point).await.unwrap(); - - let collection = client.database("db").collection::("coll"); - collection.drop().await.unwrap(); - - let models = vec![ - WriteModel::ReplaceOne { - namespace: collection.namespace(), - filter: doc! { "_id": "a".repeat(max_bson_object_size / 2) }, - replacement: doc! 
{ "x": 1 }, - array_filters: None, - collation: None, - hint: None, - upsert: Some(true), - }, - WriteModel::ReplaceOne { - namespace: collection.namespace(), - filter: doc! { "_id": "b".repeat(max_bson_object_size / 2) }, - replacement: doc! { "x": 1 }, - array_filters: None, - collation: None, - hint: None, - upsert: Some(true), - }, - ]; - - let _ = client.bulk_write(models).verbose_results(true).await; - - let mut command_events = client - .events - .get_command_events(&["bulkWrite", "getMore"]) - .into_iter(); - - let bulk_write_event = command_events.next().unwrap(); - let started_event = bulk_write_event.as_command_started().unwrap(); - assert_eq!(started_event.command_name, "bulkWrite"); - - let bulk_write_event = command_events.next().unwrap(); - let succeeded_event = bulk_write_event.as_command_succeeded().unwrap(); - assert_eq!(succeeded_event.command_name, "bulkWrite"); - - let get_more_event = command_events.next().unwrap(); - let started_event = get_more_event.as_command_started().unwrap(); - assert_eq!(started_event.command_name, "getMore"); - - let get_more_event = command_events.next().unwrap(); - let failed_event = get_more_event.as_command_failed().unwrap(); - assert_eq!(failed_event.command_name, "getMore"); - - let bulk_write_event = command_events.next().unwrap(); - let started_event = bulk_write_event.as_command_started().unwrap(); - assert_eq!(started_event.command_name, "bulkWrite"); - - let bulk_write_event = command_events.next().unwrap(); - let succeeded_event = bulk_write_event.as_command_succeeded().unwrap(); - assert_eq!(succeeded_event.command_name, "bulkWrite"); - - let get_more_event = command_events.next().unwrap(); - let started_event = get_more_event.as_command_started().unwrap(); - assert_eq!(started_event.command_name, "getMore"); - - let get_more_event = command_events.next().unwrap(); - let succeeded_event = get_more_event.as_command_succeeded().unwrap(); - assert_eq!(succeeded_event.command_name, "getMore"); -} diff --git a/src/test/change_stream.rs b/src/test/change_stream.rs index f8040892b..19bea9e30 100644 --- a/src/test/change_stream.rs +++ b/src/test/change_stream.rs @@ -86,7 +86,7 @@ async fn tracks_resume_token() -> Result<()> { let events: Vec<_> = { let mut events = client.events.clone(); events - .get_command_events_mut(&["aggregate", "getMore"]) + .get_command_events(&["aggregate", "getMore"]) .into_iter() .filter_map(|ev| match ev { CommandEvent::Succeeded(s) => Some(s), @@ -232,7 +232,7 @@ async fn empty_batch_not_closed() -> Result<()> { #[allow(deprecated)] let events = { let mut events = client.events.clone(); - events.get_command_events_mut(&["aggregate", "getMore"]) + events.get_command_events(&["aggregate", "getMore"]) }; let cursor_id = match &events[1] { CommandEvent::Succeeded(CommandSucceededEvent { reply, .. 
}) => { @@ -318,7 +318,7 @@ async fn resume_start_at_operation_time() -> Result<()> { #[allow(deprecated)] let events = { let mut events = client.events.clone(); - events.get_command_events_mut(&["aggregate"]) + events.get_command_events(&["aggregate"]) }; assert_eq!(events.len(), 4); @@ -365,7 +365,7 @@ async fn batch_end_resume_token() -> Result<()> { #[allow(deprecated)] let commands = { let mut events = client.events.clone(); - events.get_command_events_mut(&["aggregate", "getMore"]) + events.get_command_events(&["aggregate", "getMore"]) }; assert!(matches!(commands.last(), Some( CommandEvent::Succeeded(CommandSucceededEvent { diff --git a/src/test/cursor.rs b/src/test/cursor.rs index f1b5f3b96..f8cd61c66 100644 --- a/src/test/cursor.rs +++ b/src/test/cursor.rs @@ -142,7 +142,7 @@ async fn batch_exhaustion() { let replies: Vec<_> = { let mut events = client.events.clone(); events - .get_command_events_mut(&["getMore"]) + .get_command_events(&["getMore"]) .into_iter() .filter_map(|e| e.as_command_succeeded().map(|e| e.reply.clone())) .collect() diff --git a/src/test/spec/retryable_reads.rs b/src/test/spec/retryable_reads.rs index 135ab5fcb..b36ece236 100644 --- a/src/test/spec/retryable_reads.rs +++ b/src/test/spec/retryable_reads.rs @@ -202,7 +202,7 @@ async fn retry_read_different_mongos() { #[allow(deprecated)] let events = { let mut events = client.events.clone(); - events.get_command_events_mut(&["find"]) + events.get_command_events(&["find"]) }; assert!( matches!( @@ -263,7 +263,7 @@ async fn retry_read_same_mongos() { #[allow(deprecated)] let events = { let mut events = client.events.clone(); - events.get_command_events_mut(&["find"]) + events.get_command_events(&["find"]) }; assert!( matches!( diff --git a/src/test/spec/retryable_writes.rs b/src/test/spec/retryable_writes.rs index 0ddaa9b10..9ab860288 100644 --- a/src/test/spec/retryable_writes.rs +++ b/src/test/spec/retryable_writes.rs @@ -603,7 +603,7 @@ async fn retry_write_different_mongos() { #[allow(deprecated)] let events = { let mut events = client.events.clone(); - events.get_command_events_mut(&["insert"]) + events.get_command_events(&["insert"]) }; assert!( matches!( @@ -665,7 +665,7 @@ async fn retry_write_same_mongos() { #[allow(deprecated)] let events = { let mut events = client.events.clone(); - events.get_command_events_mut(&["insert"]) + events.get_command_events(&["insert"]) }; assert!( matches!( diff --git a/src/test/util/event.rs b/src/test/util/event.rs index fa2d7e9ed..1198f4ce1 100644 --- a/src/test/util/event.rs +++ b/src/test/util/event.rs @@ -6,7 +6,7 @@ use crate::{ bson::doc, event::{ cmap::CmapEvent, - command::{CommandEvent, CommandFailedEvent, CommandStartedEvent, CommandSucceededEvent}, + command::{CommandEvent, CommandStartedEvent, CommandSucceededEvent}, sdam::SdamEvent, }, test::get_client_options, @@ -110,13 +110,6 @@ impl CommandEvent { _ => None, } } - - pub(crate) fn as_command_failed(&self) -> Option<&CommandFailedEvent> { - match self { - CommandEvent::Failed(e) => Some(e), - _ => None, - } - } } #[derive(Clone, Debug)] diff --git a/src/test/util/event_buffer.rs b/src/test/util/event_buffer.rs index fdefa861d..4da7b9f0f 100644 --- a/src/test/util/event_buffer.rs +++ b/src/test/util/event_buffer.rs @@ -232,28 +232,10 @@ impl EventBuffer { .collect() } - pub(crate) fn get_command_events(&self, command_names: &[&str]) -> Vec { - self.inner - .events - .lock() - .unwrap() - .data - .iter() - .filter_map(|(event, _)| match event { - Event::Command(command_event) - if 
command_names.contains(&command_event.command_name()) => - { - Some(command_event.clone()) - } - _ => None, - }) - .collect() - } - /// Remove all command events from the buffer, returning those matching any of the command /// names. #[deprecated = "use immutable methods"] - pub(crate) fn get_command_events_mut(&mut self, command_names: &[&str]) -> Vec { + pub(crate) fn get_command_events(&mut self, command_names: &[&str]) -> Vec { let mut out = vec![]; self.retain(|ev| match ev { Event::Command(cev) => { From a815a753eddf014cfa61b7efd223f93f1d4d7c1a Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 25 Apr 2024 13:01:11 -0600 Subject: [PATCH 62/75] add ns size batching test --- src/test/bulk_write.rs | 78 ++++++++++++++++++++++++++++-------------- 1 file changed, 53 insertions(+), 25 deletions(-) diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 3d2eec069..fd4e20fe8 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -1,5 +1,5 @@ use crate::{ - bson::doc, + bson::{doc, Document}, error::{ClientBulkWriteError, ErrorKind}, options::WriteModel, test::{ @@ -217,7 +217,7 @@ async fn successful_cursor_iteration() { let max_bson_object_size = client.server_info.max_bson_object_size as usize; - let collection = client.database("db").collection::("coll"); + let collection = client.database("db").collection::("coll"); collection.drop().await.unwrap(); let models = vec![ @@ -275,7 +275,7 @@ async fn failed_cursor_iteration() { let fail_point = FailPoint::fail_command(&["getMore"], FailPointMode::Times(1)).error_code(8); let _guard = client.enable_fail_point(fail_point).await.unwrap(); - let collection = client.database("db").collection::("coll"); + let collection = client.database("db").collection::("coll"); collection.drop().await.unwrap(); let models = vec![ @@ -344,7 +344,7 @@ async fn cursor_iteration_in_a_transaction() { let max_bson_object_size = client.server_info.max_bson_object_size as usize; - let collection = client.database("db").collection::("coll"); + let collection = client.database("db").collection::("coll"); collection.drop().await.unwrap(); let mut session = client.start_session().await.unwrap(); @@ -384,6 +384,12 @@ async fn cursor_iteration_in_a_transaction() { assert_eq!(command_started_events.len(), 1); } +fn get_namespace<'a>(command: &'a Document) -> Option<&'a str> { + let ns_info = command.get_array("nsInfo").ok()?; + let ns_doc = ns_info[0].as_document()?; + ns_doc.get_str("ns").ok() +} + #[tokio::test] async fn namespace_batching() { let client = Client::test_builder().monitor_events().build().await; @@ -410,29 +416,51 @@ async fn namespace_batching() { let result = client.bulk_write(models).await.unwrap(); assert_eq!(result.inserted_count, max_write_batch_size + 1); - let command_started_events = client.events.get_command_started_events(&["bulkWrite"]); + let mut command_started_events = client + .events + .get_command_started_events(&["bulkWrite"]) + .into_iter(); - let first_ns_info = command_started_events[0] - .command - .get_array("nsInfo") - .unwrap(); - assert_eq!(first_ns_info.len(), 1); - let first_ns = first_ns_info[0] - .as_document() - .unwrap() - .get_str("ns") - .unwrap(); + let first_event = command_started_events.next().unwrap(); + let first_ns = get_namespace(&first_event.command).unwrap(); assert_eq!(first_ns, "db.coll"); - let second_ns_info = command_started_events[1] - .command - .get_array("nsInfo") - .unwrap(); - assert_eq!(second_ns_info.len(), 1); - let second_ns = second_ns_info[0] - .as_document() - 
.unwrap() - .get_str("ns") - .unwrap(); + let second_event = command_started_events.next().unwrap(); + let second_ns = get_namespace(&second_event.command).unwrap(); assert_eq!(second_ns, "db.coll1"); } + +#[tokio::test] +async fn namespace_size_batching() { + let client = Client::test_builder().monitor_events().build().await; + + let first_namespace = Namespace::new("db", "coll"); + let mut models = vec![ + WriteModel::InsertOne { + namespace: first_namespace.clone(), + document: doc! { "a": "b".repeat(15_999_555) } + }; + 3 + ]; + + let second_namespace = Namespace::new("db", "c".repeat(200)); + models.push(WriteModel::InsertOne { + namespace: second_namespace.clone(), + document: doc! { "a": "b" }, + }); + let result = client.bulk_write(models).await.unwrap(); + assert_eq!(result.inserted_count, 4); + + let mut command_started_events = client + .events + .get_command_started_events(&["bulkWrite"]) + .into_iter(); + + let first_event = command_started_events.next().unwrap(); + let actual_first_ns = get_namespace(&first_event.command).unwrap(); + assert_eq!(actual_first_ns, &first_namespace.to_string()); + + let second_event = command_started_events.next().unwrap(); + let actual_second_ns = get_namespace(&second_event.command).unwrap(); + assert_eq!(actual_second_ns, &second_namespace.to_string()); +} From ad579feb90c106288ef1926d2a836be844966527 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 26 Apr 2024 11:34:14 -0600 Subject: [PATCH 63/75] different ns info batching test --- src/test/bulk_write.rs | 110 ++++++++++++++++++----------------------- 1 file changed, 48 insertions(+), 62 deletions(-) diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index fd4e20fe8..7fe4d5545 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -384,83 +384,69 @@ async fn cursor_iteration_in_a_transaction() { assert_eq!(command_started_events.len(), 1); } -fn get_namespace<'a>(command: &'a Document) -> Option<&'a str> { - let ns_info = command.get_array("nsInfo").ok()?; - let ns_doc = ns_info[0].as_document()?; - ns_doc.get_str("ns").ok() -} - #[tokio::test] -async fn namespace_batching() { +async fn namespace_batch_splitting() { let client = Client::test_builder().monitor_events().build().await; + let max_message_size_bytes = client.server_info.max_message_size_bytes as usize; + let max_bson_object_size = client.server_info.max_bson_object_size as usize; - if client.server_version_lt(8, 0) { - log_uncaptured("skipping namespace_batching: bulkWrite requires 8.0+"); - return; - } - - let max_write_batch_size = client.server_info.max_write_batch_size.unwrap(); - - let mut models = vec![ - WriteModel::InsertOne { - namespace: Namespace::new("db", "coll"), - document: doc! { "a": "b" } - }; - max_write_batch_size as usize - ]; - models.push(WriteModel::InsertOne { - namespace: Namespace::new("db", "coll1"), - document: doc! 
{ "a": "b" }, - }); - - let result = client.bulk_write(models).await.unwrap(); - assert_eq!(result.inserted_count, max_write_batch_size + 1); - - let mut command_started_events = client - .events - .get_command_started_events(&["bulkWrite"]) - .into_iter(); - - let first_event = command_started_events.next().unwrap(); - let first_ns = get_namespace(&first_event.command).unwrap(); - assert_eq!(first_ns, "db.coll"); + let first_namespace = Namespace::new("db", "coll"); + let second_namespace = Namespace::new("db", "c".repeat(200)); - let second_event = command_started_events.next().unwrap(); - let second_ns = get_namespace(&second_event.command).unwrap(); - assert_eq!(second_ns, "db.coll1"); -} + let ops_bytes = max_message_size_bytes - 1122; -#[tokio::test] -async fn namespace_size_batching() { - let client = Client::test_builder().monitor_events().build().await; + let mut num_models = ops_bytes / max_bson_object_size; + let first_model = WriteModel::InsertOne { + namespace: first_namespace.clone(), + document: doc! { "a": "b".repeat(max_bson_object_size - 57) }, + }; + let mut models = vec![first_model; num_models]; - let first_namespace = Namespace::new("db", "coll"); - let mut models = vec![ - WriteModel::InsertOne { + let remainder_bytes = ops_bytes % max_bson_object_size; + if remainder_bytes >= 217 { + models.push(WriteModel::InsertOne { namespace: first_namespace.clone(), - document: doc! { "a": "b".repeat(15_999_555) } - }; - 3 - ]; + document: doc! { "a": "b".repeat(remainder_bytes - 57) }, + }); + num_models += 1; + } - let second_namespace = Namespace::new("db", "c".repeat(200)); - models.push(WriteModel::InsertOne { + let second_model = WriteModel::InsertOne { namespace: second_namespace.clone(), document: doc! { "a": "b" }, - }); + }; + models.push(second_model); + let result = client.bulk_write(models).await.unwrap(); - assert_eq!(result.inserted_count, 4); + assert_eq!(result.inserted_count as usize, num_models + 1); let mut command_started_events = client .events .get_command_started_events(&["bulkWrite"]) .into_iter(); + assert_eq!(command_started_events.len(), 2); - let first_event = command_started_events.next().unwrap(); - let actual_first_ns = get_namespace(&first_event.command).unwrap(); - assert_eq!(actual_first_ns, &first_namespace.to_string()); - - let second_event = command_started_events.next().unwrap(); - let actual_second_ns = get_namespace(&second_event.command).unwrap(); - assert_eq!(actual_second_ns, &second_namespace.to_string()); + let first_command = command_started_events.next().unwrap().command; + let first_ns_info = first_command.get_array("nsInfo").unwrap(); + assert_eq!(first_ns_info.len(), 1); + let namespace = first_ns_info[0] + .as_document() + .unwrap() + .get_str("ns") + .unwrap(); + assert_eq!(namespace, &first_namespace.to_string()); + let first_ops = first_command.get_array("ops").unwrap(); + assert_eq!(first_ops.len(), num_models); + + let second_command = command_started_events.next().unwrap().command; + let second_ns_info = second_command.get_array("nsInfo").unwrap(); + assert_eq!(second_ns_info.len(), 1); + let namespace = second_ns_info[0] + .as_document() + .unwrap() + .get_str("ns") + .unwrap(); + assert_eq!(namespace, &second_namespace.to_string()); + let second_ops = second_command.get_array("ops").unwrap(); + assert_eq!(second_ops.len(), 1); } From 72ee8c5fa5824d0794be5aff1644606ecb3f01fd Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 26 Apr 2024 11:44:16 -0600 Subject: [PATCH 64/75] too large test --- 
src/test/bulk_write.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 7fe4d5545..42727a3cd 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -450,3 +450,17 @@ async fn namespace_batch_splitting() { let second_ops = second_command.get_array("ops").unwrap(); assert_eq!(second_ops.len(), 1); } + +#[tokio::test] +async fn too_large_client_error() { + let client = Client::test_builder().monitor_events().build().await; + let max_message_size_bytes = client.server_info.max_message_size_bytes as usize; + + let model = WriteModel::InsertOne { + namespace: Namespace::new("db", "coll"), + document: doc! { "a": "b".repeat(max_message_size_bytes) }, + }; + + let error = client.bulk_write(vec![model]).await.unwrap_err(); + assert!(!error.is_server_error()); +} From 7fcec2dc8f99dc6595e51392b13978f6b5e61448 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 26 Apr 2024 13:12:40 -0600 Subject: [PATCH 65/75] skip tests --- src/test/bulk_write.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 42727a3cd..2c5817bd8 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -390,6 +390,11 @@ async fn namespace_batch_splitting() { let max_message_size_bytes = client.server_info.max_message_size_bytes as usize; let max_bson_object_size = client.server_info.max_bson_object_size as usize; + if client.server_version_lt(8, 0) { + log_uncaptured("skipping namespace_batch_splitting: bulkWrite requires 8.0+"); + return; + } + let first_namespace = Namespace::new("db", "coll"); let second_namespace = Namespace::new("db", "c".repeat(200)); @@ -456,6 +461,11 @@ async fn too_large_client_error() { let client = Client::test_builder().monitor_events().build().await; let max_message_size_bytes = client.server_info.max_message_size_bytes as usize; + if client.server_version_lt(8, 0) { + log_uncaptured("skipping too_large_client_error: bulkWrite requires 8.0+"); + return; + } + let model = WriteModel::InsertOne { namespace: Namespace::new("db", "coll"), document: doc! 
{ "a": "b".repeat(max_message_size_bytes) }, From e23d49c10f0696d23f51d016303e845fd9137159 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Wed, 1 May 2024 16:24:30 -0600 Subject: [PATCH 66/75] rework namespace splitting test --- src/test/bulk_write.rs | 91 ++++++++++++++++++++++++++++-------------- 1 file changed, 61 insertions(+), 30 deletions(-) diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 2c5817bd8..9396dfbb0 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -386,26 +386,25 @@ async fn cursor_iteration_in_a_transaction() { #[tokio::test] async fn namespace_batch_splitting() { - let client = Client::test_builder().monitor_events().build().await; - let max_message_size_bytes = client.server_info.max_message_size_bytes as usize; - let max_bson_object_size = client.server_info.max_bson_object_size as usize; + let first_namespace = Namespace::new("db", "coll"); + let mut client = Client::test_builder().monitor_events().build().await; if client.server_version_lt(8, 0) { log_uncaptured("skipping namespace_batch_splitting: bulkWrite requires 8.0+"); return; } - let first_namespace = Namespace::new("db", "coll"); - let second_namespace = Namespace::new("db", "c".repeat(200)); + let max_message_size_bytes = client.server_info.max_message_size_bytes as usize; + let max_bson_object_size = client.server_info.max_bson_object_size as usize; let ops_bytes = max_message_size_bytes - 1122; + let num_models = ops_bytes / max_bson_object_size; - let mut num_models = ops_bytes / max_bson_object_size; - let first_model = WriteModel::InsertOne { + let model = WriteModel::InsertOne { namespace: first_namespace.clone(), document: doc! { "a": "b".repeat(max_bson_object_size - 57) }, }; - let mut models = vec![first_model; num_models]; + let mut models = vec![model; num_models]; let remainder_bytes = ops_bytes % max_bson_object_size; if remainder_bytes >= 217 { @@ -413,47 +412,79 @@ async fn namespace_batch_splitting() { namespace: first_namespace.clone(), document: doc! { "a": "b".repeat(remainder_bytes - 57) }, }); - num_models += 1; } - let second_model = WriteModel::InsertOne { + // Case 1: no batch-splitting required + + let mut first_models = models.clone(); + first_models.push(WriteModel::InsertOne { + namespace: first_namespace.clone(), + document: doc! { "a": "b" }, + }); + let num_models = first_models.len(); + + let result = client.bulk_write(first_models).await.unwrap(); + assert_eq!(result.inserted_count as usize, num_models); + + let command_started_events = client.events.get_command_started_events(&["bulkWrite"]); + assert_eq!(command_started_events.len(), 1); + + let event = &command_started_events[0]; + + let ops = event.command.get_array("ops").unwrap(); + assert_eq!(ops.len(), num_models); + + let ns_info = event.command.get_array("nsInfo").unwrap(); + assert_eq!(ns_info.len(), 1); + let namespace = ns_info[0].as_document().unwrap().get_str("ns").unwrap(); + assert_eq!(namespace, &first_namespace.to_string()); + + // Case 2: batch-splitting required + + client.events.clear_cached_events(); + + let second_namespace = Namespace::new("db", "c".repeat(200)); + + let mut second_models = models.clone(); + second_models.push(WriteModel::InsertOne { namespace: second_namespace.clone(), document: doc! 
{ "a": "b" }, - }; - models.push(second_model); + }); + let num_models = second_models.len(); - let result = client.bulk_write(models).await.unwrap(); - assert_eq!(result.inserted_count as usize, num_models + 1); + let result = client.bulk_write(second_models).await.unwrap(); + assert_eq!(result.inserted_count as usize, num_models); - let mut command_started_events = client - .events - .get_command_started_events(&["bulkWrite"]) - .into_iter(); + let command_started_events = client.events.get_command_started_events(&["bulkWrite"]); assert_eq!(command_started_events.len(), 2); - let first_command = command_started_events.next().unwrap().command; - let first_ns_info = first_command.get_array("nsInfo").unwrap(); + let first_event = &command_started_events[0]; + + let first_ops = first_event.command.get_array("ops").unwrap(); + assert_eq!(first_ops.len(), num_models - 1); + + let first_ns_info = first_event.command.get_array("nsInfo").unwrap(); assert_eq!(first_ns_info.len(), 1); - let namespace = first_ns_info[0] + let actual_first_namespace = first_ns_info[0] .as_document() .unwrap() .get_str("ns") .unwrap(); - assert_eq!(namespace, &first_namespace.to_string()); - let first_ops = first_command.get_array("ops").unwrap(); - assert_eq!(first_ops.len(), num_models); + assert_eq!(actual_first_namespace, &first_namespace.to_string()); + + let second_event = &command_started_events[1]; - let second_command = command_started_events.next().unwrap().command; - let second_ns_info = second_command.get_array("nsInfo").unwrap(); + let second_ops = second_event.command.get_array("ops").unwrap(); + assert_eq!(second_ops.len(), 1); + + let second_ns_info = second_event.command.get_array("nsInfo").unwrap(); assert_eq!(second_ns_info.len(), 1); - let namespace = second_ns_info[0] + let actual_second_namespace = second_ns_info[0] .as_document() .unwrap() .get_str("ns") .unwrap(); - assert_eq!(namespace, &second_namespace.to_string()); - let second_ops = second_command.get_array("ops").unwrap(); - assert_eq!(second_ops.len(), 1); + assert_eq!(actual_second_namespace, &second_namespace.to_string()); } #[tokio::test] From c598745a40f189315cd15ae95b563e5c4486f2d9 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 3 May 2024 10:11:50 -0600 Subject: [PATCH 67/75] error for fle --- src/action/bulk_write.rs | 14 ++++++++++++ src/action/insert_many.rs | 5 +---- src/action/insert_one.rs | 7 +----- src/operation/bulk_write.rs | 45 ++++++------------------------------- 4 files changed, 23 insertions(+), 48 deletions(-) diff --git a/src/action/bulk_write.rs b/src/action/bulk_write.rs index 29444a131..123b2d2b3 100644 --- a/src/action/bulk_write.rs +++ b/src/action/bulk_write.rs @@ -67,6 +67,20 @@ impl<'a> Action for BulkWrite<'a> { type Future = BulkWriteFuture; async fn execute(mut self) -> Result { + #[cfg(feature = "in-use-encryption-unstable")] + if self.client.should_auto_encrypt().await { + use mongocrypt::error::{Error as EncryptionError, ErrorKind as EncryptionErrorKind}; + + let error = EncryptionError { + kind: EncryptionErrorKind::Client, + code: None, + message: Some( + "bulkWrite does not currently support automatic encryption".to_string(), + ), + }; + return Err(ErrorKind::Encryption(error).into()); + } + resolve_write_concern_with_session!( self.client, self.options, diff --git a/src/action/insert_many.rs b/src/action/insert_many.rs index 0c2e349d1..a6c0040c3 100644 --- a/src/action/insert_many.rs +++ b/src/action/insert_many.rs @@ -103,10 +103,7 @@ impl<'a> Action for InsertMany<'a> { .as_ref() 
.and_then(|o| o.ordered) .unwrap_or(true); - #[cfg(feature = "in-use-encryption-unstable")] - let encrypted = self.coll.client().auto_encryption_opts().await.is_some(); - #[cfg(not(feature = "in-use-encryption-unstable"))] - let encrypted = false; + let encrypted = self.coll.client().should_auto_encrypt().await; let mut cumulative_failure: Option = None; let mut error_labels: HashSet = Default::default(); diff --git a/src/action/insert_one.rs b/src/action/insert_one.rs index edaebea11..f7d70afcc 100644 --- a/src/action/insert_one.rs +++ b/src/action/insert_one.rs @@ -87,18 +87,13 @@ impl<'a> Action for InsertOne<'a> { async fn execute(mut self) -> Result { resolve_write_concern_with_session!(self.coll, self.options, self.session.as_ref())?; - #[cfg(feature = "in-use-encryption-unstable")] - let encrypted = self.coll.client().auto_encryption_opts().await.is_some(); - #[cfg(not(feature = "in-use-encryption-unstable"))] - let encrypted = false; - let doc = self.doc?; let insert = Op::new( self.coll.namespace(), vec![doc.deref()], self.options.map(InsertManyOptions::from_insert_one_options), - encrypted, + self.coll.client().should_auto_encrypt().await, ); self.coll .client() diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index 131922811..70f0529d7 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -7,7 +7,7 @@ use futures_util::{FutureExt, TryStreamExt}; use crate::{ bson::{rawdoc, Bson, RawDocumentBuf}, - bson_util::{self, array_entry_size_bytes, extend_raw_document_buf, vec_to_raw_array_buf}, + bson_util::{self, extend_raw_document_buf}, checked::Checked, cmap::{Command, RawCommandResponse, StreamDescription}, cursor::CursorSpecification, @@ -22,13 +22,7 @@ use crate::{ SessionCursor, }; -use super::{ - ExecutionContext, - Retryability, - WriteResponseBody, - MAX_ENCRYPTED_WRITE_SIZE, - OP_MSG_OVERHEAD_BYTES, -}; +use super::{ExecutionContext, Retryability, WriteResponseBody, OP_MSG_OVERHEAD_BYTES}; use server_responses::*; @@ -37,7 +31,6 @@ pub(crate) struct BulkWrite<'a> { models: &'a [WriteModel], offset: usize, options: Option<&'a BulkWriteOptions>, - encrypted: bool, /// The _ids of the inserted documents. This value is populated in `build`. inserted_ids: HashMap, /// The number of writes that were sent to the server. This value is populated in `build`. @@ -51,13 +44,11 @@ impl<'a> BulkWrite<'a> { offset: usize, options: Option<&'a BulkWriteOptions>, ) -> BulkWrite<'a> { - let encrypted = client.should_auto_encrypt().await; Self { client, models, offset, options, - encrypted, n_attempted: 0, inserted_ids: HashMap::new(), } @@ -226,23 +217,7 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { .into()); } - let mut split = false; - if self.encrypted && i != 0 { - let model_entry_size = array_entry_size_bytes(i, operation_size)?; - let namespace_entry_size = if namespace_size > 0 { - array_entry_size_bytes(namespace_index, namespace_size)? - } else { - 0 - }; - if current_size + namespace_entry_size + model_entry_size > MAX_ENCRYPTED_WRITE_SIZE - { - split = true; - } - } else if current_size + namespace_size + operation_size > max_document_sequences_size { - split = true; - } - - if split { + if current_size + namespace_size + operation_size > max_document_sequences_size { // Remove the namespace doc from the list if one was added for this operation. 
if namespace_size > 0 { let last_index = namespace_info.namespaces.len() - 1; @@ -260,16 +235,10 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { self.n_attempted = ops.len(); - if self.encrypted { - command_body.append("nsInfo", vec_to_raw_array_buf(namespace_info.namespaces)); - command_body.append("ops", vec_to_raw_array_buf(ops)); - Ok(Command::new(Self::NAME, "admin", command_body)) - } else { - let mut command = Command::new(Self::NAME, "admin", command_body); - command.add_document_sequence("nsInfo", namespace_info.namespaces); - command.add_document_sequence("ops", ops); - Ok(command) - } + let mut command = Command::new(Self::NAME, "admin", command_body); + command.add_document_sequence("nsInfo", namespace_info.namespaces); + command.add_document_sequence("ops", ops); + Ok(command) } fn handle_response<'b>( From 98eb33c7d1d75896e5eeef58fcaee37bfb623bda Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 3 May 2024 11:09:26 -0600 Subject: [PATCH 68/75] sync retryable writes --- .../unified/handshakeError.json | 216 ++++++++++++++++++ .../unified/handshakeError.yml | 90 ++++++++ 2 files changed, 306 insertions(+) diff --git a/src/test/spec/json/retryable-writes/unified/handshakeError.json b/src/test/spec/json/retryable-writes/unified/handshakeError.json index df37bd723..3c4646375 100644 --- a/src/test/spec/json/retryable-writes/unified/handshakeError.json +++ b/src/test/spec/json/retryable-writes/unified/handshakeError.json @@ -53,6 +53,222 @@ } ], "tests": [ + { + "description": "client.clientBulkWrite succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-handshake-tests.coll", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite" + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite" + } + } + ] + } + ] + }, + { + "description": "client.clientBulkWrite succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + 
"object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-handshake-tests.coll", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite" + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite" + } + } + ] + } + ] + }, { "description": "collection.insertOne succeeds after retryable handshake network error", "operations": [ diff --git a/src/test/spec/json/retryable-writes/unified/handshakeError.yml b/src/test/spec/json/retryable-writes/unified/handshakeError.yml index 9b2774bc7..131bbf2e5 100644 --- a/src/test/spec/json/retryable-writes/unified/handshakeError.yml +++ b/src/test/spec/json/retryable-writes/unified/handshakeError.yml @@ -50,6 +50,96 @@ tests: # - Triggers failpoint (second time). # - Tests whether operation successfully retries the handshake and succeeds. + - description: "client.clientBulkWrite succeeds after retryable handshake network error" + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0 + operations: + - name: failPoint + object: testRunner + arguments: + client: *client + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: [ping, saslContinue] + closeConnection: true + - name: runCommand + object: *database + arguments: { commandName: ping, command: { ping: 1 } } + expectError: { isError: true } + - name: clientBulkWrite + object: *client + arguments: + models: + - insertOne: + namespace: retryable-writes-handshake-tests.coll + document: { _id: 8, x: 88 } + expectEvents: + - client: *client + eventType: cmap + events: + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - client: *client + events: + - commandStartedEvent: + command: { ping: 1 } + databaseName: *databaseName + - commandFailedEvent: + commandName: ping + - commandStartedEvent: + commandName: bulkWrite + - commandSucceededEvent: + commandName: bulkWrite + + - description: "client.clientBulkWrite succeeds after retryable handshake server error (ShutdownInProgress)" + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0 + operations: + - name: failPoint + object: testRunner + arguments: + client: *client + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: [ping, saslContinue] + closeConnection: true + - name: runCommand + object: *database + arguments: { commandName: ping, command: { ping: 1 } } + expectError: { isError: true } + - name: clientBulkWrite + object: *client + arguments: + models: + - insertOne: + namespace: retryable-writes-handshake-tests.coll + document: { _id: 8, x: 88 } + expectEvents: + - client: *client + eventType: cmap + 
events: + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - client: *client + events: + - commandStartedEvent: + command: { ping: 1 } + databaseName: *databaseName + - commandFailedEvent: + commandName: ping + - commandStartedEvent: + commandName: bulkWrite + - commandSucceededEvent: + commandName: bulkWrite + - description: "collection.insertOne succeeds after retryable handshake network error" operations: - name: failPoint From 808a64a1a17c9a5214c41b1e6b815a47dd230483 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 3 May 2024 13:05:28 -0600 Subject: [PATCH 69/75] add encryption test --- src/test/bulk_write.rs | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 9396dfbb0..81da582fb 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -505,3 +505,39 @@ async fn too_large_client_error() { let error = client.bulk_write(vec![model]).await.unwrap_err(); assert!(!error.is_server_error()); } + +#[cfg(feature = "in-use-encryption-unstable")] +#[tokio::test] +async fn encryption_error() { + use crate::{ + client::csfle::options::{AutoEncryptionOptions, KmsProviders}, + mongocrypt::ctx::KmsProvider, + }; + + let kms_providers = KmsProviders::new(vec![( + KmsProvider::Aws, + doc! { "accessKeyId": "foo", "secretAccessKey": "bar" }, + None, + )]) + .unwrap(); + let encrypted_options = AutoEncryptionOptions::new(Namespace::new("db", "coll"), kms_providers); + let encrypted_client = Client::test_builder() + .encrypted_options(encrypted_options) + .build() + .await; + + let model = WriteModel::InsertOne { + namespace: Namespace::new("db", "coll"), + document: doc! { "a": "b" }, + }; + let error = encrypted_client.bulk_write(vec![model]).await.unwrap_err(); + + let ErrorKind::Encryption(encryption_error) = *error.kind else { + panic!("expected encryption error, got {:?}", error); + }; + + assert_eq!( + encryption_error.message, + Some("bulkWrite does not currently support automatic encryption".to_string()) + ); +} From 3c5a9360611cfa97c6b37c2f79074ae4e8d656a5 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 3 May 2024 13:09:54 -0600 Subject: [PATCH 70/75] sync transaction tests --- .../unified/mongos-pin-auto-tests.py | 18 +- .../transactions/unified/mongos-pin-auto.json | 294 ++++++++++++++++++ .../transactions/unified/mongos-pin-auto.yml | 90 ++++++ 3 files changed, 400 insertions(+), 2 deletions(-) diff --git a/src/test/spec/json/transactions/unified/mongos-pin-auto-tests.py b/src/test/spec/json/transactions/unified/mongos-pin-auto-tests.py index 99a34b485..ad2aeabd1 100644 --- a/src/test/spec/json/transactions/unified/mongos-pin-auto-tests.py +++ b/src/test/spec/json/transactions/unified/mongos-pin-auto-tests.py @@ -291,6 +291,11 @@ insert: *collection_name documents: - { _id : 1 }'''), + # clientBulkWrite: + 'clientBulkWrite': ('bulkWrite', '*client0', r'''models: + - insertOne: + namespace: database0.collection0 + document: { _id: 8, x: 88 }'''), } # Maps from error_name to error_data. 
@@ -313,7 +318,11 @@ def create_pin_test(op_name, error_name): error_data = NON_TRANSIENT_ERRORS[error_name] if op_name.startswith('bulkWrite'): op_name = 'bulkWrite' - return TEMPLATE.format(**locals()) + test = TEMPLATE.format(**locals()) + if op_name == 'clientBulkWrite': + test += ' runOnRequirements:\n' + test += ' - minServerVersion: "8.0" # `bulkWrite` added to server 8.0"\n' + return test def create_unpin_test(op_name, error_name): @@ -324,7 +333,12 @@ def create_unpin_test(op_name, error_name): error_data = TRANSIENT_ERRORS[error_name] if op_name.startswith('bulkWrite'): op_name = 'bulkWrite' - return TEMPLATE.format(**locals()) + test = TEMPLATE.format(**locals()) + if op_name == 'clientBulkWrite': + test += ' runOnRequirements:\n' + test += ' - minServerVersion: "8.0" # `bulkWrite` added to server 8.0"\n' + return test + tests = [] diff --git a/src/test/spec/json/transactions/unified/mongos-pin-auto.json b/src/test/spec/json/transactions/unified/mongos-pin-auto.json index 93eac8bb7..27db52040 100644 --- a/src/test/spec/json/transactions/unified/mongos-pin-auto.json +++ b/src/test/spec/json/transactions/unified/mongos-pin-auto.json @@ -2004,6 +2004,104 @@ } ] }, + { + "description": "remain pinned after non-transient Interrupted error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + }, { "description": "unpin after transient connection error on insertOne insert", "operations": [ @@ -5175,6 +5273,202 @@ ] } ] + }, + { + "description": "unpin after transient connection error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + 
"insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] } ] } diff --git a/src/test/spec/json/transactions/unified/mongos-pin-auto.yml b/src/test/spec/json/transactions/unified/mongos-pin-auto.yml index 7a7634755..a80dd6203 100644 --- a/src/test/spec/json/transactions/unified/mongos-pin-auto.yml +++ b/src/test/spec/json/transactions/unified/mongos-pin-auto.yml @@ -676,6 +676,36 @@ tests: - *abortTransaction outcome: *outcome + - description: remain pinned after non-transient Interrupted error on clientBulkWrite bulkWrite + operations: + - *startTransaction + - *initialCommand + - name: targetedFailPoint + object: testRunner + arguments: + session: *session0 + failPoint: + configureFailPoint: failCommand + mode: {times: 1} + data: + failCommands: ["bulkWrite"] + errorCode: 11601 + - name: clientBulkWrite + object: *client0 + arguments: + session: *session0 + models: + - insertOne: + namespace: database0.collection0 + document: { _id: 8, x: 88 } + expectError: + errorLabelsOmit: ["TransientTransactionError"] + - *assertSessionPinned + - *abortTransaction + outcome: *outcome + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0" + - description: unpin after transient connection error on insertOne insert operations: - *startTransaction @@ -1614,3 +1644,63 @@ tests: - *abortTransaction outcome: *outcome + - description: unpin after transient connection error on clientBulkWrite bulkWrite + operations: + - *startTransaction + - *initialCommand + - name: targetedFailPoint + object: testRunner + arguments: + session: *session0 + failPoint: + 
configureFailPoint: failCommand + mode: {times: 1} + data: + failCommands: ["bulkWrite"] + closeConnection: true + - name: clientBulkWrite + object: *client0 + arguments: + session: *session0 + models: + - insertOne: + namespace: database0.collection0 + document: { _id: 8, x: 88 } + expectError: + errorLabelsContain: ["TransientTransactionError"] + - *assertSessionUnpinned + - *abortTransaction + outcome: *outcome + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0" + + - description: unpin after transient ShutdownInProgress error on clientBulkWrite bulkWrite + operations: + - *startTransaction + - *initialCommand + - name: targetedFailPoint + object: testRunner + arguments: + session: *session0 + failPoint: + configureFailPoint: failCommand + mode: {times: 1} + data: + failCommands: ["bulkWrite"] + errorCode: 91 + - name: clientBulkWrite + object: *client0 + arguments: + session: *session0 + models: + - insertOne: + namespace: database0.collection0 + document: { _id: 8, x: 88 } + expectError: + errorLabelsContain: ["TransientTransactionError"] + - *assertSessionUnpinned + - *abortTransaction + outcome: *outcome + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0" + From fbc6651f5d8889c965d318e2ae25fb2918be8cdf Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Fri, 3 May 2024 16:05:59 -0600 Subject: [PATCH 71/75] minor: bump clippy to 1.78.0 --- .evergreen/check-clippy.sh | 4 ++-- src/client/auth/oidc.rs | 2 +- src/client/options/parse.rs | 2 +- src/lib.rs | 2 +- src/sdam/description/topology.rs | 2 +- src/sdam/srv_polling/test.rs | 4 ++-- src/test.rs | 2 +- src/test/spec/initial_dns_seedlist_discovery.rs | 4 +++- src/test/spec/retryable_reads.rs | 5 ++--- src/test/spec/retryable_writes.rs | 7 +++---- src/test/spec/trace.rs | 10 +++++----- src/test/spec/unified_runner/operation.rs | 4 +++- src/test/spec/unified_runner/test_file.rs | 2 +- 13 files changed, 26 insertions(+), 24 deletions(-) diff --git a/.evergreen/check-clippy.sh b/.evergreen/check-clippy.sh index 7d507af3d..3fb3911c2 100755 --- a/.evergreen/check-clippy.sh +++ b/.evergreen/check-clippy.sh @@ -5,7 +5,7 @@ set -o errexit source ./.evergreen/env.sh # Pin clippy to the latest version. This should be updated when new versions of Rust are released. -CLIPPY_VERSION=1.75.0 +CLIPPY_VERSION=1.78.0 rustup install $CLIPPY_VERSION @@ -13,4 +13,4 @@ rustup install $CLIPPY_VERSION cargo +$CLIPPY_VERSION clippy --all-targets -p mongodb -- -D warnings # Check with all features. 
-cargo +$CLIPPY_VERSION clippy --all-targets --all-features -p mongodb -- -D warnings \ No newline at end of file +cargo +$CLIPPY_VERSION clippy --all-targets --all-features -p mongodb -- -D warnings diff --git a/src/client/auth/oidc.rs b/src/client/auth/oidc.rs index 67ca01dca..f8036128c 100644 --- a/src/client/auth/oidc.rs +++ b/src/client/auth/oidc.rs @@ -279,7 +279,7 @@ impl Cache { self.idp_server_info = idp_server_info; } self.access_token = Some(response.access_token.clone()); - self.refresh_token = response.refresh_token.clone(); + self.refresh_token.clone_from(&response.refresh_token); self.last_call_time = Instant::now(); self.token_gen_id += 1; } diff --git a/src/client/options/parse.rs b/src/client/options/parse.rs index c8542068d..bf7b22bd9 100644 --- a/src/client/options/parse.rs +++ b/src/client/options/parse.rs @@ -21,7 +21,7 @@ impl Action for ParseConnectionString { let mut options = ClientOptions::from_connection_string(conn_str); #[cfg(feature = "dns-resolver")] { - options.resolver_config = self.resolver_config.clone(); + options.resolver_config.clone_from(&self.resolver_config); } let resolved = host_info.resolve(self.resolver_config).await?; diff --git a/src/lib.rs b/src/lib.rs index 9cf9e7e96..af563acbd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -4,7 +4,7 @@ #![warn(clippy::cast_possible_truncation)] #![warn(clippy::cast_possible_wrap)] #![cfg_attr( - feature = "cargo-clippy", + feature = "clippy", allow( clippy::unreadable_literal, clippy::cognitive_complexity, diff --git a/src/sdam/description/topology.rs b/src/sdam/description/topology.rs index 2b4536ecd..6d9dd89d1 100644 --- a/src/sdam/description/topology.rs +++ b/src/sdam/description/topology.rs @@ -175,7 +175,7 @@ impl TopologyDescription { } self.single_seed = self.servers.len() == 1; - self.set_name = options.repl_set_name.clone(); + self.set_name.clone_from(&options.repl_set_name); self.local_threshold = options.local_threshold; self.heartbeat_freq = options.heartbeat_freq; self.srv_max_hosts = options.srv_max_hosts; diff --git a/src/sdam/srv_polling/test.rs b/src/sdam/srv_polling/test.rs index 547dc299b..6b0310d7e 100644 --- a/src/sdam/srv_polling/test.rs +++ b/src/sdam/srv_polling/test.rs @@ -43,7 +43,7 @@ async fn run_test_extra( new_hosts: Result>, ) -> HashSet { let mut options = ClientOptions::new_srv(); - options.hosts = DEFAULT_HOSTS.clone(); + options.hosts.clone_from(&DEFAULT_HOSTS); options.test_options_mut().disable_monitoring_threads = true; options.srv_max_hosts = max_hosts; let mut topology = Topology::new(options.clone()).unwrap(); @@ -132,7 +132,7 @@ async fn load_balanced_no_srv_polling() { let hosts = vec![localhost_test_build_10gen(27017)]; let mut options = ClientOptions::new_srv(); let rescan_interval = options.original_srv_info.as_ref().cloned().unwrap().min_ttl; - options.hosts = hosts.clone(); + options.hosts.clone_from(&hosts); options.load_balanced = Some(true); options.test_options_mut().mock_lookup_hosts = Some(make_lookup_hosts(vec![ localhost_test_build_10gen(27017), diff --git a/src/test.rs b/src/test.rs index e7dd6f47f..a0dea5362 100644 --- a/src/test.rs +++ b/src/test.rs @@ -114,7 +114,7 @@ pub(crate) static DEFAULT_GLOBAL_TRACING_HANDLER: Lazy = Lazy::n pub(crate) fn update_options_for_testing(options: &mut ClientOptions) { if options.server_api.is_none() { - options.server_api = SERVER_API.clone(); + options.server_api.clone_from(&SERVER_API); } #[cfg(any( diff --git a/src/test/spec/initial_dns_seedlist_discovery.rs b/src/test/spec/initial_dns_seedlist_discovery.rs 
index 944ebb918..32b619c5e 100644 --- a/src/test/spec/initial_dns_seedlist_discovery.rs +++ b/src/test/spec/initial_dns_seedlist_discovery.rs @@ -135,7 +135,9 @@ async fn run_test(mut test_file: TestFile) { } else { let mut options_with_tls = options.clone(); if requires_tls { - options_with_tls.tls = get_client_options().await.tls.clone(); + options_with_tls + .tls + .clone_from(&get_client_options().await.tls); } let client = Client::with_options(options_with_tls).unwrap(); diff --git a/src/test/spec/retryable_reads.rs b/src/test/spec/retryable_reads.rs index edf4972d6..e5f1a5422 100644 --- a/src/test/spec/retryable_reads.rs +++ b/src/test/spec/retryable_reads.rs @@ -138,9 +138,8 @@ async fn retry_read_pool_cleared() { .expect("pool clear should occur"); let next_cmap_events = subscriber - .collect_events(Duration::from_millis(1000), |event| match event { - Event::Cmap(_) => true, - _ => false, + .collect_events(Duration::from_millis(1000), |event| { + matches!(event, Event::Cmap(_)) }) .await; diff --git a/src/test/spec/retryable_writes.rs b/src/test/spec/retryable_writes.rs index 6eb616f48..f71214fe6 100644 --- a/src/test/spec/retryable_writes.rs +++ b/src/test/spec/retryable_writes.rs @@ -49,7 +49,7 @@ async fn run_legacy() { continue; } let mut options = test_case.client_options.unwrap_or_default(); - options.hosts = get_client_options().await.hosts.clone(); + options.hosts.clone_from(&get_client_options().await.hosts); if options.heartbeat_freq.is_none() { options.heartbeat_freq = Some(MIN_HEARTBEAT_FREQUENCY); } @@ -455,9 +455,8 @@ async fn retry_write_pool_cleared() { .expect("pool clear should occur"); let next_cmap_events = subscriber - .collect_events(Duration::from_millis(1000), |event| match event { - Event::Cmap(_) => true, - _ => false, + .collect_events(Duration::from_millis(1000), |event| { + matches!(event, Event::Cmap(_)) }) .await; diff --git a/src/test/spec/trace.rs b/src/test/spec/trace.rs index 1a51cfd6e..2829967be 100644 --- a/src/test/spec/trace.rs +++ b/src/test/spec/trace.rs @@ -49,27 +49,27 @@ fn tracing_truncation() { assert_eq!(s, String::from("...")); // we should "round up" to the end of the first emoji - s = two_emoji.clone(); + s.clone_from(&two_emoji); truncate_on_char_boundary(&mut s, 1); assert_eq!(s, String::from("🤔...")); // 4 is a boundary, so we should truncate there - s = two_emoji.clone(); + s.clone_from(&two_emoji); truncate_on_char_boundary(&mut s, 4); assert_eq!(s, String::from("🤔...")); // we should round up to the full string - s = two_emoji.clone(); + s.clone_from(&two_emoji); truncate_on_char_boundary(&mut s, 5); assert_eq!(s, two_emoji); // end of string is a boundary, so we should truncate there - s = two_emoji.clone(); + s.clone_from(&two_emoji); truncate_on_char_boundary(&mut s, 8); assert_eq!(s, two_emoji); // we should get the full string back if the new length is longer than the original - s = two_emoji.clone(); + s.clone_from(&two_emoji); truncate_on_char_boundary(&mut s, 10); assert_eq!(s, two_emoji); } diff --git a/src/test/spec/unified_runner/operation.rs b/src/test/spec/unified_runner/operation.rs index 058f135f2..9be1a8b7e 100644 --- a/src/test/spec/unified_runner/operation.rs +++ b/src/test/spec/unified_runner/operation.rs @@ -124,6 +124,7 @@ pub(crate) trait TestOperation: Debug + Send + Sync { /// If this operation specifies entities to create, returns those entities. Otherwise, /// returns None. 
+ #[cfg(feature = "tracing-unstable")] fn test_file_entities(&self) -> Option<&Vec> { None } @@ -2307,7 +2308,7 @@ impl TestOperation for RenameCollection { let target = test_runner.get_collection(id).await; let ns = target.namespace(); let mut to_ns = ns.clone(); - to_ns.coll = self.to.clone(); + to_ns.coll.clone_from(&self.to); let cmd = doc! { "renameCollection": crate::bson::to_bson(&ns)?, "to": crate::bson::to_bson(&to_ns)?, @@ -2741,6 +2742,7 @@ impl TestOperation for CreateEntities { .boxed() } + #[cfg(feature = "tracing-unstable")] fn test_file_entities(&self) -> Option<&Vec> { Some(&self.entities) } diff --git a/src/test/spec/unified_runner/test_file.rs b/src/test/spec/unified_runner/test_file.rs index 7292b55f8..6bb2b68b7 100644 --- a/src/test/spec/unified_runner/test_file.rs +++ b/src/test/spec/unified_runner/test_file.rs @@ -500,7 +500,7 @@ impl ExpectError { let description = description.as_ref(); if let Some(is_client_error) = self.is_client_error { - if is_client_error != !error.is_server_error() { + if is_client_error == error.is_server_error() { return Err(format!( "{}: expected client error but got {:?}", description, error From c086020e773ce115d446bfea121522bd859b6286 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Thu, 9 May 2024 14:39:19 -0600 Subject: [PATCH 72/75] kevin comments --- src/action/bulk_write.rs | 30 ++-- src/client/options/bulk_write.rs | 1 - src/cmap/test/integration.rs | 25 ++-- src/operation/aggregate/test.rs | 96 ------------- src/operation/bulk_write.rs | 37 ++--- src/operation/insert.rs | 21 +-- src/test/bulk_write.rs | 128 ++++++++++-------- src/test/spec/retryable_reads.rs | 2 + .../unified_runner/operation/bulk_write.rs | 3 - 9 files changed, 132 insertions(+), 211 deletions(-) delete mode 100644 src/operation/aggregate/test.rs diff --git a/src/action/bulk_write.rs b/src/action/bulk_write.rs index 123b2d2b3..96996b3fd 100644 --- a/src/action/bulk_write.rs +++ b/src/action/bulk_write.rs @@ -29,6 +29,20 @@ pub struct BulkWrite<'a> { } impl<'a> BulkWrite<'a> { + option_setters!(options: BulkWriteOptions; + ordered: bool, + bypass_document_validation: bool, + comment: Bson, + let_vars: Document, + verbose_results: bool, + write_concern: WriteConcern, + ); + + pub fn session(mut self, session: &'a mut ClientSession) -> BulkWrite<'a> { + self.session = Some(session); + self + } + fn new(client: &'a Client, models: Vec) -> Self { Self { client, @@ -46,22 +60,6 @@ impl<'a> BulkWrite<'a> { } } -impl<'a> BulkWrite<'a> { - option_setters!(options: BulkWriteOptions; - ordered: bool, - bypass_document_validation: bool, - comment: Bson, - let_vars: Document, - verbose_results: bool, - write_concern: WriteConcern, - ); - - pub fn session(mut self, session: &'a mut ClientSession) -> BulkWrite<'a> { - self.session = Some(session); - self - } -} - #[action_impl] impl<'a> Action for BulkWrite<'a> { type Future = BulkWriteFuture; diff --git a/src/client/options/bulk_write.rs b/src/client/options/bulk_write.rs index c04e6260a..d8558d97f 100644 --- a/src/client/options/bulk_write.rs +++ b/src/client/options/bulk_write.rs @@ -113,7 +113,6 @@ pub enum WriteModel { filter: Document, #[serde(rename = "updateMods")] replacement: Document, - array_filters: Option, collation: Option, hint: Option, upsert: Option, diff --git a/src/cmap/test/integration.rs b/src/cmap/test/integration.rs index 7d7a0a997..97fe4c022 100644 --- a/src/cmap/test/integration.rs +++ b/src/cmap/test/integration.rs @@ -189,16 +189,12 @@ async fn connection_error_during_establishment() { return; } 
- let _guard = client - .enable_fail_point( - FailPoint::fail_command( - &[LEGACY_HELLO_COMMAND_NAME, "hello"], - FailPointMode::Times(10), - ) - .error_code(1234), - ) - .await - .unwrap(); + let fail_point = FailPoint::fail_command( + &[LEGACY_HELLO_COMMAND_NAME, "hello"], + FailPointMode::Times(10), + ) + .error_code(1234); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); let buffer = EventBuffer::::new(); #[allow(deprecated)] @@ -248,12 +244,9 @@ async fn connection_error_during_operation() { return; } - let _guard = client - .enable_fail_point( - FailPoint::fail_command(&["ping"], FailPointMode::Times(10)).close_connection(true), - ) - .await - .unwrap(); + let fail_point = + FailPoint::fail_command(&["ping"], FailPointMode::Times(10)).close_connection(true); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); #[allow(deprecated)] let mut subscriber = buffer.subscribe(); diff --git a/src/operation/aggregate/test.rs b/src/operation/aggregate/test.rs deleted file mode 100644 index 67551a5e5..000000000 --- a/src/operation/aggregate/test.rs +++ /dev/null @@ -1,96 +0,0 @@ -use std::time::Duration; - -use crate::{ - bson::doc, - error::{ErrorKind, WriteFailure}, - operation::{ - aggregate::Aggregate, - test::{self, handle_response_test}, - }, - options::AggregateOptions, - Namespace, -}; - -#[test] -fn op_selection_criteria() { - test::op_selection_criteria(|selection_criteria| { - let options = AggregateOptions { - selection_criteria, - ..Default::default() - }; - Aggregate::new("".to_string(), Vec::new(), Some(options)) - }); -} - -#[test] -fn handle_max_await_time() { - let response = doc! { - "ok": 1, - "cursor": { - "id": 123, - "ns": "a.b", - "firstBatch": [] - } - }; - - let aggregate = Aggregate::empty(); - let spec = handle_response_test(&aggregate, response.clone()).unwrap(); - assert!(spec.max_time().is_none()); - - let max_await = Duration::from_millis(123); - let options = AggregateOptions::builder() - .max_await_time(max_await) - .build(); - let aggregate = Aggregate::new(Namespace::empty(), Vec::new(), Some(options)); - let spec = handle_response_test(&aggregate, response).unwrap(); - assert_eq!(spec.max_time(), Some(max_await)); -} - -#[test] -fn handle_write_concern_error() { - let response = doc! { - "ok": 1.0, - "cursor": { - "id": 0, - "ns": "test.test", - "firstBatch": [], - }, - "writeConcernError": { - "code": 64, - "codeName": "WriteConcernFailed", - "errmsg": "Waiting for replication timed out", - "errInfo": { - "wtimeout": true - } - } - }; - - let aggregate = Aggregate::new( - Namespace::empty(), - vec![doc! { "$merge": { "into": "a" } }], - None, - ); - - let error = handle_response_test(&aggregate, response).unwrap_err(); - match *error.kind { - ErrorKind::Write(WriteFailure::WriteConcernError(_)) => {} - ref e => panic!("should have gotten WriteConcernError, got {:?} instead", e), - } -} - -#[test] -fn handle_invalid_response() { - let aggregate = Aggregate::empty(); - - let garbled = doc! { "asdfasf": "ASdfasdf" }; - handle_response_test(&aggregate, garbled).unwrap_err(); - - let missing_cursor_field = doc! { - "ok": 1.0, - "cursor": { - "ns": "test.test", - "firstBatch": [], - } - }; - handle_response_test(&aggregate, missing_cursor_field).unwrap_err(); -} diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index 70f0529d7..7c5a6d6f8 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -141,8 +141,8 @@ impl<'a> BulkWrite<'a> { /// A helper struct for tracking namespace information. 
struct NamespaceInfo<'a> { namespaces: Vec, - /// Cache the namespaces and their indexes to avoid traversing the namespaces array each time a - /// namespace is looked up or added. + // Cache the namespaces and their indexes to avoid traversing the namespaces array each time a + // namespace is looked up or added. cache: HashMap<&'a Namespace, usize>, } @@ -179,7 +179,6 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { const NAME: &'static str = "bulkWrite"; fn build(&mut self, description: &StreamDescription) -> Result> { - let max_doc_size: usize = Checked::new(description.max_bson_object_size).try_into()?; let max_message_size: usize = Checked::new(description.max_message_size_bytes).try_into()?; let max_operations: usize = Checked::new(description.max_write_batch_size).try_into()?; @@ -191,12 +190,14 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { }?; bson_util::extend_raw_document_buf(&mut command_body, options)?; - let max_document_sequences_size = - max_message_size - OP_MSG_OVERHEAD_BYTES - command_body.as_bytes().len(); + let max_document_sequences_size: usize = (Checked::new(max_message_size) + - OP_MSG_OVERHEAD_BYTES + - command_body.as_bytes().len()) + .try_into()?; let mut namespace_info = NamespaceInfo::new(); let mut ops = Vec::new(); - let mut current_size = 0; + let mut current_size = Checked::new(0); for (i, model) in self.models.iter().take(max_operations).enumerate() { let (namespace_index, namespace_size) = namespace_info.get_index(model.namespace()); @@ -206,18 +207,9 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { extend_raw_document_buf(&mut operation, model_doc)?; let operation_size = operation.as_bytes().len(); - if operation_size > max_doc_size { - return Err(ErrorKind::InvalidArgument { - message: format!( - "bulk write operations must be within {} bytes, but document provided is \ - {} bytes", - max_doc_size, operation_size - ), - } - .into()); - } - if current_size + namespace_size + operation_size > max_document_sequences_size { + current_size += namespace_size + operation_size; + if current_size.get()? > max_document_sequences_size { // Remove the namespace doc from the list if one was added for this operation. 
if namespace_size > 0 { let last_index = namespace_info.namespaces.len() - 1; @@ -229,10 +221,19 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { if let Some(inserted_id) = inserted_id { self.inserted_ids.insert(i, inserted_id); } - current_size += namespace_size + operation_size; ops.push(operation); } + if ops.is_empty() { + return Err(ErrorKind::InvalidArgument { + message: format!( + "operation at index {} exceeds the maximum message size ({} bytes)", + self.offset, max_message_size + ), + } + .into()); + } + self.n_attempted = ops.len(); let mut command = Command::new(Self::NAME, "admin", command_body); diff --git a/src/operation/insert.rs b/src/operation/insert.rs index 7390b20cf..d94c8778d 100644 --- a/src/operation/insert.rs +++ b/src/operation/insert.rs @@ -69,11 +69,13 @@ impl<'a> OperationWithDefaults for Insert<'a> { let options = bson::to_raw_document_buf(&self.options)?; extend_raw_document_buf(&mut command_body, options)?; - let max_document_sequence_size = - max_message_size - OP_MSG_OVERHEAD_BYTES - command_body.as_bytes().len(); + let max_document_sequence_size: usize = (Checked::new(max_message_size) + - OP_MSG_OVERHEAD_BYTES + - command_body.as_bytes().len()) + .try_into()?; let mut docs = Vec::new(); - let mut current_size = 0; + let mut current_size = Checked::new(0); for (i, document) in self.documents.iter().take(max_operations).enumerate() { let mut document = bson::to_raw_document_buf(document)?; let id = get_or_prepend_id_field(&mut document)?; @@ -93,18 +95,21 @@ impl<'a> OperationWithDefaults for Insert<'a> { // From the spec: Drivers MUST not reduce the size limits for a single write before // automatic encryption. I.e. if a single document has size larger than 2MiB (but less // than `maxBsonObjectSize`) proceed with automatic encryption. - if self.encrypted && i != 0 { + if self.encrypted { let doc_entry_size = array_entry_size_bytes(i, document.as_bytes().len())?; - if current_size + doc_entry_size >= MAX_ENCRYPTED_WRITE_SIZE { + current_size += doc_entry_size; + if i != 0 && current_size.get()? >= MAX_ENCRYPTED_WRITE_SIZE { + break; + } + } else { + current_size += doc_size; + if current_size.get()? > max_document_sequence_size { break; } - } else if current_size + doc_size > max_document_sequence_size { - break; } self.inserted_ids.push(id); docs.push(document); - current_size += doc_size; } let mut body = rawdoc! 
{ diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs index 81da582fb..a93320cbb 100644 --- a/src/test/bulk_write.rs +++ b/src/test/bulk_write.rs @@ -22,6 +22,7 @@ async fn run_unified() { .await; } +// CRUD prose test 3 #[tokio::test] async fn max_write_batch_size_batching() { let client = Client::test_builder().monitor_events().build().await; @@ -60,6 +61,7 @@ async fn max_write_batch_size_batching() { assert_eq!(second_len, 1); } +// CRUD prose test 4 #[tokio::test] async fn max_message_size_bytes_batching() { let client = Client::test_builder().monitor_events().build().await; @@ -101,6 +103,7 @@ async fn max_message_size_bytes_batching() { assert_eq!(second_len, 1); } +// CRUD prose test 5 #[tokio::test(flavor = "multi_thread")] async fn write_concern_error_batches() { let mut options = get_client_options().await.clone(); @@ -150,6 +153,7 @@ async fn write_concern_error_batches() { assert_eq!(command_started_events.len(), 2); } +// CRUD prose test 6 #[tokio::test] async fn write_error_batches() { let mut client = Client::test_builder().monitor_events().build().await; @@ -206,6 +210,7 @@ async fn write_error_batches() { assert_eq!(command_started_events.len(), 1); } +// CRUD prose test 7 #[tokio::test] async fn successful_cursor_iteration() { let client = Client::test_builder().monitor_events().build().await; @@ -253,6 +258,62 @@ async fn successful_cursor_iteration() { assert_eq!(command_started_events.len(), 1); } +// CRUD prose test 8 +#[tokio::test] +async fn cursor_iteration_in_a_transaction() { + let client = Client::test_builder().monitor_events().build().await; + + if client.server_version_lt(8, 0) || client.is_standalone() { + log_uncaptured( + "skipping cursor_iteration_in_a_transaction: bulkWrite requires 8.0+, transactions \ + require a non-standalone topology", + ); + return; + } + + let max_bson_object_size = client.server_info.max_bson_object_size as usize; + + let collection = client.database("db").collection::("coll"); + collection.drop().await.unwrap(); + + let mut session = client.start_session().await.unwrap(); + session.start_transaction().await.unwrap(); + + let models = vec![ + WriteModel::UpdateOne { + namespace: collection.namespace(), + filter: doc! { "_id": "a".repeat(max_bson_object_size / 2) }, + update: doc! { "$set": { "x": 1 } }.into(), + array_filters: None, + collation: None, + hint: None, + upsert: Some(true), + }, + WriteModel::UpdateOne { + namespace: collection.namespace(), + filter: doc! { "_id": "b".repeat(max_bson_object_size / 2) }, + update: doc! 
{ "$set": { "x": 1 } }.into(), + array_filters: None, + collation: None, + hint: None, + upsert: Some(true), + }, + ]; + + let result = client + .bulk_write(models) + .verbose_results(true) + .session(&mut session) + .await + .unwrap(); + assert_eq!(result.upserted_count, 2); + assert_eq!(result.update_results.unwrap().len(), 2); + + let command_started_events = client.events.get_command_started_events(&["getMore"]); + assert_eq!(command_started_events.len(), 1); +} + +// CRUD prose test 9 #[tokio::test(flavor = "multi_thread")] async fn failed_cursor_iteration() { let mut options = get_client_options().await.clone(); @@ -330,60 +391,9 @@ async fn failed_cursor_iteration() { assert_eq!(kill_cursors_events.len(), 1); } -#[tokio::test] -async fn cursor_iteration_in_a_transaction() { - let client = Client::test_builder().monitor_events().build().await; - - if client.server_version_lt(8, 0) || client.is_standalone() { - log_uncaptured( - "skipping cursor_iteration_in_a_transaction: bulkWrite requires 8.0+, transactions \ - require a non-standalone topology", - ); - return; - } - - let max_bson_object_size = client.server_info.max_bson_object_size as usize; - - let collection = client.database("db").collection::("coll"); - collection.drop().await.unwrap(); - - let mut session = client.start_session().await.unwrap(); - session.start_transaction().await.unwrap(); - - let models = vec![ - WriteModel::UpdateOne { - namespace: collection.namespace(), - filter: doc! { "_id": "a".repeat(max_bson_object_size / 2) }, - update: doc! { "$set": { "x": 1 } }.into(), - array_filters: None, - collation: None, - hint: None, - upsert: Some(true), - }, - WriteModel::UpdateOne { - namespace: collection.namespace(), - filter: doc! { "_id": "b".repeat(max_bson_object_size / 2) }, - update: doc! { "$set": { "x": 1 } }.into(), - array_filters: None, - collation: None, - hint: None, - upsert: Some(true), - }, - ]; - - let result = client - .bulk_write(models) - .verbose_results(true) - .session(&mut session) - .await - .unwrap(); - assert_eq!(result.upserted_count, 2); - assert_eq!(result.update_results.unwrap().len(), 2); - - let command_started_events = client.events.get_command_started_events(&["getMore"]); - assert_eq!(command_started_events.len(), 1); -} +// CRUD prose test 10 not implemented. The driver does not support unacknowledged writes. +// CRUD prose test 11 #[tokio::test] async fn namespace_batch_splitting() { let first_namespace = Namespace::new("db", "coll"); @@ -487,6 +497,7 @@ async fn namespace_batch_splitting() { assert_eq!(actual_second_namespace, &second_namespace.to_string()); } +// CRUD prose test 12 #[tokio::test] async fn too_large_client_error() { let client = Client::test_builder().monitor_events().build().await; @@ -497,6 +508,7 @@ async fn too_large_client_error() { return; } + // Case 1: document too large let model = WriteModel::InsertOne { namespace: Namespace::new("db", "coll"), document: doc! { "a": "b".repeat(max_message_size_bytes) }, @@ -504,8 +516,18 @@ async fn too_large_client_error() { let error = client.bulk_write(vec![model]).await.unwrap_err(); assert!(!error.is_server_error()); + + // Case 2: namespace too large + let model = WriteModel::InsertOne { + namespace: Namespace::new("db", "c".repeat(max_message_size_bytes)), + document: doc! 
{ "a": "b" }, + }; + + let error = client.bulk_write(vec![model]).await.unwrap_err(); + assert!(!error.is_server_error()); } +// CRUD prose test 13 #[cfg(feature = "in-use-encryption-unstable")] #[tokio::test] async fn encryption_error() { diff --git a/src/test/spec/retryable_reads.rs b/src/test/spec/retryable_reads.rs index b36ece236..6688f5b60 100644 --- a/src/test/spec/retryable_reads.rs +++ b/src/test/spec/retryable_reads.rs @@ -53,6 +53,8 @@ async fn retry_releases_connection() { .collection("retry_releases_connection"); collection.insert_one(doc! { "x": 1 }).await.unwrap(); + // Use a connection error to ensure streaming monitor checks get cancelled. Otherwise, we'd have + // to wait for the entire heartbeatFrequencyMS before the find succeeds. let fail_point = FailPoint::fail_command(&["find"], FailPointMode::Times(1)).close_connection(true); let _guard = client.enable_fail_point(fail_point).await.unwrap(); diff --git a/src/test/spec/unified_runner/operation/bulk_write.rs b/src/test/spec/unified_runner/operation/bulk_write.rs index d870e16ab..b16134a05 100644 --- a/src/test/spec/unified_runner/operation/bulk_write.rs +++ b/src/test/spec/unified_runner/operation/bulk_write.rs @@ -60,7 +60,6 @@ impl<'de> Deserialize<'de> for WriteModel { namespace: Namespace, filter: Document, replacement: Document, - array_filters: Option, collation: Option, hint: Option, upsert: Option, @@ -126,7 +125,6 @@ impl<'de> Deserialize<'de> for WriteModel { namespace, filter, replacement, - array_filters, collation, hint, upsert, @@ -134,7 +132,6 @@ impl<'de> Deserialize<'de> for WriteModel { namespace, filter, replacement, - array_filters, collation, hint, upsert, From 11dba57d63ac404a12cea99a9d47a0147215a228 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Mon, 13 May 2024 15:51:02 -0600 Subject: [PATCH 73/75] update operation methods --- src/operation.rs | 291 ++++++++++++----------- src/operation/abort_transaction.rs | 15 +- src/operation/aggregate.rs | 54 ++--- src/operation/aggregate/change_stream.rs | 68 +++--- src/operation/bulk_write.rs | 2 +- src/operation/commit_transaction.rs | 11 +- src/operation/count.rs | 11 +- src/operation/count_documents.rs | 11 +- src/operation/create.rs | 12 +- src/operation/create_indexes.rs | 16 +- src/operation/delete.rs | 18 +- src/operation/distinct.rs | 11 +- src/operation/drop_collection.rs | 12 +- src/operation/drop_database.rs | 12 +- src/operation/drop_indexes.rs | 7 +- src/operation/find.rs | 38 ++- src/operation/find_and_modify.rs | 39 ++- src/operation/get_more.rs | 27 +-- src/operation/insert.rs | 68 +++--- src/operation/list_collections.rs | 32 +-- src/operation/list_databases.rs | 11 +- src/operation/list_indexes.rs | 32 +-- src/operation/run_command.rs | 7 +- src/operation/search_index.rs | 51 ++-- src/operation/update.rs | 52 ++-- 25 files changed, 400 insertions(+), 508 deletions(-) diff --git a/src/operation.rs b/src/operation.rs index 0e08a1d34..3ffc7ee7a 100644 --- a/src/operation.rs +++ b/src/operation.rs @@ -27,6 +27,7 @@ mod update; use std::{collections::VecDeque, fmt::Debug, ops::Deref}; use bson::{RawBsonRef, RawDocument, RawDocumentBuf, Timestamp}; +use futures_util::FutureExt; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use crate::{ @@ -88,6 +89,13 @@ pub(crate) struct ExecutionContext<'a> { pub(crate) session: Option<&'a mut ClientSession>, } +#[derive(Debug, PartialEq, Clone, Copy)] +pub(crate) enum Retryability { + Write, + Read, + None, +} + /// A trait modeling the behavior of a server side operation. 
/// /// No methods in this trait should have default behaviors to ensure that wrapper operations @@ -131,7 +139,7 @@ pub(crate) trait Operation { fn write_concern(&self) -> Option<&WriteConcern>; /// Returns whether or not this command supports the `readConcern` field. - fn supports_read_concern(&self, _description: &StreamDescription) -> bool; + fn supports_read_concern(&self, description: &StreamDescription) -> bool; /// Whether this operation supports sessions or not. fn supports_sessions(&self) -> bool; @@ -147,6 +155,152 @@ pub(crate) trait Operation { fn name(&self) -> &str; } +// A mirror of the `Operation` trait, with default behavior where appropriate. Should only be +// implemented by operation types that do not delegate to other operations. +pub(crate) trait OperationWithDefaults: Send + Sync { + /// The output type of this operation. + type O; + + /// The format of the command body constructed in `build`. + type Command: CommandBody; + + /// The name of the server side command associated with this operation. + const NAME: &'static str; + + /// Returns the command that should be sent to the server as part of this operation. + /// The operation may store some additional state that is required for handling the response. + fn build(&mut self, description: &StreamDescription) -> Result>; + + /// Parse the response for the atClusterTime field. + /// Depending on the operation, this may be found in different locations. + fn extract_at_cluster_time(&self, _response: &RawDocument) -> Result> { + Ok(None) + } + + /// Interprets the server response to the command. + fn handle_response<'a>( + &'a self, + _response: RawCommandResponse, + _context: ExecutionContext<'a>, + ) -> Result { + Err(ErrorKind::Internal { + message: format!("operation handling not implemented for {}", Self::NAME), + } + .into()) + } + + /// Interprets the server response to the command. This method should only be implemented when + /// async code is required to handle the response. + fn handle_response_async<'a>( + &'a self, + response: RawCommandResponse, + context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { + async move { self.handle_response(response, context) }.boxed() + } + + /// Interpret an error encountered while sending the built command to the server, potentially + /// recovering. + fn handle_error(&self, error: Error) -> Result { + Err(error) + } + + /// Criteria to use for selecting the server that this operation will be executed on. + fn selection_criteria(&self) -> Option<&SelectionCriteria> { + None + } + + /// Whether or not this operation will request acknowledgment from the server. + fn is_acknowledged(&self) -> bool { + self.write_concern() + .map(WriteConcern::is_acknowledged) + .unwrap_or(true) + } + + /// The write concern to use for this operation, if any. + fn write_concern(&self) -> Option<&WriteConcern> { + None + } + + /// Returns whether or not this command supports the `readConcern` field. + fn supports_read_concern(&self, _description: &StreamDescription) -> bool { + false + } + + /// Whether this operation supports sessions or not. + fn supports_sessions(&self) -> bool { + true + } + + /// The level of retryability the operation supports. + fn retryability(&self) -> Retryability { + Retryability::None + } + + /// Updates this operation as needed for a retry. 
+ fn update_for_retry(&mut self) {} + + fn pinned_connection(&self) -> Option<&PinnedConnectionHandle> { + None + } + + fn name(&self) -> &str { + Self::NAME + } +} + +impl Operation for T +where + T: Send + Sync, +{ + type O = T::O; + type Command = T::Command; + const NAME: &'static str = T::NAME; + fn build(&mut self, description: &StreamDescription) -> Result> { + self.build(description) + } + fn extract_at_cluster_time(&self, response: &RawDocument) -> Result> { + self.extract_at_cluster_time(response) + } + fn handle_response<'a>( + &'a self, + response: RawCommandResponse, + context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { + self.handle_response_async(response, context) + } + fn handle_error(&self, error: Error) -> Result { + self.handle_error(error) + } + fn selection_criteria(&self) -> Option<&SelectionCriteria> { + self.selection_criteria() + } + fn is_acknowledged(&self) -> bool { + self.is_acknowledged() + } + fn write_concern(&self) -> Option<&WriteConcern> { + self.write_concern() + } + fn supports_read_concern(&self, description: &StreamDescription) -> bool { + self.supports_read_concern(description) + } + fn supports_sessions(&self) -> bool { + self.supports_sessions() + } + fn retryability(&self) -> Retryability { + self.retryability() + } + fn update_for_retry(&mut self) { + self.update_for_retry() + } + fn pinned_connection(&self) -> Option<&PinnedConnectionHandle> { + self.pinned_connection() + } + fn name(&self) -> &str { + self.name() + } +} + pub(crate) trait CommandBody: Serialize { fn should_redact(&self) -> bool { false @@ -380,13 +534,6 @@ where } } -#[derive(Debug, PartialEq, Clone, Copy)] -pub(crate) enum Retryability { - Write, - Read, - None, -} - macro_rules! remove_empty_write_concern { ($opts:expr) => { if let Some(ref mut options) = $opts { @@ -400,131 +547,3 @@ macro_rules! remove_empty_write_concern { } pub(crate) use remove_empty_write_concern; - -// A mirror of the `Operation` trait, with default behavior where appropriate. Should only be -// implemented by operation types that do not delegate to other operations. -pub(crate) trait OperationWithDefaults { - /// The output type of this operation. - type O; - - /// The format of the command body constructed in `build`. - type Command: CommandBody; - - /// The name of the server side command associated with this operation. - const NAME: &'static str; - - /// Returns the command that should be sent to the server as part of this operation. - /// The operation may store some additional state that is required for handling the response. - fn build(&mut self, description: &StreamDescription) -> Result>; - - /// Parse the response for the atClusterTime field. - /// Depending on the operation, this may be found in different locations. - fn extract_at_cluster_time(&self, _response: &RawDocument) -> Result> { - Ok(None) - } - - /// Interprets the server response to the command. - fn handle_response<'a>( - &'a self, - response: RawCommandResponse, - context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result>; - - /// Interpret an error encountered while sending the built command to the server, potentially - /// recovering. - fn handle_error(&self, error: Error) -> Result { - Err(error) - } - - /// Criteria to use for selecting the server that this operation will be executed on. - fn selection_criteria(&self) -> Option<&SelectionCriteria> { - None - } - - /// Whether or not this operation will request acknowledgment from the server. 
- fn is_acknowledged(&self) -> bool { - self.write_concern() - .map(WriteConcern::is_acknowledged) - .unwrap_or(true) - } - - /// The write concern to use for this operation, if any. - fn write_concern(&self) -> Option<&WriteConcern> { - None - } - - /// Returns whether or not this command supports the `readConcern` field. - fn supports_read_concern(&self, _description: &StreamDescription) -> bool { - false - } - - /// Whether this operation supports sessions or not. - fn supports_sessions(&self) -> bool { - true - } - - /// The level of retryability the operation supports. - fn retryability(&self) -> Retryability { - Retryability::None - } - - /// Updates this operation as needed for a retry. - fn update_for_retry(&mut self) {} - - fn pinned_connection(&self) -> Option<&PinnedConnectionHandle> { - None - } - - fn name(&self) -> &str { - Self::NAME - } -} - -impl Operation for T { - type O = T::O; - type Command = T::Command; - const NAME: &'static str = T::NAME; - fn build(&mut self, description: &StreamDescription) -> Result> { - self.build(description) - } - fn extract_at_cluster_time(&self, response: &RawDocument) -> Result> { - self.extract_at_cluster_time(response) - } - fn handle_response<'a>( - &'a self, - response: RawCommandResponse, - context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - self.handle_response(response, context) - } - fn handle_error(&self, error: Error) -> Result { - self.handle_error(error) - } - fn selection_criteria(&self) -> Option<&SelectionCriteria> { - self.selection_criteria() - } - fn is_acknowledged(&self) -> bool { - self.is_acknowledged() - } - fn write_concern(&self) -> Option<&WriteConcern> { - self.write_concern() - } - fn supports_read_concern(&self, description: &StreamDescription) -> bool { - self.supports_read_concern(description) - } - fn supports_sessions(&self) -> bool { - self.supports_sessions() - } - fn retryability(&self) -> Retryability { - self.retryability() - } - fn update_for_retry(&mut self) { - self.update_for_retry() - } - fn pinned_connection(&self) -> Option<&PinnedConnectionHandle> { - self.pinned_connection() - } - fn name(&self) -> &str { - self.name() - } -} diff --git a/src/operation/abort_transaction.rs b/src/operation/abort_transaction.rs index f9fd57441..ef669e26e 100644 --- a/src/operation/abort_transaction.rs +++ b/src/operation/abort_transaction.rs @@ -1,15 +1,11 @@ -use bson::Document; -use futures_util::FutureExt; - use crate::{ - bson::doc, + bson::{doc, Document}, client::session::TransactionPin, cmap::{conn::PinnedConnectionHandle, Command, RawCommandResponse, StreamDescription}, error::Result, operation::Retryability, options::WriteConcern, selection_criteria::SelectionCriteria, - BoxFuture, }; use super::{ExecutionContext, OperationWithDefaults, WriteConcernOnlyBody}; @@ -55,12 +51,9 @@ impl OperationWithDefaults for AbortTransaction { &'a self, response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let response: WriteConcernOnlyBody = response.body()?; - response.validate() - } - .boxed() + ) -> Result { + let response: WriteConcernOnlyBody = response.body()?; + response.validate() } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/aggregate.rs b/src/operation/aggregate.rs index 8b8e8ecfc..ac777aca1 100644 --- a/src/operation/aggregate.rs +++ b/src/operation/aggregate.rs @@ -1,7 +1,5 @@ pub(crate) mod change_stream; -use futures_util::FutureExt; - use crate::{ bson::{doc, Bson, Document}, bson_util, @@ 
-10,7 +8,6 @@ use crate::{ error::Result, operation::{append_options, remove_empty_write_concern, Retryability}, options::{AggregateOptions, SelectionCriteria, WriteConcern}, - BoxFuture, Namespace, }; @@ -87,33 +84,30 @@ impl OperationWithDefaults for Aggregate { &'a self, response: RawCommandResponse, context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let cursor_response: CursorBody = response.body()?; - - if self.is_out_or_merge() { - let wc_error_info = response.body::()?; - wc_error_info.validate()?; - }; - - let description = context.connection.stream_description()?; - - // The comment should only be propagated to getMore calls on 4.4+. - let comment = if description.max_wire_version.unwrap_or(0) < SERVER_4_4_0_WIRE_VERSION { - None - } else { - self.options.as_ref().and_then(|opts| opts.comment.clone()) - }; - - Ok(CursorSpecification::new( - cursor_response.cursor, - description.server_address.clone(), - self.options.as_ref().and_then(|opts| opts.batch_size), - self.options.as_ref().and_then(|opts| opts.max_await_time), - comment, - )) - } - .boxed() + ) -> Result { + let cursor_response: CursorBody = response.body()?; + + if self.is_out_or_merge() { + let wc_error_info = response.body::()?; + wc_error_info.validate()?; + }; + + let description = context.connection.stream_description()?; + + // The comment should only be propagated to getMore calls on 4.4+. + let comment = if description.max_wire_version.unwrap_or(0) < SERVER_4_4_0_WIRE_VERSION { + None + } else { + self.options.as_ref().and_then(|opts| opts.comment.clone()) + }; + + Ok(CursorSpecification::new( + cursor_response.cursor, + description.server_address.clone(), + self.options.as_ref().and_then(|opts| opts.batch_size), + self.options.as_ref().and_then(|opts| opts.max_await_time), + comment, + )) } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/aggregate/change_stream.rs b/src/operation/aggregate/change_stream.rs index 3fe6a888b..e821d7ab2 100644 --- a/src/operation/aggregate/change_stream.rs +++ b/src/operation/aggregate/change_stream.rs @@ -1,5 +1,3 @@ -use futures_util::FutureExt; - use crate::{ bson::{doc, Document}, change_stream::{event::ResumeToken, ChangeStreamData, WatchArgs}, @@ -8,7 +6,6 @@ use crate::{ error::Result, operation::{append_options, ExecutionContext, OperationWithDefaults, Retryability}, options::{ChangeStreamOptions, SelectionCriteria, WriteConcern}, - BoxFuture, }; use super::Aggregate; @@ -89,41 +86,38 @@ impl OperationWithDefaults for ChangeStreamAggregate { &'a self, response: RawCommandResponse, mut context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let op_time = response - .raw_body() - .get("operationTime")? 
- .and_then(bson::RawBsonRef::as_timestamp); - - let inner_context = ExecutionContext { - connection: context.connection, - session: context.session.as_deref_mut(), - }; - let spec = self.inner.handle_response(response, inner_context).await?; - - let mut data = ChangeStreamData { - resume_token: ResumeToken::initial(self.args.options.as_ref(), &spec), - ..ChangeStreamData::default() - }; - let has_no_time = |o: &ChangeStreamOptions| { - o.start_at_operation_time.is_none() - && o.resume_after.is_none() - && o.start_after.is_none() - }; - - let description = context.connection.stream_description()?; - if self.args.options.as_ref().map_or(true, has_no_time) - && description.max_wire_version.map_or(false, |v| v >= 7) - && spec.initial_buffer.is_empty() - && spec.post_batch_resume_token.is_none() - { - data.initial_operation_time = op_time; - } - - Ok((spec, data)) + ) -> Result { + let op_time = response + .raw_body() + .get("operationTime")? + .and_then(bson::RawBsonRef::as_timestamp); + + let inner_context = ExecutionContext { + connection: context.connection, + session: context.session.as_deref_mut(), + }; + let spec = self.inner.handle_response(response, inner_context)?; + + let mut data = ChangeStreamData { + resume_token: ResumeToken::initial(self.args.options.as_ref(), &spec), + ..ChangeStreamData::default() + }; + let has_no_time = |o: &ChangeStreamOptions| { + o.start_at_operation_time.is_none() + && o.resume_after.is_none() + && o.start_after.is_none() + }; + + let description = context.connection.stream_description()?; + if self.args.options.as_ref().map_or(true, has_no_time) + && description.max_wire_version.map_or(false, |v| v >= 7) + && spec.initial_buffer.is_empty() + && spec.post_batch_resume_token.is_none() + { + data.initial_operation_time = op_time; } - .boxed() + + Ok((spec, data)) } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs index 7c5a6d6f8..18433fe9f 100644 --- a/src/operation/bulk_write.rs +++ b/src/operation/bulk_write.rs @@ -242,7 +242,7 @@ impl<'a> OperationWithDefaults for BulkWrite<'a> { Ok(command) } - fn handle_response<'b>( + fn handle_response_async<'b>( &'b self, response: RawCommandResponse, context: ExecutionContext<'b>, diff --git a/src/operation/commit_transaction.rs b/src/operation/commit_transaction.rs index 6c621cbf3..8de7cd5b4 100644 --- a/src/operation/commit_transaction.rs +++ b/src/operation/commit_transaction.rs @@ -1,14 +1,12 @@ use std::time::Duration; use bson::{doc, Document}; -use futures_util::FutureExt; use crate::{ cmap::{Command, RawCommandResponse, StreamDescription}, error::Result, operation::{append_options, remove_empty_write_concern, OperationWithDefaults, Retryability}, options::{Acknowledgment, TransactionOptions, WriteConcern}, - BoxFuture, }; use super::{ExecutionContext, WriteConcernOnlyBody}; @@ -48,12 +46,9 @@ impl OperationWithDefaults for CommitTransaction { &'a self, response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let response: WriteConcernOnlyBody = response.body()?; - response.validate() - } - .boxed() + ) -> Result { + let response: WriteConcernOnlyBody = response.body()?; + response.validate() } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/count.rs b/src/operation/count.rs index 201e9f79d..daf35753b 100644 --- a/src/operation/count.rs +++ b/src/operation/count.rs @@ -1,4 +1,3 @@ -use futures_util::FutureExt; use serde::Deserialize; use 
crate::{ @@ -8,7 +7,6 @@ use crate::{ error::{Error, Result}, operation::{append_options, OperationWithDefaults, Retryability}, selection_criteria::SelectionCriteria, - BoxFuture, }; use super::ExecutionContext; @@ -49,12 +47,9 @@ impl OperationWithDefaults for Count { &'a self, response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'static, Result> { - async move { - let response_body: ResponseBody = response.body()?; - Ok(response_body.n) - } - .boxed() + ) -> Result { + let response_body: ResponseBody = response.body()?; + Ok(response_body.n) } fn handle_error(&self, error: Error) -> Result { diff --git a/src/operation/count_documents.rs b/src/operation/count_documents.rs index 97a90409c..b61503f27 100644 --- a/src/operation/count_documents.rs +++ b/src/operation/count_documents.rs @@ -1,6 +1,5 @@ use std::convert::TryInto; -use futures_util::FutureExt; use serde::Deserialize; use crate::{ @@ -10,7 +9,6 @@ use crate::{ operation::aggregate::Aggregate, options::{AggregateOptions, CountOptions}, selection_criteria::SelectionCriteria, - BoxFuture, Namespace, }; @@ -94,12 +92,9 @@ impl OperationWithDefaults for CountDocuments { &'a self, response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let response: SingleCursorResult = response.body()?; - Ok(response.0.map(|r| r.n).unwrap_or(0)) - } - .boxed() + ) -> Result { + let response: SingleCursorResult = response.body()?; + Ok(response.0.map(|r| r.n).unwrap_or(0)) } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/create.rs b/src/operation/create.rs index 8d375c4fa..e26b73925 100644 --- a/src/operation/create.rs +++ b/src/operation/create.rs @@ -1,5 +1,3 @@ -use futures_util::FutureExt; - use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -11,7 +9,6 @@ use crate::{ WriteConcernOnlyBody, }, options::{CreateCollectionOptions, WriteConcern}, - BoxFuture, Namespace, }; @@ -54,12 +51,9 @@ impl OperationWithDefaults for Create { &'a self, response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let response: WriteConcernOnlyBody = response.body()?; - response.validate() - } - .boxed() + ) -> Result { + let response: WriteConcernOnlyBody = response.body()?; + response.validate() } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/create_indexes.rs b/src/operation/create_indexes.rs index b17ea13de..3135cc37a 100644 --- a/src/operation/create_indexes.rs +++ b/src/operation/create_indexes.rs @@ -1,5 +1,3 @@ -use futures_util::FutureExt; - use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -8,7 +6,6 @@ use crate::{ operation::{append_options, remove_empty_write_concern, OperationWithDefaults}, options::{CreateIndexOptions, WriteConcern}, results::CreateIndexesResult, - BoxFuture, Namespace, }; @@ -77,14 +74,11 @@ impl OperationWithDefaults for CreateIndexes { &'a self, response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let response: WriteConcernOnlyBody = response.body()?; - response.validate()?; - let index_names = self.indexes.iter().filter_map(|i| i.get_name()).collect(); - Ok(CreateIndexesResult { index_names }) - } - .boxed() + ) -> Result { + let response: WriteConcernOnlyBody = response.body()?; + response.validate()?; + let index_names = self.indexes.iter().filter_map(|i| i.get_name()).collect(); + 
Ok(CreateIndexesResult { index_names }) } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/delete.rs b/src/operation/delete.rs index 1a13184c2..52b181405 100644 --- a/src/operation/delete.rs +++ b/src/operation/delete.rs @@ -1,5 +1,3 @@ -use futures_util::FutureExt; - use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -15,7 +13,6 @@ use crate::{ }, options::{DeleteOptions, Hint, WriteConcern}, results::DeleteResult, - BoxFuture, }; use super::ExecutionContext; @@ -88,16 +85,13 @@ impl OperationWithDefaults for Delete { &'a self, response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let response: WriteResponseBody = response.body()?; - response.validate().map_err(convert_bulk_errors)?; + ) -> Result { + let response: WriteResponseBody = response.body()?; + response.validate().map_err(convert_bulk_errors)?; - Ok(DeleteResult { - deleted_count: response.n, - }) - } - .boxed() + Ok(DeleteResult { + deleted_count: response.n, + }) } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/distinct.rs b/src/operation/distinct.rs index addb093b0..8888cecf0 100644 --- a/src/operation/distinct.rs +++ b/src/operation/distinct.rs @@ -1,4 +1,3 @@ -use futures_util::FutureExt; use serde::Deserialize; use crate::{ @@ -8,7 +7,6 @@ use crate::{ error::Result, operation::{append_options, OperationWithDefaults, Retryability}, selection_criteria::SelectionCriteria, - BoxFuture, }; use super::ExecutionContext; @@ -75,12 +73,9 @@ impl OperationWithDefaults for Distinct { &'a self, response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let response: Response = response.body()?; - Ok(response.values) - } - .boxed() + ) -> Result { + let response: Response = response.body()?; + Ok(response.values) } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/drop_collection.rs b/src/operation/drop_collection.rs index d59f64a18..40c5dac91 100644 --- a/src/operation/drop_collection.rs +++ b/src/operation/drop_collection.rs @@ -1,5 +1,3 @@ -use futures_util::FutureExt; - use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -11,7 +9,6 @@ use crate::{ WriteConcernOnlyBody, }, options::{DropCollectionOptions, WriteConcern}, - BoxFuture, Namespace, }; @@ -54,12 +51,9 @@ impl OperationWithDefaults for DropCollection { &'a self, response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let response: WriteConcernOnlyBody = response.body()?; - response.validate() - } - .boxed() + ) -> Result { + let response: WriteConcernOnlyBody = response.body()?; + response.validate() } fn handle_error(&self, error: Error) -> Result { diff --git a/src/operation/drop_database.rs b/src/operation/drop_database.rs index f74612524..d88761094 100644 --- a/src/operation/drop_database.rs +++ b/src/operation/drop_database.rs @@ -1,5 +1,3 @@ -use futures_util::FutureExt; - use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -12,7 +10,6 @@ use crate::{ WriteConcernOnlyBody, }, options::WriteConcern, - BoxFuture, }; use super::ExecutionContext; @@ -54,12 +51,9 @@ impl OperationWithDefaults for DropDatabase { &'a self, response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let response: WriteConcernOnlyBody = response.body()?; - 
response.validate() - } - .boxed() + ) -> Result { + let response: WriteConcernOnlyBody = response.body()?; + response.validate() } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/drop_indexes.rs b/src/operation/drop_indexes.rs index 98051818e..e2442bbee 100644 --- a/src/operation/drop_indexes.rs +++ b/src/operation/drop_indexes.rs @@ -1,12 +1,9 @@ -use futures_util::FutureExt; - use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, error::Result, operation::{append_options, remove_empty_write_concern, OperationWithDefaults}, options::{DropIndexOptions, WriteConcern}, - BoxFuture, Namespace, }; @@ -49,8 +46,8 @@ impl OperationWithDefaults for DropIndexes { &'a self, _response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { Ok(()) }.boxed() + ) -> Result { + Ok(()) } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/find.rs b/src/operation/find.rs index d3da31ed3..e7e077606 100644 --- a/src/operation/find.rs +++ b/src/operation/find.rs @@ -1,5 +1,3 @@ -use futures_util::FutureExt; - use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -13,7 +11,6 @@ use crate::{ SERVER_4_4_0_WIRE_VERSION, }, options::{CursorType, FindOptions, SelectionCriteria}, - BoxFuture, Namespace, }; @@ -98,28 +95,25 @@ impl OperationWithDefaults for Find { &'a self, response: RawCommandResponse, context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let response: CursorBody = response.body()?; + ) -> Result { + let response: CursorBody = response.body()?; - let description = context.connection.stream_description()?; + let description = context.connection.stream_description()?; - // The comment should only be propagated to getMore calls on 4.4+. - let comment = if description.max_wire_version.unwrap_or(0) < SERVER_4_4_0_WIRE_VERSION { - None - } else { - self.options.as_ref().and_then(|opts| opts.comment.clone()) - }; + // The comment should only be propagated to getMore calls on 4.4+. 
+ let comment = if description.max_wire_version.unwrap_or(0) < SERVER_4_4_0_WIRE_VERSION { + None + } else { + self.options.as_ref().and_then(|opts| opts.comment.clone()) + }; - Ok(CursorSpecification::new( - response.cursor, - description.server_address.clone(), - self.options.as_ref().and_then(|opts| opts.batch_size), - self.options.as_ref().and_then(|opts| opts.max_await_time), - comment, - )) - } - .boxed() + Ok(CursorSpecification::new( + response.cursor, + description.server_address.clone(), + self.options.as_ref().and_then(|opts| opts.batch_size), + self.options.as_ref().and_then(|opts| opts.max_await_time), + comment, + )) } fn supports_read_concern(&self, _description: &StreamDescription) -> bool { diff --git a/src/operation/find_and_modify.rs b/src/operation/find_and_modify.rs index 52d75fb5c..3c76813b1 100644 --- a/src/operation/find_and_modify.rs +++ b/src/operation/find_and_modify.rs @@ -2,7 +2,6 @@ pub(crate) mod options; use std::{fmt::Debug, marker::PhantomData}; -use futures_util::FutureExt; use serde::{de::DeserializeOwned, Deserialize}; use self::options::FindAndModifyOptions; @@ -20,7 +19,6 @@ use crate::{ Retryability, }, options::WriteConcern, - BoxFuture, }; use super::{ExecutionContext, UpdateOrReplace}; @@ -101,28 +99,25 @@ impl OperationWithDefaults for FindAndModify { &'a self, response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - #[derive(Debug, Deserialize)] - pub(crate) struct Response { - value: RawBson, - } - let response: Response = response.body()?; - - match response.value { - RawBson::Document(doc) => Ok(Some(from_slice(doc.as_bytes())?)), - RawBson::Null => Ok(None), - other => Err(ErrorKind::InvalidResponse { - message: format!( - "expected document for value field of findAndModify response, but instead \ - got {:?}", - other - ), - } - .into()), + ) -> Result { + #[derive(Debug, Deserialize)] + pub(crate) struct Response { + value: RawBson, + } + let response: Response = response.body()?; + + match response.value { + RawBson::Document(doc) => Ok(Some(from_slice(doc.as_bytes())?)), + RawBson::Null => Ok(None), + other => Err(ErrorKind::InvalidResponse { + message: format!( + "expected document for value field of findAndModify response, but instead got \ + {:?}", + other + ), } + .into()), } - .boxed() } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/get_more.rs b/src/operation/get_more.rs index 55915e65e..aa69f3412 100644 --- a/src/operation/get_more.rs +++ b/src/operation/get_more.rs @@ -1,6 +1,5 @@ use std::{collections::VecDeque, time::Duration}; -use futures_util::FutureExt; use serde::Deserialize; use crate::{ @@ -12,7 +11,6 @@ use crate::{ operation::OperationWithDefaults, options::SelectionCriteria, results::GetMoreResult, - BoxFuture, Namespace, }; @@ -91,21 +89,16 @@ impl<'conn> OperationWithDefaults for GetMore<'conn> { &'a self, response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let response: GetMoreResponseBody = response.body()?; - - Ok(GetMoreResult { - batch: response.cursor.next_batch, - exhausted: response.cursor.id == 0, - post_batch_resume_token: ResumeToken::from_raw( - response.cursor.post_batch_resume_token, - ), - id: response.cursor.id, - ns: Namespace::from_str(response.cursor.ns.as_str()).unwrap(), - }) - } - .boxed() + ) -> Result { + let response: GetMoreResponseBody = response.body()?; + + Ok(GetMoreResult { + batch: response.cursor.next_batch, + exhausted: response.cursor.id == 
0, + post_batch_resume_token: ResumeToken::from_raw(response.cursor.post_batch_resume_token), + id: response.cursor.id, + ns: Namespace::from_str(response.cursor.ns.as_str()).unwrap(), + }) } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/insert.rs b/src/operation/insert.rs index d94c8778d..702485a38 100644 --- a/src/operation/insert.rs +++ b/src/operation/insert.rs @@ -1,7 +1,5 @@ use std::collections::HashMap; -use futures_util::FutureExt; - use crate::{ bson::{rawdoc, Bson, RawDocument, RawDocumentBuf}, bson_util::{ @@ -16,7 +14,6 @@ use crate::{ operation::{OperationWithDefaults, Retryability, WriteResponseBody}, options::{InsertManyOptions, WriteConcern}, results::InsertManyResult, - BoxFuture, Namespace, }; @@ -134,45 +131,42 @@ impl<'a> OperationWithDefaults for Insert<'a> { &'b self, response: RawCommandResponse, _context: ExecutionContext<'b>, - ) -> BoxFuture<'b, Result> { - async move { - let response: WriteResponseBody = response.body_utf8_lossy()?; - let response_n = Checked::::try_from(response.n)?; - - let mut map = HashMap::new(); - if self.options.ordered == Some(true) { - // in ordered inserts, only the first n were attempted. - for (i, id) in self.inserted_ids.iter().enumerate().take(response_n.get()?) { - map.insert(i, id.clone()); - } - } else { - // for unordered, add all the attempted ids and then remove the ones that have - // associated write errors. - for (i, id) in self.inserted_ids.iter().enumerate() { - map.insert(i, id.clone()); - } - - if let Some(write_errors) = response.write_errors.as_ref() { - for err in write_errors { - map.remove(&err.index); - } - } + ) -> Result { + let response: WriteResponseBody = response.body_utf8_lossy()?; + let response_n = Checked::::try_from(response.n)?; + + let mut map = HashMap::new(); + if self.options.ordered == Some(true) { + // in ordered inserts, only the first n were attempted. + for (i, id) in self.inserted_ids.iter().enumerate().take(response_n.get()?) { + map.insert(i, id.clone()); + } + } else { + // for unordered, add all the attempted ids and then remove the ones that have + // associated write errors. 
+ for (i, id) in self.inserted_ids.iter().enumerate() { + map.insert(i, id.clone()); } - if response.write_errors.is_some() || response.write_concern_error.is_some() { - return Err(Error::new( - ErrorKind::BulkWrite(BulkWriteFailure { - write_errors: response.write_errors, - write_concern_error: response.write_concern_error, - inserted_ids: map, - }), - response.labels, - )); + if let Some(write_errors) = response.write_errors.as_ref() { + for err in write_errors { + map.remove(&err.index); + } } + } - Ok(InsertManyResult { inserted_ids: map }) + if response.write_errors.is_some() || response.write_concern_error.is_some() { + return Err(Error::new( + ErrorKind::BulkWrite(BulkWriteFailure { + write_errors: response.write_errors, + write_concern_error: response.write_concern_error, + inserted_ids: map, + }), + response.labels, + )); } - .boxed() + + Ok(InsertManyResult { inserted_ids: map }) } fn write_concern(&self) -> Option<&WriteConcern> { diff --git a/src/operation/list_collections.rs b/src/operation/list_collections.rs index 0b199e9f2..f2c3328c0 100644 --- a/src/operation/list_collections.rs +++ b/src/operation/list_collections.rs @@ -1,5 +1,3 @@ -use futures_util::FutureExt; - use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -7,7 +5,6 @@ use crate::{ error::Result, operation::{append_options, CursorBody, OperationWithDefaults, Retryability}, options::{ListCollectionsOptions, ReadPreference, SelectionCriteria}, - BoxFuture, }; use super::ExecutionContext; @@ -61,22 +58,19 @@ impl OperationWithDefaults for ListCollections { &'a self, response: RawCommandResponse, context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let response: CursorBody = response.body()?; - Ok(CursorSpecification::new( - response.cursor, - context - .connection - .stream_description()? - .server_address - .clone(), - self.options.as_ref().and_then(|opts| opts.batch_size), - None, - None, - )) - } - .boxed() + ) -> Result { + let response: CursorBody = response.body()?; + Ok(CursorSpecification::new( + response.cursor, + context + .connection + .stream_description()? 
+ .server_address + .clone(), + self.options.as_ref().and_then(|opts| opts.batch_size), + None, + None, + )) } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/list_databases.rs b/src/operation/list_databases.rs index d2dcf646f..21d43af19 100644 --- a/src/operation/list_databases.rs +++ b/src/operation/list_databases.rs @@ -1,4 +1,3 @@ -use futures_util::FutureExt; use serde::Deserialize; use crate::{ @@ -8,7 +7,6 @@ use crate::{ error::Result, operation::{append_options, OperationWithDefaults, Retryability}, selection_criteria::{ReadPreference, SelectionCriteria}, - BoxFuture, }; use super::ExecutionContext; @@ -50,12 +48,9 @@ impl OperationWithDefaults for ListDatabases { &'a self, response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let response: Response = response.body()?; - Ok(response.databases) - } - .boxed() + ) -> Result { + let response: Response = response.body()?; + Ok(response.databases) } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/list_indexes.rs b/src/operation/list_indexes.rs index 240b81b3b..f365a77fd 100644 --- a/src/operation/list_indexes.rs +++ b/src/operation/list_indexes.rs @@ -1,5 +1,3 @@ -use futures_util::FutureExt; - use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -8,7 +6,6 @@ use crate::{ operation::{append_options, OperationWithDefaults}, options::ListIndexesOptions, selection_criteria::{ReadPreference, SelectionCriteria}, - BoxFuture, Namespace, }; @@ -51,22 +48,19 @@ impl OperationWithDefaults for ListIndexes { &'a self, response: RawCommandResponse, context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let response: CursorBody = response.body()?; - Ok(CursorSpecification::new( - response.cursor, - context - .connection - .stream_description()? - .server_address - .clone(), - self.options.as_ref().and_then(|o| o.batch_size), - self.options.as_ref().and_then(|o| o.max_time), - None, - )) - } - .boxed() + ) -> Result { + let response: CursorBody = response.body()?; + Ok(CursorSpecification::new( + response.cursor, + context + .connection + .stream_description()? + .server_address + .clone(), + self.options.as_ref().and_then(|o| o.batch_size), + self.options.as_ref().and_then(|o| o.max_time), + None, + )) } fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/run_command.rs b/src/operation/run_command.rs index 48552de96..b6e92a240 100644 --- a/src/operation/run_command.rs +++ b/src/operation/run_command.rs @@ -1,14 +1,11 @@ use std::convert::TryInto; -use futures_util::FutureExt; - use crate::{ bson::{Document, RawBsonRef, RawDocumentBuf}, client::SESSIONS_UNSUPPORTED_COMMANDS, cmap::{conn::PinnedConnectionHandle, Command, RawCommandResponse, StreamDescription}, error::{ErrorKind, Result}, selection_criteria::SelectionCriteria, - BoxFuture, }; use super::{CursorBody, ExecutionContext, OperationWithDefaults}; @@ -97,8 +94,8 @@ impl<'conn> OperationWithDefaults for RunCommand<'conn> { &'a self, response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { Ok(response.into_raw_document_buf().try_into()?) }.boxed() + ) -> Result { + Ok(response.into_raw_document_buf().try_into()?) 
} fn selection_criteria(&self) -> Option<&SelectionCriteria> { diff --git a/src/operation/search_index.rs b/src/operation/search_index.rs index 819a7e0bf..422fcd241 100644 --- a/src/operation/search_index.rs +++ b/src/operation/search_index.rs @@ -1,11 +1,9 @@ -use futures_util::FutureExt; use serde::Deserialize; use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse}, error::Result, - BoxFuture, Namespace, SearchIndexModel, }; @@ -44,29 +42,26 @@ impl OperationWithDefaults for CreateSearchIndexes { &'a self, response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - #[derive(Debug, Deserialize)] - #[serde(rename_all = "camelCase")] - struct Response { - indexes_created: Vec, - } - - #[derive(Debug, Deserialize)] - struct CreatedIndex { - #[allow(unused)] - id: String, - name: String, - } - - let response: Response = response.body()?; - Ok(response - .indexes_created - .into_iter() - .map(|ci| ci.name) - .collect()) + ) -> Result { + #[derive(Debug, Deserialize)] + #[serde(rename_all = "camelCase")] + struct Response { + indexes_created: Vec, } - .boxed() + + #[derive(Debug, Deserialize)] + struct CreatedIndex { + #[allow(unused)] + id: String, + name: String, + } + + let response: Response = response.body()?; + Ok(response + .indexes_created + .into_iter() + .map(|ci| ci.name) + .collect()) } fn supports_sessions(&self) -> bool { @@ -119,8 +114,8 @@ impl OperationWithDefaults for UpdateSearchIndex { &'a self, _response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { Ok(()) }.boxed() + ) -> Result { + Ok(()) } fn supports_sessions(&self) -> bool { @@ -167,8 +162,8 @@ impl OperationWithDefaults for DropSearchIndex { &'a self, _response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { Ok(()) }.boxed() + ) -> Result { + Ok(()) } fn handle_error(&self, error: crate::error::Error) -> Result { diff --git a/src/operation/update.rs b/src/operation/update.rs index ea497aa94..7df20ad3a 100644 --- a/src/operation/update.rs +++ b/src/operation/update.rs @@ -1,5 +1,3 @@ -use futures_util::FutureExt; - use serde::Deserialize; use crate::{ @@ -10,7 +8,6 @@ use crate::{ operation::{OperationWithDefaults, Retryability, WriteResponseBody}, options::{UpdateModifications, UpdateOptions, WriteConcern}, results::UpdateResult, - BoxFuture, Namespace, }; @@ -166,32 +163,29 @@ impl OperationWithDefaults for Update { &'a self, response: RawCommandResponse, _context: ExecutionContext<'a>, - ) -> BoxFuture<'a, Result> { - async move { - let response: WriteResponseBody = response.body_utf8_lossy()?; - response.validate().map_err(convert_bulk_errors)?; - - let modified_count = response.n_modified; - let upserted_id = response - .upserted - .as_ref() - .and_then(|v| v.first()) - .and_then(|doc| doc.get("_id")) - .cloned(); - - let matched_count = if upserted_id.is_some() { - 0 - } else { - response.body.n - }; - - Ok(UpdateResult { - matched_count, - modified_count, - upserted_id, - }) - } - .boxed() + ) -> Result { + let response: WriteResponseBody = response.body_utf8_lossy()?; + response.validate().map_err(convert_bulk_errors)?; + + let modified_count = response.n_modified; + let upserted_id = response + .upserted + .as_ref() + .and_then(|v| v.first()) + .and_then(|doc| doc.get("_id")) + .cloned(); + + let matched_count = if upserted_id.is_some() { + 0 + } else { + response.body.n + }; + + Ok(UpdateResult { + matched_count, + modified_count, + 
upserted_id, + }) } fn write_concern(&self) -> Option<&WriteConcern> { From 81bde91a34625e0a54bf2003d9c4fdbb31a984f8 Mon Sep 17 00:00:00 2001 From: Abraham Egnor Date: Mon, 13 May 2024 10:34:36 -0400 Subject: [PATCH 74/75] RUST-1921 Sign crate on release (#1095) --- .evergreen/release-build-papertrail-vars.sh | 18 --------- .evergreen/release-build-vars.sh | 29 ++++++++++++++ .evergreen/release-sign.sh | 17 +++++++++ .evergreen/releases.yml | 42 +++++++++++++++------ 4 files changed, 77 insertions(+), 29 deletions(-) delete mode 100644 .evergreen/release-build-papertrail-vars.sh create mode 100644 .evergreen/release-build-vars.sh create mode 100644 .evergreen/release-sign.sh diff --git a/.evergreen/release-build-papertrail-vars.sh b/.evergreen/release-build-papertrail-vars.sh deleted file mode 100644 index 7e64f2bf8..000000000 --- a/.evergreen/release-build-papertrail-vars.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -set -o errexit -set +x - -. ${DRIVERS_TOOLS}/.evergreen/secrets_handling/setup-secrets.sh drivers/rust -rm secrets-export.sh - -PAPERTRAIL_PRODUCT="rust-driver" -if [[ "${DRY_RUN}" == "yes" ]]; then - PAPERTRAIL_PRODUCT="rust-driver-testing" -fi - -cat <papertrail-expansion.yml -PAPERTRAIL_KEY_ID: "${PAPERTRAIL_KEY_ID}" -PAPERTRAIL_SECRET_KEY: "${PAPERTRAIL_SECRET_KEY}" -PAPERTRAIL_PRODUCT: "${PAPERTRAIL_PRODUCT}" -EOT diff --git a/.evergreen/release-build-vars.sh b/.evergreen/release-build-vars.sh new file mode 100644 index 000000000..81ffac392 --- /dev/null +++ b/.evergreen/release-build-vars.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +set -o errexit +set -o pipefail + +source ./.evergreen/env.sh + +set +x + +CRATE_VERSION=$(cargo metadata --format-version=1 --no-deps | jq --raw-output '.packages[0].version') + +. ${DRIVERS_TOOLS}/.evergreen/secrets_handling/setup-secrets.sh drivers/rust +rm secrets-export.sh + +PAPERTRAIL_PRODUCT="rust-driver" +if [[ "${DRY_RUN}" == "yes" ]]; then + PAPERTRAIL_PRODUCT="rust-driver-testing" +fi + +cat <release-expansion.yml +CRATE_VERSION: "${CRATE_VERSION}" +PAPERTRAIL_KEY_ID: "${PAPERTRAIL_KEY_ID}" +PAPERTRAIL_SECRET_KEY: "${PAPERTRAIL_SECRET_KEY}" +PAPERTRAIL_PRODUCT: "${PAPERTRAIL_PRODUCT}" +ARTIFACTORY_USERNAME: "${ARTIFACTORY_USERNAME}" +ARTIFACTORY_PASSWORD: "${ARTIFACTORY_PASSWORD}" +GARASIGN_USERNAME: "${GARASIGN_USERNAME}" +GARASIGN_PASSWORD: "${GARASIGN_PASSWORD}" +EOT diff --git a/.evergreen/release-sign.sh b/.evergreen/release-sign.sh new file mode 100644 index 000000000..dba3c169e --- /dev/null +++ b/.evergreen/release-sign.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +set -o errexit +set +x + +echo "${ARTIFACTORY_PASSWORD}" | docker login --password-stdin --username ${ARTIFACTORY_USERNAME} artifactory.corp.mongodb.com + +echo "GRS_CONFIG_USER1_USERNAME=${GARASIGN_USERNAME}" >> "signing-envfile" +echo "GRS_CONFIG_USER1_PASSWORD=${GARASIGN_PASSWORD}" >> "signing-envfile" + +docker run \ + --env-file=signing-envfile \ + --rm \ + -v $(pwd):$(pwd) \ + -w $(pwd) \ + artifactory.corp.mongodb.com/release-tools-container-registry-local/garasign-gpg \ + /bin/bash -c "gpgloader && gpg --yes -v --armor -o mongodb-${CRATE_VERSION}.sig --detach-sign target/package/mongodb-${CRATE_VERSION}.crate" diff --git a/.evergreen/releases.yml b/.evergreen/releases.yml index 3fba2263f..0da4a3519 100644 --- a/.evergreen/releases.yml +++ b/.evergreen/releases.yml @@ -90,26 +90,27 @@ functions: args: - .evergreen/fetch-drivers-tools.sh - "build papertrail vars": + "build vars": + - command: ec2.assume_role + params: + role_arn: ${aws_test_secrets_role} + - command: 
subprocess.exec params: working_dir: src - include_expansions_in_env: - - DRIVERS_TOOLS - - DRY_RUN - - GIT_TAG + add_expansions_to_env: true binary: bash args: - - .evergreen/release-build-papertrail-vars.sh + - .evergreen/release-build-vars.sh - command: expansions.update params: - file: src/papertrail-expansion.yml + file: src/release-expansion.yml - command: shell.exec params: working_dir: "src" - script: rm papertrail-expansion.yml + script: rm release-expansion.yml "fetch tag": command: subprocess.exec @@ -137,9 +138,26 @@ functions: key_id: ${PAPERTRAIL_KEY_ID} secret_key: ${PAPERTRAIL_SECRET_KEY} product: ${PAPERTRAIL_PRODUCT} - version: ${GIT_TAG} + version: ${CRATE_VERSION} filenames: - - src/target/package/mongodb-*.crate + - src/target/package/mongodb-${CRATE_VERSION}.crate + + "sign release": + - command: subprocess.exec + params: + working_dir: "src" + include_expansions_in_env: + - ARTIFACTORY_USERNAME + - ARTIFACTORY_PASSWORD + - GARASIGN_USERNAME + - GARASIGN_PASSWORD + - CRATE_VERSION + binary: bash + args: + - .evergreen/release-sign.sh + + "save signature": + command: s3.push tasks: - name: "publish-release" @@ -147,9 +165,11 @@ tasks: - func: "fetch source" - func: "install dependencies" - func: "fetch tag" - - func: "build papertrail vars" + - func: "build vars" - func: "publish release" - func: "publish papertrail" + - func: "sign release" + - func: "save signature" axes: - id: "os" From d425f73c00f3cdb3a8c9d14598465bb078f3f5d1 Mon Sep 17 00:00:00 2001 From: Isabel Atkinson Date: Mon, 13 May 2024 09:17:24 -0600 Subject: [PATCH 75/75] RUST-1945 Add a `with_type` method to the `Aggregate` action (#1100) --- src/action/aggregate.rs | 72 +++++++++++++++++++++++++++++++++-------- src/test/coll.rs | 43 ++++++++++++++++++++++++ src/test/db.rs | 31 ++++++++++++++++++ 3 files changed, 132 insertions(+), 14 deletions(-) diff --git a/src/action/aggregate.rs b/src/action/aggregate.rs index 7872ace6f..785f8619b 100644 --- a/src/action/aggregate.rs +++ b/src/action/aggregate.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use std::{marker::PhantomData, time::Duration}; use bson::Document; @@ -24,8 +24,9 @@ impl Database { /// See the documentation [here](https://www.mongodb.com/docs/manual/aggregation/) for more /// information on aggregations. /// - /// `await` will return d[`Result>`] or d[`Result>`] if - /// a `ClientSession` is provided. + /// `await` will return d[`Result>`]. If a [`ClientSession`] was provided, the + /// returned cursor will be a [`SessionCursor`]. If [`with_type`](Aggregate::with_type) was + /// called, the returned cursor will be generic over the `T` specified. #[deeplink] pub fn aggregate(&self, pipeline: impl IntoIterator) -> Aggregate { Aggregate { @@ -33,6 +34,7 @@ impl Database { pipeline: pipeline.into_iter().collect(), options: None, session: ImplicitSession, + _phantom: PhantomData, } } } @@ -46,8 +48,9 @@ where /// See the documentation [here](https://www.mongodb.com/docs/manual/aggregation/) for more /// information on aggregations. /// - /// `await` will return d[`Result>`] or d[`Result>`] if - /// a [`ClientSession`] is provided. + /// `await` will return d[`Result>`]. If a [`ClientSession`] was provided, the + /// returned cursor will be a [`SessionCursor`]. If [`with_type`](Aggregate::with_type) was + /// called, the returned cursor will be generic over the `T` specified. 
     #[deeplink]
     pub fn aggregate(&self, pipeline: impl IntoIterator<Item = Document>) -> Aggregate {
         Aggregate {
@@ -55,6 +58,7 @@ where
             pipeline: pipeline.into_iter().collect(),
             options: None,
             session: ImplicitSession,
+            _phantom: PhantomData,
         }
     }
 }
@@ -66,8 +70,10 @@ impl crate::sync::Database {
     /// See the documentation [here](https://www.mongodb.com/docs/manual/aggregation/) for more
     /// information on aggregations.
     ///
-    /// [`run`](Aggregate::run) will return d[`Result<crate::sync::Cursor<Document>>`] or
-    /// d[`Result<crate::sync::SessionCursor<Document>>`] if a [`ClientSession`] is provided.
+    /// [`run`](Aggregate::run) will return d[`Result<crate::sync::Cursor<Document>>`]. If a
+    /// [`crate::sync::ClientSession`] was provided, the returned cursor will be a
+    /// [`crate::sync::SessionCursor`]. If [`with_type`](Aggregate::with_type) was called, the
+    /// returned cursor will be generic over the `T` specified.
     #[deeplink]
     pub fn aggregate(&self, pipeline: impl IntoIterator<Item = Document>) -> Aggregate {
         self.async_database.aggregate(pipeline)
@@ -84,8 +90,10 @@ where
     /// See the documentation [here](https://www.mongodb.com/docs/manual/aggregation/) for more
     /// information on aggregations.
     ///
-    /// [`run`](Aggregate::run) will return d[`Result<crate::sync::Cursor<Document>>`] or
-    /// d[`Result<crate::sync::SessionCursor<Document>>`] if a `ClientSession` is provided.
+    /// [`run`](Aggregate::run) will return d[`Result<crate::sync::Cursor<Document>>`]. If a
+    /// `crate::sync::ClientSession` was provided, the returned cursor will be a
+    /// `crate::sync::SessionCursor`. If [`with_type`](Aggregate::with_type) was called, the
+    /// returned cursor will be generic over the `T` specified.
     #[deeplink]
     pub fn aggregate(&self, pipeline: impl IntoIterator<Item = Document>) -> Aggregate {
         self.async_collection.aggregate(pipeline)
@@ -95,14 +103,15 @@ where
 /// Run an aggregation operation. Construct with [`Database::aggregate`] or
 /// [`Collection::aggregate`].
 #[must_use]
-pub struct Aggregate<'a, Session = ImplicitSession> {
+pub struct Aggregate<'a, Session = ImplicitSession, T = Document> {
     target: AggregateTargetRef<'a>,
     pipeline: Vec<Document>,
     options: Option<AggregateOptions>,
     session: Session,
+    _phantom: PhantomData<T>,
 }
 
-impl<'a, Session> Aggregate<'a, Session> {
+impl<'a, Session, T> Aggregate<'a, Session, T> {
     option_setters!(options: AggregateOptions;
         allow_disk_use: bool,
         batch_size: u32,
@@ -130,15 +139,50 @@ impl<'a> Aggregate<'a, ImplicitSession> {
             pipeline: self.pipeline,
             options: self.options,
             session: ExplicitSession(value.into()),
+            _phantom: PhantomData,
         }
     }
 }
 
-#[action_impl(sync = crate::sync::Cursor<Document>)]
-impl<'a> Action for Aggregate<'a, ImplicitSession> {
+impl<'a, Session> Aggregate<'a, Session, Document> {
+    /// Use the provided type for the returned cursor.
+    ///
+    /// ```rust
+    /// # use futures_util::TryStreamExt;
+    /// # use mongodb::{bson::Document, error::Result, Cursor, Database};
+    /// # use serde::Deserialize;
+    /// # async fn run() -> Result<()> {
+    /// # let database: Database = todo!();
+    /// # let pipeline: Vec<Document> = todo!();
+    /// #[derive(Deserialize)]
+    /// struct PipelineOutput {
+    ///     len: usize,
+    /// }
+    ///
+    /// let aggregate_cursor = database
+    ///     .aggregate(pipeline)
+    ///     .with_type::<PipelineOutput>()
+    ///     .await?;
+    /// let aggregate_results: Vec<PipelineOutput> = aggregate_cursor.try_collect().await?;
+    /// # Ok(())
+    /// # }
+    /// ```
+    pub fn with_type<T>(self) -> Aggregate<'a, Session, T> {
+        Aggregate {
+            target: self.target,
+            pipeline: self.pipeline,
+            options: self.options,
+            session: self.session,
+            _phantom: PhantomData,
+        }
+    }
+}
+
+#[action_impl(sync = crate::sync::Cursor<T>)]
+impl<'a, T> Action for Aggregate<'a, ImplicitSession, T> {
     type Future = AggregateFuture;
 
-    async fn execute(mut self) -> Result<Cursor<Document>> {
+    async fn execute(mut self) -> Result<Cursor<T>> {
         resolve_options!(
             self.target,
             self.options,
diff --git a/src/test/coll.rs b/src/test/coll.rs
index 696ccbe91..23c1350ca 100644
--- a/src/test/coll.rs
+++ b/src/test/coll.rs
@@ -27,6 +27,7 @@ use crate::{
     results::DeleteResult,
     test::{get_client_options, log_uncaptured, util::TestClient, EventClient},
     Collection,
+    Cursor,
     IndexModel,
 };
 
@@ -1306,3 +1307,45 @@ async fn insert_many_document_sequences() {
     let second_batch_len = second_event.command.get_array("documents").unwrap().len();
     assert_eq!(first_batch_len + second_batch_len, total_docs);
 }
+
+#[tokio::test]
+async fn aggregate_with_generics() {
+    #[derive(Serialize)]
+    struct A {
+        str: String,
+    }
+
+    #[derive(Deserialize)]
+    struct B {
+        len: i32,
+    }
+
+    let client = TestClient::new().await;
+    let collection = client
+        .database("aggregate_with_generics")
+        .collection::<A>("aggregate_with_generics");
+
+    let a = A {
+        str: "hi".to_string(),
+    };
+    let len = a.str.len();
+    collection.insert_one(&a).await.unwrap();
+
+    // Assert at compile-time that the default cursor returned is a Cursor<Document>
+    let basic_pipeline = vec![doc! { "$match": { "a": 1 } }];
+    let _: Cursor<Document> = collection.aggregate(basic_pipeline).await.unwrap();
+
+    // Assert that data is properly deserialized when using with_type
+    let project_pipeline = vec![doc! { "$project": {
+            "str": 1,
+            "len": { "$strLenBytes": "$str" }
+        }
+    }];
+    let cursor = collection
+        .aggregate(project_pipeline)
+        .with_type::<B>()
+        .await
+        .unwrap();
+    let lens: Vec<B> = cursor.try_collect().await.unwrap();
+    assert_eq!(lens[0].len as usize, len);
+}
diff --git a/src/test/db.rs b/src/test/db.rs
index 9d0c7f4ca..46b6d1088 100644
--- a/src/test/db.rs
+++ b/src/test/db.rs
@@ -1,6 +1,7 @@
 use std::cmp::Ord;
 
 use futures::stream::TryStreamExt;
+use serde::Deserialize;
 
 use crate::{
     action::Action,
@@ -17,6 +18,7 @@ use crate::{
     results::{CollectionSpecification, CollectionType},
     test::util::TestClient,
     Client,
+    Cursor,
     Database,
 };
 
@@ -378,3 +380,32 @@ async fn clustered_index_list_collections() {
         .unwrap();
     assert!(clustered_index_collection.options.clustered_index.is_some());
 }
+
+#[tokio::test]
+async fn aggregate_with_generics() {
+    #[derive(Deserialize)]
+    struct A {
+        str: String,
+    }
+
+    let client = TestClient::new().await;
+    let database = client.database("aggregate_with_generics");
+
+    if client.server_version_lt(5, 1) {
+        log_uncaptured(
+            "skipping aggregate_with_generics: $documents agg stage only available on 5.1+",
+        );
+        return;
+    }
+
+    // The cursor returned will contain these documents
+    let pipeline = vec![doc! { "$documents": [ { "str": "hi" } ] }];
+
+    // Assert at compile-time that the default cursor returned is a Cursor<Document>
+    let _: Cursor<Document> = database.aggregate(pipeline.clone()).await.unwrap();
+
+    // Assert that data is properly deserialized when using with_type
+    let mut cursor = database.aggregate(pipeline).with_type::<A>().await.unwrap();
+    assert!(cursor.advance().await.unwrap());
+    assert_eq!(&cursor.deserialize_current().unwrap().str, "hi");
+}
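
Editorial note, not part of the patch series: the `with_type` change above keeps all of the builder's state (pipeline, options, session) and only swaps the output type, which is why the struct carries a `PhantomData<T>` field for an otherwise-unused type parameter. A minimal, self-contained sketch of that phantom-type builder pattern follows; the names (`TypedBuilder`, `FromValue`) are hypothetical and do not exist in the driver.

// Sketch of a builder whose output type can be retargeted without touching its data.
use std::marker::PhantomData;

struct TypedBuilder<T = String> {
    value: i64,
    // PhantomData makes the unused type parameter T part of the struct's type.
    _phantom: PhantomData<T>,
}

impl TypedBuilder<String> {
    fn new(value: i64) -> Self {
        TypedBuilder {
            value,
            _phantom: PhantomData,
        }
    }

    // Re-wrap the same stored data under a different output type parameter.
    fn with_type<T>(self) -> TypedBuilder<T> {
        TypedBuilder {
            value: self.value,
            _phantom: PhantomData,
        }
    }
}

trait FromValue: Sized {
    fn from_value(value: i64) -> Self;
}

impl FromValue for String {
    fn from_value(value: i64) -> Self {
        value.to_string()
    }
}

impl FromValue for i64 {
    fn from_value(value: i64) -> Self {
        value
    }
}

impl<T: FromValue> TypedBuilder<T> {
    // The "run" step produces whatever type the builder is currently parameterized with.
    fn run(self) -> T {
        T::from_value(self.value)
    }
}

fn main() {
    let s: String = TypedBuilder::new(7).run(); // default output type
    let n: i64 = TypedBuilder::new(7).with_type::<i64>().run(); // retargeted output type
    assert_eq!(s, "7");
    assert_eq!(n, 7);
}

The same shape appears in the patch: `Aggregate::with_type` rebuilds the action with an unchanged pipeline and a new `T`, and the cursor deserializes into that `T` when the action runs.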