Skip to content

RUST-656 Run tests against serverless in evergreen #504

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 21 commits into from
Nov 8, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
86 changes: 86 additions & 0 deletions .evergreen/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -377,6 +377,33 @@ functions:
MONGODB_API_VERSION=${MONGODB_API_VERSION} \
.evergreen/run-tests.sh

# Evergreen function: runs the driver's test suite against an Atlas serverless
# instance. Expects the serverless instance to already exist and its URIs to be
# present in the expansions (see the serverless_task_group setup_group).
"run serverless tests":
- command: shell.exec
type: test
params:
shell: bash
working_dir: "src"
script: |
${PREPARE_SHELL}
set +o xtrace
# Exported without xtrace to avoid leaking credentials
export SERVERLESS_ATLAS_USER="${SERVERLESS_ATLAS_USER}"
export SERVERLESS_ATLAS_PASSWORD="${SERVERLESS_ATLAS_PASSWORD}"

export MONGODB_URI="${MONGODB_URI}"
export SSL="${SSL}"
# Serverless connections go through the Atlas Proxy, so the load-balancer
# URI variables are pointed at the ATLASPROXY expansions here.
export SINGLE_MONGOS_LB_URI="${SINGLE_ATLASPROXY_SERVERLESS_URI}"
export MULTI_MONGOS_LB_URI="${MULTI_ATLASPROXY_SERVERLESS_URI}"
. .evergreen/generate-uri.sh

# All three compressors are enabled for serverless runs.
SNAPPY_COMPRESSION_ENABLED="true" \
ZLIB_COMPRESSION_ENABLED="true" \
ZSTD_COMPRESSION_ENABLED="true" \
SINGLE_THREAD=${SINGLE_THREAD} \
ASYNC_RUNTIME=${ASYNC_RUNTIME} \
MONGODB_API_VERSION=${MONGODB_API_VERSION} \
.evergreen/run-serverless-tests.sh

"run atlas tests":
- command: shell.exec
type: test
Expand Down Expand Up @@ -881,6 +908,11 @@ tasks:
commands:
- func: "run connection string tests"

- name: "test-serverless"
tags: ["serverless"]
commands:
- func: "run serverless tests"

- name: "test-atlas-connectivity"
tags: ["atlas-connect"]
commands:
Expand Down Expand Up @@ -1387,6 +1419,51 @@ axes:
REQUIRE_API_VERSION: "1"
MONGODB_API_VERSION: "1"

# Task group that provisions an Atlas serverless instance before its tasks run
# and deletes it afterwards, so every ".serverless"-tagged task shares one
# instance instead of creating its own.
task_groups:
- name: serverless_task_group
setup_group_can_fail_task: true
setup_group_timeout_secs: 1800 # 30 minutes
setup_group:
- func: "fetch source"
- func: "prepare resources"
- func: "windows fix"
- func: "fix absolute paths"
- func: "init test-results"
- func: "make files executable"
- func: "install dependencies"
# Create the serverless instance via drivers-tools; xtrace is disabled so
# the Atlas API keys are not leaked into the task logs.
- command: shell.exec
params:
shell: "bash"
script: |
${PREPARE_SHELL}
set +o xtrace
SERVERLESS_DRIVERS_GROUP=${SERVERLESS_DRIVERS_GROUP} \
SERVERLESS_API_PUBLIC_KEY=${SERVERLESS_API_PUBLIC_KEY} \
SERVERLESS_API_PRIVATE_KEY=${SERVERLESS_API_PRIVATE_KEY} \
LOADBALANCED=ON \
bash ${DRIVERS_TOOLS}/.evergreen/serverless/create-instance.sh
# Load the instance's connection details (URIs, instance name) into the
# task expansions. NOTE(review): serverless-expansion.yml is presumably
# written by create-instance.sh above — confirm against drivers-tools.
- command: expansions.update
params:
file: serverless-expansion.yml
teardown_group:
# Tear down the serverless instance created in setup_group, again with
# xtrace left off by PREPARE_SHELL defaults to protect the API keys.
- command: shell.exec
params:
script: |
${PREPARE_SHELL}
set +o xtrace
SERVERLESS_DRIVERS_GROUP=${SERVERLESS_DRIVERS_GROUP} \
SERVERLESS_API_PUBLIC_KEY=${SERVERLESS_API_PUBLIC_KEY} \
SERVERLESS_API_PRIVATE_KEY=${SERVERLESS_API_PRIVATE_KEY} \
SERVERLESS_INSTANCE_NAME=${SERVERLESS_INSTANCE_NAME} \
bash ${DRIVERS_TOOLS}/.evergreen/serverless/delete-instance.sh
- func: "upload test results"
- func: "upload-mo-artifacts"
- func: "cleanup"

# Every task tagged "serverless" (e.g. test-serverless) runs in this group.
tasks:
- ".serverless"

buildvariants:
-
matrix_name: "tests"
Expand Down Expand Up @@ -1477,6 +1554,15 @@ buildvariants:
tasks:
- "test-plain-auth"

# Build variants for the serverless tests: Ubuntu 18.04 crossed with every
# async runtime, each running the provisioning task group rather than the
# bare test task so instance setup/teardown is handled automatically.
- matrix_name: "serverless"
matrix_spec:
os:
- ubuntu-18.04
async-runtime: "*"
display_name: "Serverless ${os} with ${async-runtime}"
tasks:
- "serverless_task_group"

- matrix_name: "atlas-connect"
matrix_spec:
os:
Expand Down
5 changes: 4 additions & 1 deletion .evergreen/generate-uri.sh
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,10 @@ update_uri() {
else
ORIG_URI="${ORIG_URI}/?"
fi
echo "${ORIG_URI}tls=true&tlsCAFile=${CA_FILE}&tlsCertificateKeyFile=${CERT_FILE}&tlsAllowInvalidCertificates=true"
if [[ "$ORIG_URI" != *"tls=true"* ]]; then
ORIG_URI="${ORIG_URI}tls=true&"
fi
echo "${ORIG_URI}tlsCAFile=${CA_FILE}&tlsCertificateKeyFile=${CERT_FILE}&tlsAllowInvalidCertificates=true"
}

export MONGODB_URI="$(update_uri ${MONGODB_URI})"
Expand Down
41 changes: 41 additions & 0 deletions .evergreen/run-serverless-tests.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
#!/usr/bin/env bash

# Runs the driver test suites relevant to serverless against an Atlas
# serverless instance, converting each suite's JSON output to JUnit XML and
# merging the reports into results.xml.
#
# Required environment:
#   ASYNC_RUNTIME - "tokio" or "async-std" (anything else aborts)
#   SINGLE_THREAD - "true" to run tests with --test-threads=1

set -o errexit
# Without pipefail, `cargo test ... | cargo2junit` only reports cargo2junit's
# exit status, so a failing test run would be silently masked. pipefail makes
# the pipeline (and thus errexit) see the cargo test failure.
set -o pipefail

source ./.evergreen/env.sh

FEATURE_FLAGS="zstd-compression,snappy-compression,zlib-compression"
DEFAULT_FEATURES=""

if [ "$ASYNC_RUNTIME" = "async-std" ]; then
    # async-std requires disabling the default (tokio) runtime feature.
    FEATURE_FLAGS="${FEATURE_FLAGS},async-std-runtime"
    DEFAULT_FEATURES="--no-default-features"
elif [ "$ASYNC_RUNTIME" != "tokio" ]; then
    echo "invalid async runtime: ${ASYNC_RUNTIME}" >&2
    exit 1
fi

# -Z unstable-options enables the JSON test output consumed by cargo2junit.
OPTIONS="-- -Z unstable-options --format json --report-time"

if [ "$SINGLE_THREAD" = true ]; then
    OPTIONS="$OPTIONS --test-threads=1"
fi

echo "cargo test options: ${DEFAULT_FEATURES} --features $FEATURE_FLAGS ${OPTIONS}"

# Runs the test subtree named by $1 with SERVERLESS set and emits JUnit XML
# on stdout; callers redirect to a per-suite .xml file.
cargo_test() {
    RUST_BACKTRACE=1 \
    SERVERLESS="serverless" \
    cargo test ${DEFAULT_FEATURES} --features $FEATURE_FLAGS $1 $OPTIONS | cargo2junit
}

cargo_test test::spec::crud > crud.xml
cargo_test test::spec::retryable_reads > retryable_reads.xml
cargo_test test::spec::retryable_writes > retryable_writes.xml
cargo_test test::spec::versioned_api > versioned_api.xml
cargo_test test::spec::sessions > sessions.xml
cargo_test test::spec::transactions > transactions.xml
cargo_test test::spec::load_balancers > load_balancers.xml
cargo_test test::cursor > cursor.xml

junit-report-merger results.xml crud.xml retryable_reads.xml retryable_writes.xml versioned_api.xml sessions.xml transactions.xml load_balancers.xml cursor.xml
5 changes: 5 additions & 0 deletions src/client/options/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1205,6 +1205,9 @@ impl ClientOptions {
/// Applies the options in other to these options if a value is not already present
#[cfg(test)]
pub(crate) fn merge(&mut self, other: ClientOptions) {
if self.hosts.is_empty() {
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm not super thrilled about the behavior here but I can't think of a clean solution.

  • TestClient and EventClient have a with_additional_options constructor fn that, semantically, takes a passed-in ClientOptions and merges in the set of "test-default" options there for any unset fields, with preference given to the passed-in fields where both were set.
  • Prior to this change, hosts was not included in the merge, so the passed-in ClientOptions would always determine hosts. This caused the label_not_added_first_read_error / label_not_added_second_read_error tests to break on serverless because those tests were not setting hosts and the default value was not valid in those cases.
  • Ideally, the semantics for hosts would be the same as for the rest of the fields - if unset, it would use the "test default". However, because it's populated in the builder by default, there's no way to actually determine "if unset"; testing against the default value causes other tests to fail because those are specifically constructing a hosts vec that, coincidentally, is equal to the default value, and should not be overridden by the test default.
  • Again ideally, this could be fixed by changing hosts to an Option<Vec<_>> or not populating it by default so an empty vec (`vec![]`) would be a reasonable signal for "unset", but either of those would be a breaking change.
  • Calling with_additional_options with a ClientOptions constructed via builder with the default hosts is a nasty pitfall that's easy to hit - from the call site it looks like you'll be constructing a client with only the options specified, and the default hosts value will coincidentally work in most but not all testing environments.
  • Existing tests with the exception of the two that broke avoid this by either explicitly populating hosts or copying the ClientOptions from the CLIENT_OPTIONS value.

self.hosts = other.hosts;
}
merge_options!(
other,
self,
Expand All @@ -1218,6 +1221,7 @@ impl ClientOptions {
direct_connection,
driver_info,
heartbeat_freq,
load_balanced,
local_threshold,
max_idle_time,
max_pool_size,
Expand All @@ -1230,6 +1234,7 @@ impl ClientOptions {
server_api,
server_selection_timeout,
socket_timeout,
test_options,
tls,
write_concern,
original_srv_info,
Expand Down
73 changes: 39 additions & 34 deletions src/coll/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1078,46 +1078,51 @@ where

n_attempted += current_batch_size;
}
Err(e) => match *e.kind {
ErrorKind::BulkWrite(bw) => {
// for ordered inserts this size will be incorrect, but knowing the batch
// size isn't needed for ordered failures since we
// return immediately from them anyways.
let current_batch_size = bw.inserted_ids.len()
+ bw.write_errors.as_ref().map(|we| we.len()).unwrap_or(0);

let failure_ref =
cumulative_failure.get_or_insert_with(BulkWriteFailure::new);
if let Some(write_errors) = bw.write_errors {
for err in write_errors {
let index = n_attempted + err.index;

failure_ref
.write_errors
.get_or_insert_with(Default::default)
.push(BulkWriteError { index, ..err });
Err(e) => {
let labels = e.labels().clone();
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Github diff is misleading here - the only change is cloning the labels before the match to accommodate the labels field no longer being visible.

match *e.kind {
ErrorKind::BulkWrite(bw) => {
// for ordered inserts this size will be incorrect, but knowing the
// batch size isn't needed for ordered
// failures since we return immediately from
// them anyways.
let current_batch_size = bw.inserted_ids.len()
+ bw.write_errors.as_ref().map(|we| we.len()).unwrap_or(0);

let failure_ref =
cumulative_failure.get_or_insert_with(BulkWriteFailure::new);
if let Some(write_errors) = bw.write_errors {
for err in write_errors {
let index = n_attempted + err.index;

failure_ref
.write_errors
.get_or_insert_with(Default::default)
.push(BulkWriteError { index, ..err });
}
}
}

if let Some(wc_error) = bw.write_concern_error {
failure_ref.write_concern_error = Some(wc_error);
}

error_labels.extend(e.labels);
if let Some(wc_error) = bw.write_concern_error {
failure_ref.write_concern_error = Some(wc_error);
}

if ordered {
// this will always be true since we invoked get_or_insert_with above.
if let Some(failure) = cumulative_failure {
return Err(Error {
kind: Box::new(ErrorKind::BulkWrite(failure)),
labels: error_labels,
});
error_labels.extend(labels);

if ordered {
// this will always be true since we invoked get_or_insert_with
// above.
if let Some(failure) = cumulative_failure {
return Err(Error::new(
ErrorKind::BulkWrite(failure),
Some(error_labels),
));
}
}
n_attempted += current_batch_size;
}
n_attempted += current_batch_size;
_ => return Err(e),
}
_ => return Err(e),
},
}
}
}

Expand Down
39 changes: 31 additions & 8 deletions src/error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -45,16 +45,20 @@ pub type Result<T> = std::result::Result<T, Error>;
pub struct Error {
/// The type of error that occurred.
pub kind: Box<ErrorKind>,
pub(crate) labels: HashSet<String>,
labels: HashSet<String>,
}

impl Error {
/// Constructs an `Error` from the given kind and optional iterator of labels.
///
/// Any labels carried by a write concern error inside `kind` are merged into
/// the top-level label set as a workaround for the Atlas Proxy not returning
/// toplevel error labels (TODO CLOUDP-105256: remove once fixed).
pub(crate) fn new(kind: ErrorKind, labels: Option<impl IntoIterator<Item = String>>) -> Self {
    let mut all_labels: HashSet<String> = match labels {
        Some(provided) => provided.into_iter().collect(),
        None => HashSet::new(),
    };
    if let Some(wc_error) = kind.get_write_concern_error() {
        all_labels.extend(wc_error.labels.iter().cloned());
    }
    Self {
        kind: Box::new(kind),
        labels: all_labels,
    }
}

Expand Down Expand Up @@ -318,10 +322,7 @@ where
ErrorKind: From<E>,
{
fn from(err: E) -> Self {
Self {
kind: Box::new(err.into()),
labels: Default::default(),
}
Error::new(err.into(), None::<Option<String>>)
}
}

Expand Down Expand Up @@ -433,6 +434,22 @@ pub enum ErrorKind {
IncompatibleServer { message: String },
}

impl ErrorKind {
    /// Returns a reference to the write concern error contained in this kind,
    /// if there is one.
    // This is only used as part of a workaround to Atlas Proxy not returning
    // toplevel error labels.
    // TODO CLOUDP-105256 Remove this when Atlas Proxy error label behavior is fixed.
    fn get_write_concern_error(&self) -> Option<&WriteConcernError> {
        if let ErrorKind::Write(WriteFailure::WriteConcernError(err)) = self {
            return Some(err);
        }
        if let ErrorKind::BulkWrite(failure) = self {
            return failure.write_concern_error.as_ref();
        }
        None
    }
}

/// An error that occurred due to a database command failing.
#[derive(Clone, Debug, Deserialize)]
#[non_exhaustive]
Expand Down Expand Up @@ -473,6 +490,12 @@ pub struct WriteConcernError {
/// A document identifying the write concern setting related to the error.
#[serde(rename = "errInfo")]
pub details: Option<Document>,

/// Labels categorizing the error.
// TODO CLOUDP-105256 Remove this when the Atlas Proxy properly returns
// error labels at the top level.
#[serde(rename = "errorLabels", default)]
pub(crate) labels: Vec<String>,
}

/// An error that occurred during a write operation that wasn't due to being unable to satisfy a
Expand Down
1 change: 1 addition & 0 deletions src/operation/delete/test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -193,6 +193,7 @@ async fn handle_write_concern_failure() {
"wtimeout": 0,
"provenance": "clientSupplied"
} }),
labels: vec![],
};
assert_eq!(wc_error, &expected_wc_err);
}
Expand Down
1 change: 1 addition & 0 deletions src/operation/insert/test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -356,6 +356,7 @@ async fn handle_write_failure() {
"wtimeout": 0,
"provenance": "clientSupplied"
} }),
labels: vec![],
};
assert_eq!(write_concern_error, expected_wc_err);

Expand Down
1 change: 1 addition & 0 deletions src/operation/update/test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -258,6 +258,7 @@ async fn handle_write_concern_failure() {
"wtimeout": 0,
"provenance": "clientSupplied"
} }),
labels: vec![],
};
assert_eq!(wc_error, &expected_wc_err);
}
Expand Down
Loading