Skip to content

Commit

Permalink
adds integration tests
Browse files Browse the repository at this point in the history
Signed-off-by: Ahmar Suhail <ahmar.suhail@gmail.com>
  • Loading branch information
ahmarsuhail committed Jul 19, 2023
1 parent e92e4ad commit 9068457
Show file tree
Hide file tree
Showing 5 changed files with 151 additions and 8 deletions.
4 changes: 4 additions & 0 deletions mountpoint-s3-client/src/mock_client.rs
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,10 @@ impl MockClient {
/// Reports whether a multi-part upload for `key` is currently in flight
/// on this mock client. Intended for test assertions.
pub fn is_upload_in_progress(&self, key: &str) -> bool {
    let uploads = self.in_progress_uploads.read().unwrap();
    uploads.contains(key)
}

/// Returns the storage class recorded for `key` in the mock bucket.
///
/// # Panics
///
/// Panics with a descriptive message if `key` does not exist — this is a
/// test helper, and a missing key indicates a broken test setup.
pub fn get_object_storage_class(&self, key: &str) -> String {
    self.objects
        .read()
        .unwrap()
        .get(key)
        .expect("object must exist in the mock bucket to query its storage class")
        .storage_class
        .to_owned()
}
}

#[derive(Clone)]
Expand Down
40 changes: 40 additions & 0 deletions mountpoint-s3-client/tests/put_object.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@ use mountpoint_s3_client::S3CrtClient;
use mountpoint_s3_crt::checksums::crc32c;
use rand::Rng;

use test_case::test_case;

// Simple test for PUT object. Puts a single, small object as a single part and checks that the
// contents are correct with a GET.
async fn test_put_object(client: &impl ObjectClient, bucket: &str, prefix: &str) {
Expand Down Expand Up @@ -231,3 +233,41 @@ async fn test_put_checksums() {
assert_eq!(checksum, encoded);
}
}

#[test_case("INTELLIGENT_TIERING")]
#[test_case("GLACIER")]
#[tokio::test]
async fn test_put_object_with_storage_class(storage_class: &str) {
    const PART_SIZE: usize = 5 * 1024 * 1024;
    let mut rng = rand::thread_rng();
    // Use this test's own name as the key prefix (was copy-pasted from
    // test_put_checksums, which made the two tests share a prefix).
    let (bucket, prefix) = get_test_bucket_and_prefix("test_put_object_with_storage_class");
    let client_config = S3ClientConfig::new().part_size(PART_SIZE);
    let client = S3CrtClient::new(&get_test_region(), client_config).expect("could not create test client");
    let key = format!("{prefix}hello");

    let mut contents = vec![0u8; 32];
    rng.fill(&mut contents[..]);

    // PUT the object with an explicit storage class.
    let put_object_params = PutObjectParams::new().storage_class(Some(storage_class.to_owned()));
    let mut request = client
        .put_object(&bucket, &key, &put_object_params)
        .await
        .expect("put_object should succeed");

    request.write(&contents).await.unwrap();
    request.complete().await.unwrap();

    // Verify via the AWS SDK that the stored object carries the requested class.
    let attributes = get_test_sdk_client()
        .await
        .get_object_attributes()
        .bucket(bucket)
        .key(key)
        .object_attributes(aws_sdk_s3::model::ObjectAttributes::StorageClass)
        .send()
        .await
        .unwrap();

    assert_eq!(attributes.storage_class.unwrap().as_str(), storage_class);
}
11 changes: 4 additions & 7 deletions mountpoint-s3/src/upload.rs
Original file line number Diff line number Diff line change
Expand Up @@ -145,13 +145,12 @@ mod tests {
let bucket = "bucket";
let name = "hello";
let key = name;
let storage_class = "INTELLIGENT_TIERING";

let client = Arc::new(MockClient::new(MockClientConfig {
bucket: bucket.to_owned(),
part_size: 32,
}));
let uploader = Uploader::new(client.clone(), storage_class);
let uploader = Uploader::new(client.clone(), None);
let request = uploader.put(bucket, key).await.unwrap();

assert!(!client.contains_key(key));
Expand All @@ -174,7 +173,7 @@ mod tests {
bucket: bucket.to_owned(),
part_size: 32,
}));
let uploader = Uploader::new(client.clone(), storage_class);
let uploader = Uploader::new(client.clone(), Some(storage_class.to_owned()));

let mut request = uploader.put(bucket, key).await.unwrap();

Expand Down Expand Up @@ -204,7 +203,6 @@ mod tests {
let bucket = "bucket";
let name = "hello";
let key = name;
let storage_class = "INTELLIGENT_TIERING";

let client = Arc::new(MockClient::new(MockClientConfig {
bucket: bucket.to_owned(),
Expand All @@ -223,7 +221,7 @@ mod tests {
put_failures,
));

let uploader = Uploader::new(failure_client.clone(), storage_class);
let uploader = Uploader::new(failure_client.clone(), None);

// First request fails on first write.
{
Expand Down Expand Up @@ -261,13 +259,12 @@ mod tests {
let bucket = "bucket";
let name = "hello";
let key = name;
let storage_class = "INTELLIGENT_TIERING";

let client = Arc::new(MockClient::new(MockClientConfig {
bucket: bucket.to_owned(),
part_size: PART_SIZE,
}));
let uploader = Uploader::new(client.clone(), storage_class);
let uploader = Uploader::new(client.clone(), None);
let mut request = uploader.put(bucket, key).await.unwrap();

let successful_writes = PART_SIZE * MAX_S3_MULTIPART_UPLOAD_PARTS / write_size;
Expand Down
23 changes: 23 additions & 0 deletions mountpoint-s3/tests/fuse_tests/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,8 @@ pub trait TestClient {
fn contains_key(&self, key: &str) -> Result<bool, Box<dyn std::error::Error>>;

fn is_upload_in_progress(&self, key: &str) -> Result<bool, Box<dyn std::error::Error>>;

fn get_storage_class(&self, key: &str) -> Result<String, Box<dyn std::error::Error>>;
}

pub type TestClientBox = Box<dyn TestClient>;
Expand Down Expand Up @@ -141,6 +143,11 @@ mod mock_session {
let full_key = format!("{}{}", self.prefix, key);
Ok(self.client.is_upload_in_progress(&full_key))
}

/// Looks up the storage class the mock client recorded for `key`
/// (resolved relative to this session's prefix).
fn get_storage_class(&self, key: &str) -> Result<String, Box<dyn std::error::Error>> {
    let prefixed_key = format!("{}{}", self.prefix, key);
    let storage_class = self.client.get_object_storage_class(&prefixed_key);
    Ok(storage_class)
}
}
}

Expand Down Expand Up @@ -292,6 +299,22 @@ mod s3_session {
.map(|output| output.uploads().map_or(0, |u| u.len()) > 0)
.map_err(|e| Box::new(e) as Box<dyn std::error::Error>)
}

/// Queries S3 (via the AWS SDK) for the storage class of `key`, resolved
/// relative to this session's prefix.
///
/// Returns an error if the GetObjectAttributes call fails or if the
/// response carries no storage class, instead of panicking inside a
/// `Result`-returning function as before.
fn get_storage_class(&self, key: &str) -> Result<String, Box<dyn std::error::Error>> {
    let full_key = format!("{}{}", self.prefix, key);
    let attributes = tokio_block_on(
        self.sdk_client
            .get_object_attributes()
            .bucket(&self.bucket)
            .key(full_key)
            .object_attributes(aws_sdk_s3::model::ObjectAttributes::StorageClass)
            .send(),
    )
    .map_err(|e| Box::new(e) as Box<dyn std::error::Error>)?;
    let storage_class = attributes
        .storage_class
        .ok_or("GetObjectAttributes response did not include a storage class")?;
    Ok(storage_class.as_str().to_owned())
}
}
}
/// Take a `read_dir` iterator and return the entry names
Expand Down
81 changes: 80 additions & 1 deletion mountpoint-s3/tests/fuse_tests/write_test.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
use std::fs::{metadata, read, read_dir, File};
use std::io::{ErrorKind, Read, Seek, Write};
use std::os::unix::prelude::OpenOptionsExt;
use std::path::Path;
use std::path::{Path, PathBuf};
use std::time::Duration;

use fuser::BackgroundSession;
Expand All @@ -10,6 +10,8 @@ use rand_chacha::ChaCha20Rng;
use tempfile::TempDir;
use test_case::test_case;

use mountpoint_s3::S3FilesystemConfig;

use crate::fuse_tests::{read_dir_to_entry_names, TestClientBox, TestSessionConfig};

fn open_for_write(path: impl AsRef<Path>, append: bool) -> std::io::Result<File> {
Expand Down Expand Up @@ -375,6 +377,70 @@ where
assert_eq!(err.raw_os_error(), Some(libc::ENOENT));
}


/// Mounts a filesystem configured with `storage_class`, writes a small file
/// through the mount, and asserts the resulting S3 object carries that class.
fn write_with_storage_class_test<F>(creator_fn: F, storage_class: &str)
where
    F: FnOnce(&str, TestSessionConfig) -> (TempDir, BackgroundSession, TestClientBox),
{
    const KEY: &str = "new.txt";

    // Configure the mount to tag uploads with the requested storage class.
    let filesystem_config = S3FilesystemConfig {
        storage_class: Some(storage_class.to_owned()),
        ..Default::default()
    };
    let config = TestSessionConfig {
        filesystem_config,
        ..Default::default()
    };
    let (mount_point, _session, test_client) = creator_fn("write_with_storage_class_test", config);

    // Write and flush a small payload; dropping the handle completes the upload.
    let path = mount_point.path().join(KEY);
    let data = [0xaa; 16];
    {
        let mut file = open_for_write(&path, false).unwrap();
        file.write_all(&data).unwrap();
        file.sync_all().unwrap();
    }

    // The stored object must report the storage class we configured.
    assert_eq!(test_client.get_storage_class(KEY).unwrap(), storage_class);
}


/// Mounts a filesystem configured with an invalid `storage_class` and asserts
/// that writing a file through the mount fails (S3 rejects the upload).
fn write_with_invalid_storage_class_test<F>(creator_fn: F, storage_class: &str)
where
    F: FnOnce(&str, TestSessionConfig) -> (TempDir, BackgroundSession, TestClientBox),
{
    const KEY: &str = "new.txt";

    let filesystem_config: S3FilesystemConfig = S3FilesystemConfig {
        storage_class: Some(storage_class.to_owned()),
        ..Default::default()
    };

    let config = TestSessionConfig {
        filesystem_config,
        ..Default::default()
    };
    // Use this test's own session name (was copy-pasted from
    // write_with_storage_class_test, which made both tests share a prefix).
    let (mount_point, _session, _test_client) =
        creator_fn("write_with_invalid_storage_class_test", config);

    let path = mount_point.path().join(KEY);
    write_file(path).expect_err("write with invalid storage class should fail");
}

fn write_file(path: PathBuf) -> std::io::Result<()> {
let mut f = open_for_write(&path, false)?;
let data = [0xaa; 16];
f.write_all(&data)?;
Ok(())
}

#[cfg(feature = "s3_tests")]
#[test_case(-1; "earlier offset")]
#[test_case(1; "later offset")]
Expand All @@ -387,3 +453,16 @@ fn out_of_order_write_test_s3(offset: i64) {
fn out_of_order_write_test_mock(offset: i64) {
out_of_order_write_test(crate::fuse_tests::mock_session::new, offset);
}

// Verifies (against real S3, hence the feature gate) that a file written
// through the mount is stored with the filesystem's configured storage class.
#[cfg(feature = "s3_tests")]
#[test_case("INTELLIGENT_TIERING")]
#[test_case("GLACIER")]
fn write_with_storage_class_test_s3(storage_class: &str) {
    write_with_storage_class_test(crate::fuse_tests::s3_session::new, storage_class);
}

// Only real S3 rejects an unknown storage class, and `s3_session::new` is
// only available behind the s3_tests feature — gate this test like the
// sibling S3-only tests in this file (the attribute was missing, which
// would break builds without the feature enabled).
#[cfg(feature = "s3_tests")]
#[test_case("INVALID_CLASS")]
fn write_with_invalid_storage_class_test_s3(storage_class: &str) {
    write_with_invalid_storage_class_test(crate::fuse_tests::s3_session::new, storage_class);
}

0 comments on commit 9068457

Please sign in to comment.