diff --git a/.github/workflows/adhoc-forge.yaml b/.github/workflows/adhoc-forge.yaml index 9ebd5f2bfa185a..ac71f0f1d57e63 100644 --- a/.github/workflows/adhoc-forge.yaml +++ b/.github/workflows/adhoc-forge.yaml @@ -36,6 +36,11 @@ on: default: false type: boolean description: enable haproxy for the forge test + FORGE_ENABLE_INDEXER: + required: false + default: false + type: boolean + description: enable indexer for the forge test FORGE_NUM_VALIDATORS: required: false type: string @@ -68,6 +73,7 @@ jobs: echo "FORGE_TEST_SUITE: ${{ inputs.FORGE_TEST_SUITE }}" echo "FORGE_CLUSTER_NAME: ${{ inputs.FORGE_CLUSTER_NAME }}" echo "FORGE_ENABLE_HAPROXY: ${{ inputs.FORGE_ENABLE_HAPROXY }}" + echo "FORGE_ENABLE_INDEXER: ${{ inputs.FORGE_ENABLE_INDEXER }}" echo "FORGE_NUM_VALIDATORS: ${{ inputs.FORGE_NUM_VALIDATORS }}" echo "FORGE_NUM_VALIDATOR_FULLNODES: ${{ inputs.FORGE_NUM_VALIDATOR_FULLNODES }}" echo "FORGE_RETAIN_DEBUG_LOGS: ${{ inputs.FORGE_RETAIN_DEBUG_LOGS }}" @@ -79,6 +85,7 @@ jobs: forgeTestSuite: ${{ inputs.FORGE_TEST_SUITE }} forgeClusterName: ${{ inputs.FORGE_CLUSTER_NAME }} forgeEnableHaproxy: ${{ inputs.FORGE_ENABLE_HAPROXY }} + forgeEnableIndexer: ${{ inputs.FORGE_ENABLE_INDEXER }} forgeNumValidators: ${{ inputs.FORGE_NUM_VALIDATORS }} forgeNumValidatorFullnodes: ${{ inputs.FORGE_NUM_VALIDATOR_FULLNODES }} @@ -94,6 +101,7 @@ jobs: FORGE_RUNNER_DURATION_SECS: ${{ fromJSON(needs.determine-forge-run-metadata.outputs.forgeRunnerDurationSecs) }} # fromJSON converts to integer FORGE_CLUSTER_NAME: ${{ needs.determine-forge-run-metadata.outputs.forgeClusterName }} FORGE_ENABLE_HAPROXY: ${{ needs.determine-forge-run-metadata.outputs.forgeEnableHaproxy }} + FORGE_ENABLE_INDEXER: ${{ needs.determine-forge-run-metadata.outputs.forgeEnableIndexer }} FORGE_NUM_VALIDATORS: ${{ needs.determine-forge-run-metadata.outputs.forgeNumValidators }} FORGE_NUM_VALIDATOR_FULLNODES: ${{ needs.determine-forge-run-metadata.outputs.forgeNumValidatorFullnodes }} FORGE_RETAIN_DEBUG_LOGS: ${{ inputs.FORGE_RETAIN_DEBUG_LOGS }} diff --git a/.github/workflows/workflow-run-forge.yaml b/.github/workflows/workflow-run-forge.yaml index 08735140d6ce6e..84b1a95ea61f0c 100644 --- a/.github/workflows/workflow-run-forge.yaml +++ b/.github/workflows/workflow-run-forge.yaml @@ -57,6 +57,10 @@ on: required: false type: string description: Whether to use HAPRoxy + FORGE_ENABLE_INDEXER: + required: false + type: string + description: Whether to use indexer FORGE_ENABLE_PERFORMANCE: required: false type: string @@ -104,6 +108,7 @@ env: FORGE_RUNNER_DURATION_SECS: ${{ inputs.FORGE_RUNNER_DURATION_SECS }} FORGE_NAMESPACE: ${{ inputs.FORGE_NAMESPACE }} FORGE_ENABLE_HAPROXY: ${{ inputs.FORGE_ENABLE_HAPROXY }} + FORGE_ENABLE_INDEXER: ${{ inputs.FORGE_ENABLE_INDEXER }} FORGE_TEST_SUITE: ${{ inputs.FORGE_TEST_SUITE }} POST_TO_SLACK: ${{ inputs.POST_TO_SLACK }} FORGE_ENABLE_FAILPOINTS: ${{ inputs.FORGE_ENABLE_FAILPOINTS }} diff --git a/testsuite/forge-cli/src/main.rs b/testsuite/forge-cli/src/main.rs index c41e8b0da7b0f3..026a06a02be953 100644 --- a/testsuite/forge-cli/src/main.rs +++ b/testsuite/forge-cli/src/main.rs @@ -140,8 +140,8 @@ enum OperatorCommand { SetNodeImageTag(SetNodeImageTag), /// Clean up an existing cluster CleanUp(CleanUp), - /// Resize an existing cluster - Resize(Resize), + /// Create a new cluster for testing purposes + Create(Create), } #[derive(Parser, Debug)] @@ -193,6 +193,11 @@ struct K8sSwarm { help = "Retain debug logs and above for all nodes instead of just the first 5 nodes" )] retain_debug_logs: bool, + #[clap( 
+ long, + help = "If set, spins up an indexer stack alongside the testnet. Same as --enable-indexer" + )] + enable_indexer: bool, } #[derive(Parser, Debug)] @@ -217,8 +222,8 @@ struct CleanUp { } #[derive(Parser, Debug)] -struct Resize { - #[clap(long, help = "The kubernetes namespace to resize")] +struct Create { + #[clap(long, help = "The kubernetes namespace to create in")] namespace: String, #[clap(long, default_value_t = 30)] num_validators: usize, @@ -227,13 +232,13 @@ struct Resize { #[clap( long, help = "Override the image tag used for validators", - default_value = "devnet" + default_value = "main" )] validator_image_tag: String, #[clap( long, help = "Override the image tag used for testnet-specific components", - default_value = "devnet" + default_value = "main" )] testnet_image_tag: String, #[clap( @@ -248,6 +253,8 @@ struct Resize { connect_directly: bool, #[clap(long, help = "If set, enables HAProxy for each of the validators")] enable_haproxy: bool, + #[clap(long, help = "If set, spins up an indexer stack alongside the testnet")] + enable_indexer: bool, } // common metrics thresholds: @@ -393,6 +400,7 @@ fn main() -> Result<()> { k8s.reuse, k8s.keep, k8s.enable_haproxy, + k8s.enable_indexer, ) .unwrap(), &args.options, @@ -421,19 +429,25 @@ fn main() -> Result<()> { } Ok(()) }, - OperatorCommand::Resize(resize) => { - runtime.block_on(install_testnet_resources( - resize.namespace, - resize.num_validators, - resize.num_fullnodes, - resize.validator_image_tag, - resize.testnet_image_tag, - resize.move_modules_dir, - !resize.connect_directly, - resize.enable_haproxy, - None, - None, - ))?; + OperatorCommand::Create(create) => { + let kube_client = runtime.block_on(create_k8s_client())?; + let era = generate_new_era(); + let values = ForgeDeployerValues { + profile: DEFAULT_FORGE_DEPLOYER_PROFILE.to_string(), + era, + namespace: create.namespace, + indexer_grpc_values: None, + indexer_processor_values: None, + }; + let forge_deployer_manager = + ForgeDeployerManager::from_k8s_client(kube_client, values); + runtime.block_on(forge_deployer_manager.ensure_namespace_prepared())?; + // NOTE: this is generally not going to run from within the cluster, do not perform any operations + // that might require internal DNS resolution to work, such as txn emission directly against the node service IPs. 
+ runtime.block_on(forge_deployer_manager.start(ForgeDeployerType::Testnet))?; + if create.enable_indexer { + runtime.block_on(forge_deployer_manager.start(ForgeDeployerType::Indexer))?; + } Ok(()) }, }, diff --git a/testsuite/forge.py b/testsuite/forge.py index e81539ce0b33ff..613db5ddc80275 100644 --- a/testsuite/forge.py +++ b/testsuite/forge.py @@ -1147,6 +1147,7 @@ def create_forge_command( forge_namespace_reuse: Optional[str], forge_namespace_keep: Optional[str], forge_enable_haproxy: Optional[str], + forge_enable_indexer: Optional[str], cargo_args: Optional[Sequence[str]], forge_cli_args: Optional[Sequence[str]], test_args: Optional[Sequence[str]], @@ -1216,6 +1217,8 @@ def create_forge_command( forge_args.append("--keep") if forge_enable_haproxy == "true": forge_args.append("--enable-haproxy") + if forge_enable_indexer == "true": + forge_args.append("--enable-indexer") if test_args: forge_args.extend(test_args) @@ -1328,6 +1331,7 @@ def seeded_random_choice(namespace: str, cluster_names: Sequence[str]) -> str: @envoption("FORGE_NAMESPACE_KEEP") @envoption("FORGE_NAMESPACE_REUSE") @envoption("FORGE_ENABLE_HAPROXY") +@envoption("FORGE_ENABLE_INDEXER") @envoption("FORGE_ENABLE_FAILPOINTS") @envoption("FORGE_ENABLE_PERFORMANCE") @envoption("FORGE_TEST_SUITE") @@ -1373,6 +1377,7 @@ def test( forge_enable_failpoints: Optional[str], forge_enable_performance: Optional[str], forge_enable_haproxy: Optional[str], + forge_enable_indexer: Optional[str], forge_test_suite: str, forge_runner_duration_secs: str, forge_image_tag: Optional[str], @@ -1598,6 +1603,7 @@ def test( forge_namespace_reuse=forge_namespace_reuse, forge_namespace_keep=forge_namespace_keep, forge_enable_haproxy=forge_enable_haproxy, + forge_enable_indexer=forge_enable_indexer, cargo_args=cargo_args, forge_cli_args=forge_cli_args, test_args=test_args, diff --git a/testsuite/forge/src/backend/k8s/cluster_helper.rs b/testsuite/forge/src/backend/k8s/cluster_helper.rs index a75996c0e4ab34..8e79dc08f2a9ae 100644 --- a/testsuite/forge/src/backend/k8s/cluster_helper.rs +++ b/testsuite/forge/src/backend/k8s/cluster_helper.rs @@ -405,7 +405,7 @@ pub async fn uninstall_testnet_resources(kube_namespace: String) -> Result<()> { Ok(()) } -fn generate_new_era() -> String { +pub fn generate_new_era() -> String { let mut rng = rand::thread_rng(); let r: u8 = rng.gen(); format!("forge{}", r) @@ -826,15 +826,41 @@ fn dump_helm_values_to_file(helm_release_name: &str, tmp_dir: &TempDir) -> Resul #[derive(Error, Debug)] #[error("{0}")] -enum ApiError { +pub enum ApiError { RetryableError(String), FinalError(String), } -async fn create_namespace( +/// Does the same as create_namespace, including handling the 409, but for any k8s resource T +pub async fn maybe_create_k8s_resource<T>( + api: Arc<dyn ReadWrite<T>>, + resource: T, +) -> Result<T, ApiError> +where + T: kube::Resource + Clone + DeserializeOwned + Debug, + <T as kube::Resource>::DynamicType: Default, +{ + if let Err(KubeError::Api(api_err)) = api.create(&PostParams::default(), &resource).await { + if api_err.code == 409 { + info!( + "Resource {} already exists, continuing with it", + resource.name() + ); + } else { + return Err(ApiError::RetryableError(format!( + "Failed to use existing resource {}: {:?}", + resource.name(), + api_err + ))); + } + } + Ok(resource) +} + +pub async fn create_namespace( namespace_api: Arc<dyn ReadWrite<Namespace>>, kube_namespace: String, -) -> Result<(), ApiError> { +) -> Result<Namespace, ApiError> { let kube_namespace_name = kube_namespace.clone(); let namespace = Namespace { metadata: ObjectMeta { @@ -866,7 +892,7 @@ async fn create_namespace( ))); } } - Ok(())
+ Ok(namespace) } pub async fn create_management_configmap( @@ -1067,11 +1093,11 @@ pub fn make_k8s_label(value: String) -> String { #[cfg(test)] mod tests { use super::*; - use crate::FailedNamespacesApi; + use crate::FailedK8sResourceApi; #[tokio::test] async fn test_create_namespace_final_error() { - let namespace_creator = Arc::new(FailedNamespacesApi::from_status_code(401)); + let namespace_creator = Arc::new(FailedK8sResourceApi::from_status_code(401)); let result = create_namespace(namespace_creator, "banana".to_string()).await; match result { Err(ApiError::FinalError(_)) => {}, @@ -1148,7 +1174,7 @@ labels: #[tokio::test] async fn test_create_namespace_retryable_error() { - let namespace_creator = Arc::new(FailedNamespacesApi::from_status_code(403)); + let namespace_creator = Arc::new(FailedK8sResourceApi::from_status_code(403)); let result = create_namespace(namespace_creator, "banana".to_string()).await; match result { Err(ApiError::RetryableError(_)) => {}, diff --git a/testsuite/forge/src/backend/k8s/fullnode.rs b/testsuite/forge/src/backend/k8s/fullnode.rs index 627fcdc758d71b..1d4dadc5c1b406 100644 --- a/testsuite/forge/src/backend/k8s/fullnode.rs +++ b/testsuite/forge/src/backend/k8s/fullnode.rs @@ -513,9 +513,7 @@ pub async fn install_public_fullnode<'a>( #[cfg(test)] mod tests { use super::*; - use crate::{ - MockConfigMapApi, MockPersistentVolumeClaimApi, MockServiceApi, MockStatefulSetApi, - }; + use crate::MockK8sResourceApi; use aptos_config::config::Identity; use aptos_sdk::crypto::{x25519::PrivateKey, Uniform}; use k8s_openapi::apimachinery::pkg::api::resource::Quantity; @@ -728,15 +726,14 @@ mod tests { let version = Version::new(0, "banana".to_string()); // create APIs - let stateful_set_api = Arc::new(MockStatefulSetApi::from_stateful_set( - get_dummy_validator_stateful_set(), + let stateful_set_api: Arc> = Arc::new( + MockK8sResourceApi::from_resource(get_dummy_validator_stateful_set()), + ); + let configmap_api = Arc::new(MockK8sResourceApi::new()); + let persistent_volume_claim_api = Arc::new(MockK8sResourceApi::from_resource( + get_dummy_validator_persistent_volume_claim(), )); - let configmap_api = Arc::new(MockConfigMapApi::from_config_map(ConfigMap::default())); - let persistent_volume_claim_api = - Arc::new(MockPersistentVolumeClaimApi::from_persistent_volume_claim( - get_dummy_validator_persistent_volume_claim(), - )); - let service_api = Arc::new(MockServiceApi::from_service(Service::default())); + let service_api = Arc::new(MockK8sResourceApi::new()); // get the base config and mutate it let mut node_config = get_default_pfn_node_config(); diff --git a/testsuite/forge/src/backend/k8s/kube_api.rs b/testsuite/forge/src/backend/k8s/kube_api.rs index 3ff3103fdff280..df2531c94ec84f 100644 --- a/testsuite/forge/src/backend/k8s/kube_api.rs +++ b/testsuite/forge/src/backend/k8s/kube_api.rs @@ -61,299 +61,148 @@ where #[cfg(test)] pub mod mocks { - use super::*; + use super::ReadWrite; use crate::Result; + use aptos_infallible::Mutex; use async_trait::async_trait; use hyper::StatusCode; - use k8s_openapi::api::{ - apps::v1::StatefulSet, - core::v1::{ConfigMap, Namespace, PersistentVolumeClaim, Pod, Secret, Service}, + use k8s_openapi::Metadata; + use kube::{ + api::{ObjectMeta, PostParams}, + error::ErrorResponse, + Error as KubeError, }; - use kube::{api::PostParams, error::ErrorResponse, Error as KubeError}; + use std::{collections::BTreeMap, sync::Arc}; - // Mock StatefulSet API - - pub struct MockStatefulSetApi { - stateful_set: StatefulSet, - } - - impl 
MockStatefulSetApi { - pub fn from_stateful_set(stateful_set: StatefulSet) -> Self { - MockStatefulSetApi { stateful_set } - } - } - - #[async_trait] - impl ReadWrite for MockStatefulSetApi { - async fn get(&self, name: &str) -> Result { - if self.stateful_set.metadata.name == Some(name.to_string()) { - return Ok(self.stateful_set.clone()); - } - return Err(KubeError::Api(ErrorResponse { - status: "failed".to_string(), - message: format!( - "StatefulSet with name {} could not be found in {:?}", - name, self.stateful_set - ), - reason: "not_found".to_string(), - code: 404, - })); - } - - async fn create( - &self, - _pp: &PostParams, - stateful_set: &StatefulSet, - ) -> Result { - if self.stateful_set.metadata.name == stateful_set.metadata.name { - return Err(KubeError::Api(ErrorResponse { - status: "failed".to_string(), - message: format!( - "StatefulSet with same name already exists in {:?}", - self.stateful_set - ), - reason: "already_exists".to_string(), - code: 409, - })); - } - Ok(self.stateful_set.clone()) - } + /// Generic k8s resource mock API where resource names are unique. Use it to mock namespaced resources or cluster-wide resources, but + /// not resources across multiple namespaces. + #[derive(Clone)] + pub struct MockK8sResourceApi { + resources: Arc>>, } - // Mock Pod API - - pub struct MockPodApi { - pod: Pod, - } - - impl MockPodApi { - pub fn from_pod(pod: Pod) -> Self { - MockPodApi { pod } - } - } - - #[async_trait] - impl ReadWrite for MockPodApi { - async fn get(&self, _name: &str) -> Result { - Ok(self.pod.clone()) - } - - async fn create(&self, _pp: &PostParams, _pod: &Pod) -> Result { - Ok(self.pod.clone()) - } - } - - // Mock ConfigMap API - - pub struct MockConfigMapApi { - config_map: ConfigMap, - } - - impl MockConfigMapApi { - pub fn from_config_map(config_map: ConfigMap) -> Self { - MockConfigMapApi { config_map } - } - } - - #[async_trait] - impl ReadWrite for MockConfigMapApi { - async fn get(&self, name: &str) -> Result { - if self.config_map.metadata.name == Some(name.to_string()) { - return Ok(self.config_map.clone()); - } - return Err(KubeError::Api(ErrorResponse { - status: "failed".to_string(), - message: format!( - "ConfigMap with name {} could not be found in {:?}", - name, self.config_map - ), - reason: "not_found".to_string(), - code: 404, - })); - } - - async fn create( - &self, - _pp: &PostParams, - config_map: &ConfigMap, - ) -> Result { - if self.config_map.metadata.name == config_map.metadata.name { - return Err(KubeError::Api(ErrorResponse { - status: "failed".to_string(), - message: format!( - "ConfigMap with same name already exists in {:?}", - self.config_map - ), - reason: "already_exists".to_string(), - code: 409, - })); + impl MockK8sResourceApi + where + T: Clone + Metadata + Send + Sync, // Ensure T has the necessary traits + { + pub fn new() -> Self { + MockK8sResourceApi { + resources: Arc::new(Mutex::new(BTreeMap::new())), } - Ok(self.config_map.clone()) } - } - - // Mock PersistentVolumeClaim API - pub struct MockPersistentVolumeClaimApi { - persistent_volume_claim: PersistentVolumeClaim, - } - - impl MockPersistentVolumeClaimApi { - pub fn from_persistent_volume_claim( - persistent_volume_claim: PersistentVolumeClaim, - ) -> Self { - MockPersistentVolumeClaimApi { - persistent_volume_claim, + pub fn from_resource(resource: T) -> Self { + let resources = Arc::new(Mutex::new(BTreeMap::new())); + resources.lock().insert( + resource + .metadata() + .name + .as_ref() + .expect("Expected metadata to have name") + .clone(), + 
resource.clone(), + ); + MockK8sResourceApi { resources } + } + + pub fn from_resources(resources_vec: Vec) -> Self { + let resources = Arc::new(Mutex::new(BTreeMap::new())); + for resource in resources_vec { + resources.lock().insert( + resource + .metadata() + .name + .as_ref() + .expect("Expected metadata to have name") + .clone(), + resource.clone(), + ); } + MockK8sResourceApi { resources } } } #[async_trait] - impl ReadWrite for MockPersistentVolumeClaimApi { - async fn get(&self, name: &str) -> Result { - if self.persistent_volume_claim.metadata.name == Some(name.to_string()) { - return Ok(self.persistent_volume_claim.clone()); - } - return Err(KubeError::Api(ErrorResponse { - status: "failed".to_string(), - message: format!( - "PersistentVolumeClaim with name {} could not be found in {:?}", - name, self.persistent_volume_claim - ), - reason: "not_found".to_string(), - code: 404, - })); - } - - async fn create( - &self, - _pp: &PostParams, - persistent_volume_claim: &PersistentVolumeClaim, - ) -> Result { - if self.persistent_volume_claim.metadata.name == persistent_volume_claim.metadata.name { - return Err(KubeError::Api(ErrorResponse { + impl ReadWrite for MockK8sResourceApi + where + T: Clone + Metadata + Send + Sync, // Ensure T has the necessary traits + { + async fn get(&self, name: &str) -> Result { + let resources = self.resources.lock(); + if let Some(resource) = resources.get(name) { + Ok(resource.clone()) + } else { + Err(KubeError::Api(ErrorResponse { status: "failed".to_string(), - message: format!( - "PersistentVolumeClaim with same name already exists in {:?}", - self.persistent_volume_claim - ), - reason: "already_exists".to_string(), - code: 409, - })); - } - Ok(self.persistent_volume_claim.clone()) - } - } - - // Mock Service API - - pub struct MockServiceApi { - service: Service, - } - - impl MockServiceApi { - pub fn from_service(service: Service) -> Self { - MockServiceApi { service } - } - } - - #[async_trait] - impl ReadWrite for MockServiceApi { - async fn get(&self, name: &str) -> Result { - if self.service.metadata.name == Some(name.to_string()) { - return Ok(self.service.clone()); + message: format!("Resource with name {} could not be found", name), + reason: "not_found".to_string(), + code: 404, + })) } - return Err(KubeError::Api(ErrorResponse { - status: "failed".to_string(), - message: format!( - "Service with name {} could not be found in {:?}", - name, self.service - ), - reason: "not_found".to_string(), - code: 404, - })); } - async fn create(&self, _pp: &PostParams, service: &Service) -> Result { - if self.service.metadata.name == service.metadata.name { + async fn create(&self, _pp: &PostParams, resource: &T) -> Result { + let mut resources = self.resources.lock(); + if resources.contains_key( + resource + .metadata() + .name + .as_ref() + .expect("Expected metadata to have name"), + ) { return Err(KubeError::Api(ErrorResponse { status: "failed".to_string(), - message: format!( - "Service with same name already exists in {:?}", - self.service - ), + message: format!("Resource with same name already exists"), reason: "already_exists".to_string(), code: 409, })); } - Ok(self.service.clone()) - } - } - - // Mock Service API - pub struct MockSecretApi { - secret: Option, - } - - impl MockSecretApi { - pub fn from_secret(secret: Option) -> Self { - MockSecretApi { secret } - } - } - - #[async_trait] - impl ReadWrite for MockSecretApi { - async fn get(&self, _name: &str) -> Result { - match self.secret { - Some(ref s) => Ok(s.clone()), - None => 
Err(KubeError::Api(ErrorResponse { - status: "status".to_string(), - message: "message".to_string(), - reason: "reason".to_string(), - code: 404, - })), - } - } - - async fn create(&self, _pp: &PostParams, secret: &Secret) -> Result { - return Ok(secret.clone()); + resources.insert( + resource + .metadata() + .name + .as_ref() + .expect("Expected metadata to have name") + .clone(), + resource.clone(), + ); + Ok(resource.clone()) } } // Mock API that always fails to create a new Namespace - pub struct FailedNamespacesApi { + pub struct FailedK8sResourceApi { status_code: u16, } - impl FailedNamespacesApi { + impl FailedK8sResourceApi { pub fn from_status_code(status_code: u16) -> Self { - FailedNamespacesApi { status_code } + FailedK8sResourceApi { status_code } } } #[async_trait] - impl ReadWrite for FailedNamespacesApi { - async fn get(&self, _name: &str) -> Result { + impl ReadWrite for FailedK8sResourceApi + where + T: Clone + Metadata + Send + Sync, // Ensure T has the necessary traits + { + async fn get(&self, _name: &str) -> Result { let status = StatusCode::from_u16(self.status_code).unwrap(); Err(KubeError::Api(ErrorResponse { status: status.to_string(), code: status.as_u16(), - message: "Failed to get namespace".to_string(), + message: "Failed to get resource".to_string(), reason: "Failed to parse error data".into(), })) } - async fn create( - &self, - _pp: &PostParams, - _namespace: &Namespace, - ) -> Result { + async fn create(&self, _pp: &PostParams, _resource: &T) -> Result { let status = StatusCode::from_u16(self.status_code).unwrap(); Err(KubeError::Api(ErrorResponse { status: status.to_string(), code: status.as_u16(), - message: "Failed to create namespace".to_string(), + message: "Failed to create resource".to_string(), reason: "Failed to parse error data".into(), })) } diff --git a/testsuite/forge/src/backend/k8s/mod.rs b/testsuite/forge/src/backend/k8s/mod.rs index 105495ca66f683..b2496b89d14426 100644 --- a/testsuite/forge/src/backend/k8s/mod.rs +++ b/testsuite/forge/src/backend/k8s/mod.rs @@ -19,6 +19,9 @@ pub mod prometheus; mod stateful_set; mod swarm; +use super::{ + ForgeDeployerManager, ForgeDeployerType, ForgeDeployerValues, DEFAULT_FORGE_DEPLOYER_PROFILE, +}; use aptos_sdk::crypto::ed25519::ED25519_PRIVATE_KEY_LENGTH; pub use cluster_helper::*; pub use constants::*; @@ -39,6 +42,7 @@ pub struct K8sFactory { reuse: bool, keep: bool, enable_haproxy: bool, + enable_indexer: bool, } impl K8sFactory { @@ -50,6 +54,7 @@ impl K8sFactory { reuse: bool, keep: bool, enable_haproxy: bool, + enable_indexer: bool, ) -> Result { let root_key: [u8; ED25519_PRIVATE_KEY_LENGTH] = hex::decode(DEFAULT_ROOT_PRIV_KEY)?.try_into().unwrap(); @@ -78,6 +83,7 @@ impl K8sFactory { reuse, keep, enable_haproxy, + enable_indexer, }) } } @@ -148,14 +154,14 @@ impl Factory for K8sFactory { // We return early here if there are not enough PVs to claim. check_persistent_volumes( - kube_client, + kube_client.clone(), num_validators.get() + num_fullnodes, existing_db_tag, ) .await?; } // try installing testnet resources, but clean up if it fails - match install_testnet_resources( + let (new_era, validators, fullnodes) = match install_testnet_resources( self.kube_namespace.clone(), num_validators.get(), num_fullnodes, @@ -174,7 +180,29 @@ impl Factory for K8sFactory { uninstall_testnet_resources(self.kube_namespace.clone()).await?; bail!(e); }, + }; + + // add an indexer too! 
+ if self.enable_indexer { + // NOTE: by default, use a deploy profile and no additional configuration values + let values = ForgeDeployerValues { + profile: DEFAULT_FORGE_DEPLOYER_PROFILE.to_string(), + era: new_era.clone().expect("Era not set in created testnet"), + namespace: self.kube_namespace.clone(), + indexer_grpc_values: None, + indexer_processor_values: None, + }; + + let forge_deployer_manager = + ForgeDeployerManager::from_k8s_client(kube_client.clone(), values); + + forge_deployer_manager.ensure_namespace_prepared().await?; + forge_deployer_manager + .start(ForgeDeployerType::Indexer) + .await?; } + + (new_era, validators, fullnodes) }; let swarm = K8sSwarm::new( diff --git a/testsuite/forge/src/backend/k8s/prometheus.rs b/testsuite/forge/src/backend/k8s/prometheus.rs index 93367f590fbe06..88a1757244c20d 100644 --- a/testsuite/forge/src/backend/k8s/prometheus.rs +++ b/testsuite/forge/src/backend/k8s/prometheus.rs @@ -212,7 +212,7 @@ pub async fn query_range_with_metadata( #[cfg(test)] mod tests { use super::*; - use crate::MockSecretApi; + use crate::MockK8sResourceApi; use k8s_openapi::ByteString; use kube::api::ObjectMeta; use prometheus_http_query::Error as PrometheusError; @@ -223,7 +223,7 @@ mod tests { #[tokio::test] async fn test_create_client_secret() { - let secret_api = Arc::new(MockSecretApi::from_secret(Some(Secret { + let secret_api = Arc::new(MockK8sResourceApi::from_resource(Secret { metadata: ObjectMeta { name: Some("prometheus-read-only".to_string()), ..ObjectMeta::default() @@ -241,7 +241,7 @@ mod tests { string_data: None, type_: None, immutable: None, - }))); + })); create_prometheus_client_from_environment(secret_api) .await @@ -250,7 +250,7 @@ mod tests { #[tokio::test] async fn test_create_client_none() { - let secret_api = Arc::new(MockSecretApi::from_secret(None)); + let secret_api = Arc::new(MockK8sResourceApi::new()); create_prometheus_client_from_environment(secret_api) .await @@ -259,7 +259,7 @@ mod tests { #[tokio::test] async fn test_create_client_env() { - let secret_api = Arc::new(MockSecretApi::from_secret(None)); + let secret_api = Arc::new(MockK8sResourceApi::new()); env::set_var("PROMETHEUS_URL", "http://prometheus.site"); diff --git a/testsuite/forge/src/backend/k8s/stateful_set.rs b/testsuite/forge/src/backend/k8s/stateful_set.rs index 46079e451162d7..5ec0870814f768 100644 --- a/testsuite/forge/src/backend/k8s/stateful_set.rs +++ b/testsuite/forge/src/backend/k8s/stateful_set.rs @@ -314,7 +314,7 @@ pub async fn check_for_container_restart( #[cfg(test)] mod tests { use super::*; - use crate::{MockPodApi, MockStatefulSetApi}; + use crate::MockK8sResourceApi; use k8s_openapi::{ api::{ apps::v1::{StatefulSet, StatefulSetSpec, StatefulSetStatus}, @@ -327,7 +327,7 @@ mod tests { async fn test_check_stateful_set_status() { // mock a StatefulSet with 0/1 replicas // this should then mean we check the underlying pod to see what's up - let stateful_set_api = Arc::new(MockStatefulSetApi::from_stateful_set(StatefulSet { + let stateful_set_api = Arc::new(MockK8sResourceApi::from_resource(StatefulSet { metadata: ObjectMeta { name: Some("test-stateful-set".to_string()), ..ObjectMeta::default() @@ -344,8 +344,12 @@ mod tests { })); // we should retry if the pod status is not explicitly bad - let pod_default_api = Arc::new(MockPodApi::from_pod(Pod { + let pod_default_api = Arc::new(MockK8sResourceApi::from_resource(Pod { status: Some(PodStatus::default()), + metadata: ObjectMeta { + name: Some("test-stateful-set-0".to_string()), + ..ObjectMeta::default() 
+ }, ..Pod::default() })); let ret = check_stateful_set_status( @@ -361,7 +365,7 @@ )); // the pod explicitly has a bad status, so we should fail fast - let pod_default_api = Arc::new(MockPodApi::from_pod(Pod { + let pod_default_api = Arc::new(MockK8sResourceApi::from_resource(Pod { metadata: ObjectMeta { name: Some("test-stateful-set-0".to_string()), ..ObjectMeta::default() diff --git a/testsuite/forge/src/backend/k8s_deployer/README.md b/testsuite/forge/src/backend/k8s_deployer/README.md new file mode 100644 index 00000000000000..6761113324260c --- /dev/null +++ b/testsuite/forge/src/backend/k8s_deployer/README.md @@ -0,0 +1,14 @@ +# Forge K8s Deployer Backend + +This backend manages Forge "deployers", which are k8s jobs that spin up the necessary k8s infrastructure for Forge tests to run. +This mostly involves managing the state of the Forge namespace, ancillary resources like configmaps, and the deployer jobs themselves. + +Forge deployers: + +- Each deploys a single "component" of Forge infra, which may depend on other components or resources. For example, this can be an indexer stack, which in turn relies on a testnet stack to exist. +- Can take in customization values via the env var `FORGE_DEPLOY_VALUES_JSON` +- Have a known values schema, but mostly rely on a "profile" that is suitable for most tests and contains sane default values + +## Implementation Notes + +Forge deployers require access to create namespaces, service accounts, rolebindings, etc., and they grant the `cluster-admin` clusterrole to the namespaces they create. As such, Forge should always be run in an isolated k8s cluster. diff --git a/testsuite/forge/src/backend/k8s_deployer/constants.rs b/testsuite/forge/src/backend/k8s_deployer/constants.rs new file mode 100644 index 00000000000000..321f3da2a08f62 --- /dev/null +++ b/testsuite/forge/src/backend/k8s_deployer/constants.rs @@ -0,0 +1,18 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +pub const FORGE_INDEXER_DEPLOYER_DOCKER_IMAGE_REPO: &str = + "us-docker.pkg.dev/aptos-registry/docker/forge-indexer-deployer"; +pub const FORGE_TESTNET_DEPLOYER_DOCKER_IMAGE_REPO: &str = + "us-docker.pkg.dev/aptos-registry/docker/forge-testnet-deployer"; + +/// The version of the forge deployer image to use. +pub const FORGE_DEPLOYER_IMAGE_TAG: &str = "main"; // default to the latest stable build from the main branch + +/// This is the service account name that the deployer will use to deploy the forge components.
It may require extra permissions and additional setup +pub const FORGE_DEPLOYER_SERVICE_ACCOUNT_NAME: &str = "forge"; + +/// This is the environment variable that is required to be set in the pod to provide the deployer with its configuration values +pub const FORGE_DEPLOYER_VALUES_ENV_VAR_NAME: &str = "FORGE_DEPLOY_VALUES_JSON"; + +pub const DEFAULT_FORGE_DEPLOYER_PROFILE: &str = "large"; diff --git a/testsuite/forge/src/backend/k8s_deployer/deployer.rs b/testsuite/forge/src/backend/k8s_deployer/deployer.rs new file mode 100644 index 00000000000000..0be557cf6d8a2a --- /dev/null +++ b/testsuite/forge/src/backend/k8s_deployer/deployer.rs @@ -0,0 +1,444 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::{ + FORGE_DEPLOYER_IMAGE_TAG, FORGE_DEPLOYER_SERVICE_ACCOUNT_NAME, + FORGE_DEPLOYER_VALUES_ENV_VAR_NAME, FORGE_INDEXER_DEPLOYER_DOCKER_IMAGE_REPO, + FORGE_TESTNET_DEPLOYER_DOCKER_IMAGE_REPO, +}; +use crate::{maybe_create_k8s_resource, K8sApi, ReadWrite, Result}; +use k8s_openapi::api::{ + batch::v1::Job, + core::v1::{ConfigMap, Namespace, ServiceAccount}, + rbac::v1::RoleBinding, +}; +use kube::{ + api::{ObjectMeta, PostParams}, + ResourceExt, +}; +use serde::{Deserialize, Serialize}; +use std::{collections::BTreeMap, fmt, sync::Arc}; + +/// These are the values that the forge deployer needs to deploy the forge components to the k8s cluster. +/// There are global values such as profile, era, and namespace as well as component-specific values +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct ForgeDeployerValues { + pub profile: String, + pub era: String, + pub namespace: String, + + // component specific values + // TODO: add an options reference. Ideally this customization is almost always optional and instead handled by the profiles + pub indexer_grpc_values: Option, + pub indexer_processor_values: Option, +} + +/// The ForgeDeployerManager is responsible for managing the lifecycle of forge deployers, which deploy the +/// forge components to the k8s cluster. +pub struct ForgeDeployerManager { + // all the k8s APIs we need.
Specifying each API separately allows for easier testing + pub jobs_api: Arc>, + pub config_maps_api: Arc>, + pub namespace_api: Arc>, + pub serviceaccount_api: Arc>, + pub rolebinding_api: Arc>, + + // the values to use for the deployer, including namespace, era, etc + pub values: ForgeDeployerValues, +} + +#[derive(Clone, Copy)] +pub enum ForgeDeployerType { + Indexer, + Testnet, +} + +impl fmt::Display for ForgeDeployerType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + ForgeDeployerType::Indexer => write!(f, "indexer"), + ForgeDeployerType::Testnet => write!(f, "testnet"), + } + } +} + +impl ForgeDeployerManager { + pub fn from_k8s_client(kube_client: kube::Client, values: ForgeDeployerValues) -> Self { + let jobs_api = Arc::new(K8sApi::from_client( + kube_client.clone(), + Some(values.namespace.clone()), + )); + let config_maps_api = Arc::new(K8sApi::from_client( + kube_client.clone(), + Some(values.namespace.clone()), + )); + let namespace_api = Arc::new(K8sApi::from_client(kube_client.clone(), None)); + let serviceaccount_api = Arc::new(K8sApi::from_client( + kube_client.clone(), + Some(values.namespace.clone()), + )); + let rolebinding_api = Arc::new(K8sApi::from_client( + kube_client.clone(), + Some(values.namespace.clone()), + )); + + // ensure it lives long enough between async + Self { + jobs_api, + config_maps_api, + namespace_api, + serviceaccount_api, + rolebinding_api, + values, + } + } + + /// Given a deployer type return the name to use for k8s components + /// This is the canonical name for the deployer and each of its components + pub(crate) fn get_name(&self, deployer_type: ForgeDeployerType) -> String { + format!("deploy-forge-{}-e{}", deployer_type, &self.values.era) + } + + /// Gets a k8s configmap for the forge deployer that contains the values needed to deploy the forge components + /// Does not actually create the configmap in k8s + fn get_forge_deployer_k8s_config_map( + &self, + deployer_type: ForgeDeployerType, + ) -> Result { + let configmap_name = self.get_name(deployer_type); + let deploy_values_json = serde_json::to_string(&self.values)?; + + // create the configmap with values + let config_map = ConfigMap { + metadata: ObjectMeta { + name: Some(configmap_name.clone()), + namespace: Some(self.values.namespace.clone()), + ..Default::default() + }, + data: Some(BTreeMap::from([( + "deploy-values.json".to_string(), + deploy_values_json, + )])), + ..Default::default() + }; + + Ok(config_map) + } + + /// Gets a k8s job for the forge deployer that implements the particular interface that it expects: + /// - Runs the corresponding forge--deployer image + /// - Sets the FORGE_DEPLOY_VALUES_JSON environment variable to the configmap that contains the values + /// Does not actually create the job in k8s + fn get_forge_deployer_k8s_job( + &self, + deployer_type: ForgeDeployerType, + configmap_name: String, + ) -> Result { + let job_name = self.get_name(deployer_type); + let image_repo: &str = match deployer_type { + ForgeDeployerType::Indexer => FORGE_INDEXER_DEPLOYER_DOCKER_IMAGE_REPO, + ForgeDeployerType::Testnet => FORGE_TESTNET_DEPLOYER_DOCKER_IMAGE_REPO, + }; + let image_tag: &str = FORGE_DEPLOYER_IMAGE_TAG; + + let job = Job { + metadata: ObjectMeta { + name: Some(job_name.clone()), + namespace: Some(self.values.namespace.clone()), + ..Default::default() + }, + spec: Some(k8s_openapi::api::batch::v1::JobSpec { + template: k8s_openapi::api::core::v1::PodTemplateSpec { + spec: Some(k8s_openapi::api::core::v1::PodSpec { + 
service_account_name: Some(FORGE_DEPLOYER_SERVICE_ACCOUNT_NAME.to_string()), + containers: vec![k8s_openapi::api::core::v1::Container { + name: "forge-deployer".to_string(), + image: Some(format!("{}:{}", image_repo, image_tag)), + env: Some(vec![k8s_openapi::api::core::v1::EnvVar { + name: FORGE_DEPLOYER_VALUES_ENV_VAR_NAME.to_string(), + value_from: Some(k8s_openapi::api::core::v1::EnvVarSource { + config_map_key_ref: Some( + k8s_openapi::api::core::v1::ConfigMapKeySelector { + name: Some(configmap_name), + key: "deploy-values.json".to_string(), + ..Default::default() + }, + ), + ..Default::default() + }), + ..Default::default() + }]), + ..Default::default() + }], + restart_policy: Some("Never".to_string()), + ..Default::default() + }), + ..Default::default() + }, + backoff_limit: Some(0), + ..Default::default() + }), + ..Default::default() + }; + + Ok(job) + } + + pub async fn start(&self, deployer_type: ForgeDeployerType) -> Result<()> { + let config_map = self.get_forge_deployer_k8s_config_map(deployer_type)?; + let job = self.get_forge_deployer_k8s_job(deployer_type, config_map.name())?; + self.config_maps_api + .create(&PostParams::default(), &config_map) + .await?; + self.jobs_api.create(&PostParams::default(), &job).await?; + Ok(()) + } + + pub async fn ensure_namespace_prepared(&self) -> Result<()> { + let namespace = Namespace { + metadata: ObjectMeta { + name: Some(self.values.namespace.clone()), + ..Default::default() + }, + ..Default::default() + }; + maybe_create_k8s_resource(self.namespace_api.clone(), namespace.clone()).await?; + + // create a serviceaccount FORGE_DEPLOYER_SERVICE_ACCOUNT_NAME + let service_account = ServiceAccount { + metadata: ObjectMeta { + name: Some(FORGE_DEPLOYER_SERVICE_ACCOUNT_NAME.to_string()), + namespace: Some(namespace.name()), + ..Default::default() + }, + ..Default::default() + }; + maybe_create_k8s_resource(self.serviceaccount_api.clone(), service_account).await?; + + // create a rolebinding for the service account to the clusterrole cluster-admin + let role_binding = RoleBinding { + metadata: ObjectMeta { + name: Some("forge-admin".to_string()), + namespace: Some(namespace.name()), + ..Default::default() + }, + role_ref: k8s_openapi::api::rbac::v1::RoleRef { + api_group: "rbac.authorization.k8s.io".to_string(), + kind: "ClusterRole".to_string(), + name: "cluster-admin".to_string(), + }, + subjects: Some(vec![k8s_openapi::api::rbac::v1::Subject { + kind: "ServiceAccount".to_string(), + name: FORGE_DEPLOYER_SERVICE_ACCOUNT_NAME.to_string(), + namespace: Some(namespace.name()), + ..Default::default() + }]), + }; + maybe_create_k8s_resource(self.rolebinding_api.clone(), role_binding).await?; + Ok(()) + } + + pub async fn completed(&self, deployer_type: ForgeDeployerType) -> Result { + let job_name = self.get_name(deployer_type); + let job = self.jobs_api.get(&job_name).await?; + Ok(job + .status + .expect("Failed to get job status") + .succeeded + .expect("Failed to get job succeeded number") + > 0) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::MockK8sResourceApi; + + /// Test creating a forge deployer manager and creating an indexer deployment with it. 
Nothing + /// exists in the namespace yet + #[tokio::test] + async fn test_start_deployer_fresh_environment() { + let values = ForgeDeployerValues { + profile: "large-banana".to_string(), + era: "1".to_string(), + namespace: "forge-large-banana".to_string(), + indexer_grpc_values: None, + indexer_processor_values: None, + }; + let manager = ForgeDeployerManager { + jobs_api: Arc::new(MockK8sResourceApi::new()), + config_maps_api: Arc::new(MockK8sResourceApi::new()), + namespace_api: Arc::new(MockK8sResourceApi::new()), + serviceaccount_api: Arc::new(MockK8sResourceApi::new()), + rolebinding_api: Arc::new(MockK8sResourceApi::new()), + values, + }; + manager.start(ForgeDeployerType::Indexer).await.unwrap(); + let indexer_deployer_name = manager.get_name(ForgeDeployerType::Indexer); + manager + .jobs_api + .get(&indexer_deployer_name) + .await + .expect(format!("Expected job {} to exist", indexer_deployer_name).as_str()); + manager + .config_maps_api + .get(&indexer_deployer_name) + .await + .expect(format!("Expected configmap {} to exist", indexer_deployer_name).as_str()); + } + + /// Test starting a deployer with an existing job in the namespace. This should fail as the job already exists + /// and we cannot override/mutate it. + #[tokio::test] + async fn test_start_deployer_existing_job() { + let values = ForgeDeployerValues { + profile: "large-banana".to_string(), + era: "1".to_string(), + namespace: "forge-large-banana".to_string(), + indexer_grpc_values: None, + indexer_processor_values: None, + }; + let manager = ForgeDeployerManager { + jobs_api: Arc::new(MockK8sResourceApi::from_resource(Job { + metadata: ObjectMeta { + name: Some("deploy-forge-indexer-e1".to_string()), + namespace: Some("default".to_string()), + ..Default::default() + }, + ..Default::default() + })), + config_maps_api: Arc::new(MockK8sResourceApi::new()), + namespace_api: Arc::new(MockK8sResourceApi::new()), + serviceaccount_api: Arc::new(MockK8sResourceApi::new()), + rolebinding_api: Arc::new(MockK8sResourceApi::new()), + values, + }; + let result = manager.start(ForgeDeployerType::Indexer).await; + assert!(result.is_err()); + } + + /// Test starting a deployer with an existing job in the namespace but a different era. 
This should be allowed + /// as the new job/deployment will be in a different era and unrelated to the existing job + #[tokio::test] + async fn test_start_deployer_existing_job_different_era() { + let values = ForgeDeployerValues { + profile: "large-banana".to_string(), + era: "2".to_string(), + namespace: "forge-large-banana".to_string(), + indexer_grpc_values: None, + indexer_processor_values: None, + }; + let manager = ForgeDeployerManager { + jobs_api: Arc::new(MockK8sResourceApi::from_resource(Job { + metadata: ObjectMeta { + name: Some("deploy-forge-indexer-e1".to_string()), + namespace: Some("default".to_string()), + ..Default::default() + }, + ..Default::default() + })), + config_maps_api: Arc::new(MockK8sResourceApi::new()), + namespace_api: Arc::new(MockK8sResourceApi::new()), + serviceaccount_api: Arc::new(MockK8sResourceApi::new()), + rolebinding_api: Arc::new(MockK8sResourceApi::new()), + values, + }; + manager.start(ForgeDeployerType::Indexer).await.unwrap(); + } + + /// Test ensure_namespace_prepared creates the namespace, serviceaccount, and rolebinding + /// Collisions should be OK to ensure idempotency + #[tokio::test] + async fn test_ensure_namespace_prepared_fresh_namespace() { + let values = ForgeDeployerValues { + profile: "large-banana".to_string(), + era: "1".to_string(), + namespace: "forge-large-banana".to_string(), + indexer_grpc_values: None, + indexer_processor_values: None, + }; + let manager = ForgeDeployerManager { + jobs_api: Arc::new(MockK8sResourceApi::new()), + config_maps_api: Arc::new(MockK8sResourceApi::new()), + namespace_api: Arc::new(MockK8sResourceApi::new()), + serviceaccount_api: Arc::new(MockK8sResourceApi::new()), + rolebinding_api: Arc::new(MockK8sResourceApi::new()), + values, + }; + manager + .ensure_namespace_prepared() + .await + .expect("Issue ensuring namespace prepared"); + let namespace = manager + .namespace_api + .get("forge-large-banana") + .await + .expect(format!("Expected namespace {} to exist", "forge-large-banana").as_str()); + assert_eq!( + namespace.metadata.name, + Some("forge-large-banana".to_string()) + ); + let serviceaccount = manager + .serviceaccount_api + .get(FORGE_DEPLOYER_SERVICE_ACCOUNT_NAME) + .await + .expect( + format!( + "Expected serviceaccount {} to exist", + FORGE_DEPLOYER_SERVICE_ACCOUNT_NAME + ) + .as_str(), + ); + assert_eq!( + serviceaccount.metadata.name, + Some(FORGE_DEPLOYER_SERVICE_ACCOUNT_NAME.to_string()) + ); + let rolebinding = manager.rolebinding_api.get("forge-admin").await.unwrap(); + assert_eq!(rolebinding.metadata.name, Some("forge-admin".to_string())); + } + + /// Test the same thing but with existing resources. 
This should not error out and should be idempotent + #[tokio::test] + async fn test_ensure_namespace_prepared_existing_resources() { + let values = ForgeDeployerValues { + profile: "large-banana".to_string(), + era: "1".to_string(), + namespace: "forge-large-banana".to_string(), + indexer_grpc_values: None, + indexer_processor_values: None, + }; + let manager = ForgeDeployerManager { + jobs_api: Arc::new(MockK8sResourceApi::new()), + config_maps_api: Arc::new(MockK8sResourceApi::new()), + namespace_api: Arc::new(MockK8sResourceApi::from_resource(Namespace { + metadata: ObjectMeta { + name: Some("forge-large-banana".to_string()), + ..Default::default() + }, + ..Default::default() + })), + serviceaccount_api: Arc::new(MockK8sResourceApi::from_resource(ServiceAccount { + metadata: ObjectMeta { + name: Some(FORGE_DEPLOYER_SERVICE_ACCOUNT_NAME.to_string()), + namespace: Some("forge-large-banana".to_string()), + ..Default::default() + }, + ..Default::default() + })), + rolebinding_api: Arc::new(MockK8sResourceApi::from_resource(RoleBinding { + metadata: ObjectMeta { + name: Some("forge-admin".to_string()), + namespace: Some("forge-large-banana".to_string()), + ..Default::default() + }, + ..Default::default() + })), + values, + }; + manager + .ensure_namespace_prepared() + .await + .expect("Issue ensuring namespace prepared"); + } +} diff --git a/testsuite/forge/src/backend/k8s_deployer/mod.rs b/testsuite/forge/src/backend/k8s_deployer/mod.rs new file mode 100644 index 00000000000000..8d7ea5711ae0fc --- /dev/null +++ b/testsuite/forge/src/backend/k8s_deployer/mod.rs @@ -0,0 +1,8 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +mod constants; +mod deployer; + +pub use constants::*; +pub use deployer::*; diff --git a/testsuite/forge/src/backend/mod.rs b/testsuite/forge/src/backend/mod.rs index 24b996e75ca45d..5124ef27002e87 100644 --- a/testsuite/forge/src/backend/mod.rs +++ b/testsuite/forge/src/backend/mod.rs @@ -7,3 +7,6 @@ pub use local::{LocalNode, *}; mod k8s; pub use k8s::{K8sNode, *}; + +mod k8s_deployer; +pub use k8s_deployer::*;