diff --git a/.github/workflows/publish-rust-lazer-agent.yml b/.github/workflows/publish-rust-lazer-agent.yml new file mode 100644 index 0000000000..a77c6812c3 --- /dev/null +++ b/.github/workflows/publish-rust-lazer-agent.yml @@ -0,0 +1,18 @@ +name: Publish Rust package pyth-lazer-agent to crates.io + +on: + push: + tags: + - pyth-lazer-agent-v* +jobs: + publish-pyth-lazer-agent: + name: Publish Rust package pyth-lazer-agent to crates.io + runs-on: ubuntu-latest + steps: + - name: Checkout sources + uses: actions/checkout@v2 + + - run: cargo publish --token ${CARGO_REGISTRY_TOKEN} + env: + CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} + working-directory: "apps/pyth-lazer-agent" diff --git a/Cargo.lock b/Cargo.lock index a23402a01a..b1da1acf7e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3071,7 +3071,7 @@ dependencies = [ [[package]] name = "fortuna" -version = "8.2.2" +version = "8.2.5" dependencies = [ "anyhow", "axum 0.6.20", @@ -4423,6 +4423,12 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + [[package]] name = "linux-raw-sys" version = "0.4.15" @@ -5611,7 +5617,7 @@ dependencies = [ [[package]] name = "pyth-lazer-agent" -version = "0.3.0" +version = "0.3.3" dependencies = [ "anyhow", "backoff", @@ -5647,10 +5653,11 @@ dependencies = [ [[package]] name = "pyth-lazer-client" -version = "0.1.3" +version = "2.0.0" dependencies = [ "alloy-primitives 0.8.25", "anyhow", + "backoff", "base64 0.22.1", "bincode 1.3.3", "bs58", @@ -5659,12 +5666,14 @@ dependencies = [ "futures-util", "hex", "libsecp256k1 0.7.2", - "pyth-lazer-protocol 0.9.1", + "pyth-lazer-protocol 0.10.0", "serde", "serde_json", "tokio", "tokio-tungstenite 0.20.1", "tracing", + "tracing-subscriber", + "ttl_cache", "url", ] @@ -5687,7 +5696,7 @@ dependencies = [ [[package]] name = "pyth-lazer-protocol" 
-version = "0.9.1" +version = "0.10.0" dependencies = [ "alloy-primitives 0.8.25", "anyhow", @@ -5727,19 +5736,14 @@ dependencies = [ [[package]] name = "pyth-lazer-publisher-sdk" -version = "0.2.0" +version = "0.2.1" dependencies = [ "anyhow", - "derive_more 2.0.1", "fs-err", - "hex", - "humantime", "protobuf", "protobuf-codegen", - "pyth-lazer-protocol 0.9.1", - "serde", + "pyth-lazer-protocol 0.10.0", "serde_json", - "tracing", ] [[package]] @@ -10299,6 +10303,15 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" +[[package]] +name = "ttl_cache" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4189890526f0168710b6ee65ceaedf1460c48a14318ceec933cb26baa492096a" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "tungstenite" version = "0.20.1" diff --git a/apps/fortuna/Cargo.toml b/apps/fortuna/Cargo.toml index f21ee9918d..5bf6fabf48 100644 --- a/apps/fortuna/Cargo.toml +++ b/apps/fortuna/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fortuna" -version = "8.2.2" +version = "8.2.5" edition = "2021" [lib] diff --git a/apps/fortuna/src/api.rs b/apps/fortuna/src/api.rs index 5f7ab2cf24..cc39d416de 100644 --- a/apps/fortuna/src/api.rs +++ b/apps/fortuna/src/api.rs @@ -1,6 +1,7 @@ use { crate::{ chain::reader::{BlockNumber, BlockStatus, EntropyReader}, + config::Config, history::History, state::MonitoredHashChainState, }, @@ -22,9 +23,12 @@ use { tokio::sync::RwLock, url::Url, }; -pub use {chain_ids::*, explorer::*, index::*, live::*, metrics::*, ready::*, revelation::*}; +pub use { + chain_ids::*, config::*, explorer::*, index::*, live::*, metrics::*, ready::*, revelation::*, +}; mod chain_ids; +mod config; mod explorer; mod index; mod live; @@ -73,6 +77,8 @@ pub struct ApiState { pub metrics: Arc, pub explorer_metrics: Arc, + + pub config: Config, } impl ApiState { @@ -80,6 +86,7 @@ impl ApiState { 
chains: Arc>>, metrics_registry: Arc>, history: Arc, + config: &Config, ) -> ApiState { let metrics = ApiMetrics { http_requests: Family::default(), @@ -100,6 +107,7 @@ impl ApiState { explorer_metrics, history, metrics_registry, + config: config.clone(), } } } @@ -211,6 +219,7 @@ pub fn routes(state: ApiState) -> Router<(), Body> { "/v1/chains/:chain_id/revelations/:sequence", get(revelation), ) + .route("/v1/chains/configs", get(get_chain_configs)) .with_state(state) } @@ -230,9 +239,10 @@ mod test { crate::{ api::{ self, ApiBlockChainState, ApiState, BinaryEncoding, Blob, BlockchainState, - GetRandomValueResponse, + ChainConfigSummary, GetRandomValueResponse, }, chain::reader::{mock::MockEntropyReader, BlockStatus}, + config::Config, history::History, state::{HashChainState, MonitoredHashChainState, PebbleHashChain}, }, @@ -311,10 +321,40 @@ mod test { ApiBlockChainState::Initialized(avax_state), ); + // Create a minimal config for testing + let config = Config { + chains: HashMap::new(), + provider: crate::config::ProviderConfig { + uri: "http://localhost:8080/".to_string(), + address: PROVIDER, + private_key: crate::config::SecretString { + value: Some("0xabcd".to_string()), + file: None, + }, + secret: crate::config::SecretString { + value: Some("abcd".to_string()), + file: None, + }, + chain_length: 100000, + chain_sample_interval: 10, + fee_manager: None, + }, + keeper: crate::config::KeeperConfig { + private_key: crate::config::SecretString { + value: Some("0xabcd".to_string()), + file: None, + }, + fee_manager_private_key: None, + other_keeper_addresses: vec![], + replica_config: None, + }, + }; + let api_state = ApiState::new( Arc::new(RwLock::new(chains)), metrics_registry, Arc::new(History::new().await.unwrap()), + &config, ) .await; @@ -534,4 +574,212 @@ mod test { ) .await; } + + #[tokio::test] + async fn test_chain_configs() { + let (server, _, _) = test_server().await; + + // Test the chain configs endpoint + let response = 
server.get("/v1/chains/configs").await; + response.assert_status(StatusCode::OK); + + // Parse the response as JSON + let configs: Vec = response.json(); + + // Verify the response structure - should be empty for test server + assert_eq!( + configs.len(), + 0, + "Should return empty configs for test server" + ); + } + + #[tokio::test] + async fn test_chain_configs_with_data() { + use crate::api::get_chain_configs; + use axum::{routing::get, Router}; + + // Create a config with actual chain data + let mut config_chains = HashMap::new(); + config_chains.insert( + "ethereum".to_string(), + crate::config::EthereumConfig { + geth_rpc_addr: "http://localhost:8545".to_string(), + contract_addr: Address::from_low_u64_be(0x1234), + reveal_delay_blocks: 1, + confirmed_block_status: BlockStatus::Latest, + backlog_range: 1000, + legacy_tx: false, + gas_limit: 500000, + priority_fee_multiplier_pct: 100, + escalation_policy: crate::config::EscalationPolicyConfig::default(), + min_profit_pct: 0, + target_profit_pct: 20, + max_profit_pct: 100, + min_keeper_balance: 100000000000000000, + fee: 1500000000000000, + sync_fee_only_on_register: true, + commitments: None, + max_num_hashes: None, + block_delays: vec![5], + }, + ); + config_chains.insert( + "avalanche".to_string(), + crate::config::EthereumConfig { + geth_rpc_addr: "http://localhost:9650".to_string(), + contract_addr: Address::from_low_u64_be(0x5678), + reveal_delay_blocks: 2, + confirmed_block_status: BlockStatus::Latest, + backlog_range: 1000, + legacy_tx: false, + gas_limit: 600000, + priority_fee_multiplier_pct: 100, + escalation_policy: crate::config::EscalationPolicyConfig::default(), + min_profit_pct: 0, + target_profit_pct: 20, + max_profit_pct: 100, + min_keeper_balance: 100000000000000000, + fee: 2000000000000000, + sync_fee_only_on_register: true, + commitments: None, + max_num_hashes: None, + block_delays: vec![5], + }, + ); + + let config = Config { + chains: config_chains, + provider: 
crate::config::ProviderConfig { + uri: "http://localhost:8080/".to_string(), + address: PROVIDER, + private_key: crate::config::SecretString { + value: Some("0xabcd".to_string()), + file: None, + }, + secret: crate::config::SecretString { + value: Some("abcd".to_string()), + file: None, + }, + chain_length: 100000, + chain_sample_interval: 10, + fee_manager: None, + }, + keeper: crate::config::KeeperConfig { + private_key: crate::config::SecretString { + value: Some("0xabcd".to_string()), + file: None, + }, + fee_manager_private_key: None, + other_keeper_addresses: vec![], + replica_config: None, + }, + }; + + // Create initialized blockchain states with network IDs + let eth_read = Arc::new(MockEntropyReader::with_requests(10, &[])); + let avax_read = Arc::new(MockEntropyReader::with_requests(10, &[])); + + let eth_state = MonitoredHashChainState::new( + ETH_CHAIN.clone(), + Default::default(), + "ethereum".into(), + PROVIDER, + ); + + let eth_blockchain_state = BlockchainState { + id: "ethereum".into(), + network_id: 1, // Ethereum mainnet + state: Arc::new(eth_state), + contract: eth_read.clone(), + provider_address: PROVIDER, + reveal_delay_blocks: 1, + confirmed_block_status: BlockStatus::Latest, + }; + + let avax_state = MonitoredHashChainState::new( + AVAX_CHAIN.clone(), + Default::default(), + "avalanche".into(), + PROVIDER, + ); + + let avax_blockchain_state = BlockchainState { + id: "avalanche".into(), + network_id: 43114, // Avalanche C-Chain + state: Arc::new(avax_state), + contract: avax_read.clone(), + provider_address: PROVIDER, + reveal_delay_blocks: 2, + confirmed_block_status: BlockStatus::Latest, + }; + + // Create chains HashMap with initialized states + let mut chains = HashMap::new(); + chains.insert( + "ethereum".into(), + ApiBlockChainState::Initialized(eth_blockchain_state), + ); + chains.insert( + "avalanche".into(), + ApiBlockChainState::Initialized(avax_blockchain_state), + ); + + // Minimal ApiState for this endpoint + let api_state = 
ApiState { + chains: Arc::new(RwLock::new(chains)), + history: Arc::new(History::new().await.unwrap()), + metrics_registry: Arc::new(RwLock::new(Registry::default())), + metrics: Arc::new(crate::api::ApiMetrics { + http_requests: prometheus_client::metrics::family::Family::default(), + }), + explorer_metrics: Arc::new( + crate::api::ExplorerMetrics::new(Arc::new(RwLock::new(Registry::default()))).await, + ), + config, + }; + + let app = Router::new() + .route("/v1/chains/configs", get(get_chain_configs)) + .with_state(api_state); + let server = TestServer::new(app).unwrap(); + + // Test the chain configs endpoint + let response = server.get("/v1/chains/configs").await; + response.assert_status(StatusCode::OK); + + // Parse the response as JSON + let configs: Vec = response.json(); + + // Verify we have 2 chains + assert_eq!(configs.len(), 2, "Should return 2 chain configs"); + + // Find ethereum config + let eth_config = configs + .iter() + .find(|c| c.name == "ethereum") + .expect("Ethereum config not found"); + assert_eq!( + eth_config.contract_addr, + "0x0000000000000000000000000000000000001234" + ); + assert_eq!(eth_config.reveal_delay_blocks, 1); + assert_eq!(eth_config.gas_limit, 500000); + assert_eq!(eth_config.default_fee, 1500000000000000); + assert_eq!(eth_config.network_id, 1); // Ethereum mainnet + + // Find avalanche config + let avax_config = configs + .iter() + .find(|c| c.name == "avalanche") + .expect("Avalanche config not found"); + assert_eq!( + avax_config.contract_addr, + "0x0000000000000000000000000000000000005678" + ); + assert_eq!(avax_config.reveal_delay_blocks, 2); + assert_eq!(avax_config.gas_limit, 600000); + assert_eq!(avax_config.default_fee, 2000000000000000); + assert_eq!(avax_config.network_id, 43114); // Avalanche C-Chain + } } diff --git a/apps/fortuna/src/api/config.rs b/apps/fortuna/src/api/config.rs new file mode 100644 index 0000000000..2b9470e2f4 --- /dev/null +++ b/apps/fortuna/src/api/config.rs @@ -0,0 +1,36 @@ +use { + 
crate::api::{ApiBlockChainState, ApiState, RestError}, + axum::{extract::State, Json}, + serde::Serialize, +}; + +#[derive(Serialize, serde::Deserialize)] +pub struct ChainConfigSummary { + pub name: String, + pub network_id: u64, + pub contract_addr: String, + pub reveal_delay_blocks: u64, + pub gas_limit: u32, + pub default_fee: u128, +} + +pub async fn get_chain_configs( + State(state): State, +) -> Result>, RestError> { + let mut configs = Vec::new(); + for (name, chain) in state.config.chains.iter() { + let network_id = match state.chains.read().await.get(name) { + Some(ApiBlockChainState::Initialized(blockchain_state)) => blockchain_state.network_id, + _ => 0, + }; + configs.push(ChainConfigSummary { + name: name.clone(), + network_id, + contract_addr: format!("0x{:x}", chain.contract_addr), + reveal_delay_blocks: chain.reveal_delay_blocks, + gas_limit: chain.gas_limit, + default_fee: chain.fee, + }); + } + Ok(Json(configs)) +} diff --git a/apps/fortuna/src/chain/ethereum.rs b/apps/fortuna/src/chain/ethereum.rs index fe6464ce85..312c9f9abb 100644 --- a/apps/fortuna/src/chain/ethereum.rs +++ b/apps/fortuna/src/chain/ethereum.rs @@ -274,13 +274,17 @@ impl EntropyReader for PythRandom> { .get_request_v2(provider_address, sequence_number) .call() .await?; - Ok(Some(reader::Request { - provider: request.provider, - sequence_number: request.sequence_number, - block_number: request.block_number, - use_blockhash: request.use_blockhash, - callback_status: reader::RequestCallbackStatus::try_from(request.callback_status)?, - })) + if request.sequence_number == 0 { + Ok(None) + } else { + Ok(Some(reader::Request { + provider: request.provider, + sequence_number: request.sequence_number, + block_number: request.block_number, + use_blockhash: request.use_blockhash, + callback_status: reader::RequestCallbackStatus::try_from(request.callback_status)?, + })) + } } async fn get_block_number(&self, confirmed_block_status: BlockStatus) -> Result { diff --git 
a/apps/fortuna/src/command/run.rs b/apps/fortuna/src/command/run.rs index a06909de78..c9f630d6b7 100644 --- a/apps/fortuna/src/command/run.rs +++ b/apps/fortuna/src/command/run.rs @@ -28,6 +28,7 @@ pub async fn run_api( chains: Arc>>, metrics_registry: Arc>, history: Arc, + config: &Config, mut rx_exit: watch::Receiver, ) -> Result<()> { #[derive(OpenApi)] @@ -54,7 +55,7 @@ pub async fn run_api( )] struct ApiDoc; - let api_state = api::ApiState::new(chains, metrics_registry, history).await; + let api_state = api::ApiState::new(chains, metrics_registry, history, config).await; // Initialize Axum Router. Note the type here is a `Router` due to the use of the // `with_state` method which replaces `Body` with `State` in the type signature. @@ -85,7 +86,7 @@ pub async fn run_api( pub async fn run(opts: &RunOptions) -> Result<()> { // Load environment variables from a .env file if present - let _ = dotenv::dotenv()?; + let _ = dotenv::dotenv().map_err(|e| anyhow!("Failed to load .env file: {}", e))?; let config = Config::load(&opts.config.config)?; let secret = config.provider.secret.load()?.ok_or(anyhow!( "Please specify a provider secret in the config file." @@ -170,6 +171,7 @@ pub async fn run(opts: &RunOptions) -> Result<()> { chains.clone(), metrics_registry.clone(), history, + &config, rx_exit, ) .await?; diff --git a/apps/hermes/server/Cargo.lock b/apps/hermes/server/Cargo.lock index 55f57f66ca..00414c68f8 100644 --- a/apps/hermes/server/Cargo.lock +++ b/apps/hermes/server/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "Inflector" @@ -1880,7 +1880,7 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermes" -version = "0.10.3" +version = "0.10.4" dependencies = [ "anyhow", "async-trait", diff --git a/apps/hermes/server/Cargo.toml b/apps/hermes/server/Cargo.toml index 3be189cc5c..a21455ec94 100644 --- a/apps/hermes/server/Cargo.toml +++ b/apps/hermes/server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hermes" -version = "0.10.3" +version = "0.10.4" description = "Hermes is an agent that provides Verified Prices from the Pythnet Pyth Oracle." edition = "2021" diff --git a/apps/hermes/server/src/api/rest/get_price_feed.rs b/apps/hermes/server/src/api/rest/get_price_feed.rs index 765b56d755..eba69a7c0a 100644 --- a/apps/hermes/server/src/api/rest/get_price_feed.rs +++ b/apps/hermes/server/src/api/rest/get_price_feed.rs @@ -73,7 +73,7 @@ where ) .await .map_err(|e| { - tracing::warn!( + tracing::debug!( "Error getting price feed {:?} with update data: {:?}", price_id, e diff --git a/apps/hermes/server/src/api/rest/get_vaa.rs b/apps/hermes/server/src/api/rest/get_vaa.rs index d9e3ddfcb9..5353ef7389 100644 --- a/apps/hermes/server/src/api/rest/get_vaa.rs +++ b/apps/hermes/server/src/api/rest/get_vaa.rs @@ -72,7 +72,7 @@ where ) .await .map_err(|e| { - tracing::warn!( + tracing::debug!( "Error getting price feed {:?} with update data: {:?}", price_id, e diff --git a/apps/hermes/server/src/api/rest/get_vaa_ccip.rs b/apps/hermes/server/src/api/rest/get_vaa_ccip.rs index 858eb71945..b04f836b7a 100644 --- a/apps/hermes/server/src/api/rest/get_vaa_ccip.rs +++ b/apps/hermes/server/src/api/rest/get_vaa_ccip.rs @@ -73,7 +73,7 @@ where ) .await .map_err(|e| { - tracing::warn!( + tracing::debug!( "Error getting price feed {:?} with update data: {:?}", price_id, e diff --git a/apps/hermes/server/src/api/rest/latest_price_feeds.rs b/apps/hermes/server/src/api/rest/latest_price_feeds.rs index 
22d606dfcd..1681264e60 100644 --- a/apps/hermes/server/src/api/rest/latest_price_feeds.rs +++ b/apps/hermes/server/src/api/rest/latest_price_feeds.rs @@ -72,7 +72,7 @@ where Aggregates::get_price_feeds_with_update_data(state, &price_ids, RequestTime::Latest) .await .map_err(|e| { - tracing::warn!( + tracing::debug!( "Error getting price feeds {:?} with update data: {:?}", price_ids, e diff --git a/apps/hermes/server/src/api/rest/latest_vaas.rs b/apps/hermes/server/src/api/rest/latest_vaas.rs index 5b387bb322..1d4c171b91 100644 --- a/apps/hermes/server/src/api/rest/latest_vaas.rs +++ b/apps/hermes/server/src/api/rest/latest_vaas.rs @@ -61,7 +61,7 @@ where Aggregates::get_price_feeds_with_update_data(state, &price_ids, RequestTime::Latest) .await .map_err(|e| { - tracing::warn!( + tracing::debug!( "Error getting price feeds {:?} with update data: {:?}", price_ids, e diff --git a/apps/hermes/server/src/api/rest/v2/latest_price_updates.rs b/apps/hermes/server/src/api/rest/v2/latest_price_updates.rs index a249063966..e378db0504 100644 --- a/apps/hermes/server/src/api/rest/v2/latest_price_updates.rs +++ b/apps/hermes/server/src/api/rest/v2/latest_price_updates.rs @@ -79,7 +79,7 @@ where Aggregates::get_price_feeds_with_update_data(state, &price_ids, RequestTime::Latest) .await .map_err(|e| { - tracing::warn!( + tracing::debug!( "Error getting price feeds {:?} with update data: {:?}", price_ids, e diff --git a/apps/hermes/server/src/api/rest/v2/latest_publisher_stake_caps.rs b/apps/hermes/server/src/api/rest/v2/latest_publisher_stake_caps.rs index 7db8729572..454f12b9b4 100644 --- a/apps/hermes/server/src/api/rest/v2/latest_publisher_stake_caps.rs +++ b/apps/hermes/server/src/api/rest/v2/latest_publisher_stake_caps.rs @@ -59,7 +59,7 @@ where Aggregates::get_latest_publisher_stake_caps_with_update_data(state) .await .map_err(|e| { - tracing::warn!( + tracing::debug!( "Error getting publisher stake caps with update data: {:?}", e ); diff --git 
a/apps/hermes/server/src/api/rest/v2/latest_twaps.rs b/apps/hermes/server/src/api/rest/v2/latest_twaps.rs index 9397a3f066..67c71eb98c 100644 --- a/apps/hermes/server/src/api/rest/v2/latest_twaps.rs +++ b/apps/hermes/server/src/api/rest/v2/latest_twaps.rs @@ -114,7 +114,7 @@ where ) .await .map_err(|e| { - tracing::warn!( + tracing::debug!( "Error getting TWAPs for price IDs {:?} with update data: {:?}", price_ids, e diff --git a/apps/hermes/server/src/state/aggregate.rs b/apps/hermes/server/src/state/aggregate.rs index 9622fe621b..01e0f1e36d 100644 --- a/apps/hermes/server/src/state/aggregate.rs +++ b/apps/hermes/server/src/state/aggregate.rs @@ -664,7 +664,7 @@ where // to guarantee that the start and end messages are window_seconds apart let start_timestamp = if end_messages.is_empty() { // If there are no end messages, we can't calculate a TWAP - tracing::warn!( + tracing::debug!( price_ids = ?price_ids, time = ?end_time, "Could not find TWAP messages" @@ -693,7 +693,7 @@ where .await?; if start_messages.is_empty() { - tracing::warn!( + tracing::debug!( price_ids = ?price_ids, time = ?start_time, "Could not find TWAP messages" diff --git a/apps/pyth-lazer-agent/Cargo.toml b/apps/pyth-lazer-agent/Cargo.toml index 919bb01d6b..7ef3c99512 100644 --- a/apps/pyth-lazer-agent/Cargo.toml +++ b/apps/pyth-lazer-agent/Cargo.toml @@ -1,7 +1,10 @@ [package] name = "pyth-lazer-agent" -version = "0.3.0" +version = "0.3.3" edition = "2024" +description = "Pyth Lazer Agent" +license = "Apache-2.0" +repository = "https://github.com/pyth-network/pyth-crosschain" [dependencies] pyth-lazer-publisher-sdk = "0.1.7" diff --git a/apps/pyth-lazer-agent/src/config.rs b/apps/pyth-lazer-agent/src/config.rs index d0721939e9..795130aa4f 100644 --- a/apps/pyth-lazer-agent/src/config.rs +++ b/apps/pyth-lazer-agent/src/config.rs @@ -1,10 +1,11 @@ -use std::net::SocketAddr; -use std::path::PathBuf; -use std::time::Duration; - use config::{Environment, File}; use derivative::Derivative; use 
serde::Deserialize; +use std::cmp::min; +use std::fmt::{Debug, Formatter}; +use std::net::SocketAddr; +use std::path::PathBuf; +use std::time::Duration; use url::Url; #[derive(Deserialize, Derivative, Clone, PartialEq)] @@ -12,7 +13,7 @@ use url::Url; pub struct Config { pub listen_address: SocketAddr, pub relayer_urls: Vec, - pub authorization_token: Option, + pub authorization_token: Option, #[derivative(Debug = "ignore")] pub publish_keypair_path: PathBuf, #[serde(with = "humantime_serde", default = "default_publish_interval")] @@ -20,6 +21,18 @@ pub struct Config { pub history_service_url: Option, } +#[derive(Deserialize, Derivative, Clone, PartialEq)] +pub struct AuthorizationToken(pub String); + +impl Debug for AuthorizationToken { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let token_string = self.0.to_ascii_lowercase(); + #[allow(clippy::string_slice, reason = "false positive")] + let last_chars = &token_string[token_string.len() - min(4, token_string.len())..]; + write!(f, "\"...{last_chars}\"") + } +} + fn default_publish_interval() -> Duration { Duration::from_micros(500) } diff --git a/apps/pyth-lazer-agent/src/lazer_publisher.rs b/apps/pyth-lazer-agent/src/lazer_publisher.rs index b453362caf..79bb00b493 100644 --- a/apps/pyth-lazer-agent/src/lazer_publisher.rs +++ b/apps/pyth-lazer-agent/src/lazer_publisher.rs @@ -62,7 +62,7 @@ impl LazerPublisher { let authorization_token = if let Some(authorization_token) = config.authorization_token.clone() { // If authorization_token is configured, use it. - authorization_token + authorization_token.0 } else { // Otherwise, use the base64 pubkey. 
BASE64_STANDARD.encode(signing_key.verifying_key().to_bytes()) diff --git a/apps/pyth-lazer-agent/src/main.rs b/apps/pyth-lazer-agent/src/main.rs index 7d319a7c4c..28d5acf3ec 100644 --- a/apps/pyth-lazer-agent/src/main.rs +++ b/apps/pyth-lazer-agent/src/main.rs @@ -1,3 +1,4 @@ +use serde::Deserialize; use { crate::lazer_publisher::LazerPublisher, anyhow::Context, @@ -14,38 +15,61 @@ mod publisher_handle; mod relayer_session; mod websocket_utils; -#[derive(Parser)] +#[derive(Parser, Deserialize)] #[command(version)] struct Cli { #[clap(short, long, default_value = "config/config.toml")] config: String, + #[clap(short, long, default_value = "json")] + log_format: LogFormat, +} + +#[derive(clap::ValueEnum, Clone, Deserialize, Default)] +enum LogFormat { + #[default] + Json, + Compact, + Pretty, } #[tokio::main] async fn main() -> anyhow::Result<()> { + let args = Cli::parse(); + init_tracing_subscriber(args.log_format); + + let config = + config::load_config(args.config.to_string()).context("Failed to read config file")?; + info!(?config, "starting lazer-agent"); + + let lazer_publisher = LazerPublisher::new(&config).await; + http_server::run(config, lazer_publisher).await?; + + Ok(()) +} + +fn init_tracing_subscriber(log_format: LogFormat) { #[allow( clippy::expect_used, reason = "application can fail on invalid RUST_LOG" )] - tracing_subscriber::fmt() + let subscriber = tracing_subscriber::fmt() .with_env_filter( EnvFilter::builder() .with_default_directive(LevelFilter::INFO.into()) .from_env() .expect("invalid RUST_LOG env var"), ) - .with_span_events(FmtSpan::NONE) - .json() - .with_span_list(false) - .init(); + .with_span_events(FmtSpan::NONE); - let args = Cli::parse(); - let config = - config::load_config(args.config.to_string()).context("Failed to read config file")?; - info!(?config, "starting lazer-agent"); - - let lazer_publisher = LazerPublisher::new(&config).await; - http_server::run(config, lazer_publisher).await?; - - Ok(()) + match log_format { + 
LogFormat::Json => { + subscriber.json().with_span_list(false).init(); + } + LogFormat::Compact => { + subscriber.compact().init(); + } + LogFormat::Pretty => { + subscriber.pretty().init(); + } + } } diff --git a/contract_manager/scripts/update_all_pricefeeds.ts b/contract_manager/scripts/update_all_pricefeeds.ts index fcfe4e7b22..f28b88a208 100644 --- a/contract_manager/scripts/update_all_pricefeeds.ts +++ b/contract_manager/scripts/update_all_pricefeeds.ts @@ -75,6 +75,8 @@ async function main() { ), ), ); + // Wait for 2 seconds to avoid rate limiting and nonce collision + await new Promise((resolve) => setTimeout(resolve, 2000)); } } diff --git a/contract_manager/store/chains/EvmChains.json b/contract_manager/store/chains/EvmChains.json index 1d82100973..d79ae960e3 100644 --- a/contract_manager/store/chains/EvmChains.json +++ b/contract_manager/store/chains/EvmChains.json @@ -749,7 +749,7 @@ { "id": "polynomial_testnet", "mainnet": false, - "rpcUrl": "https://rpc-polynomial-network-testnet-x0tryg8u1c.t.conduit.xyz", + "rpcUrl": "https://rpc.sepolia.polynomial.fi", "networkId": 80008, "type": "EvmChain" }, diff --git a/contract_manager/store/contracts/EvmExecutorContracts.json b/contract_manager/store/contracts/EvmExecutorContracts.json index 57023ef3d8..0de09ccf4c 100644 --- a/contract_manager/store/contracts/EvmExecutorContracts.json +++ b/contract_manager/store/contracts/EvmExecutorContracts.json @@ -198,5 +198,15 @@ "chain": "ethereal_testnet", "address": "0xD458261E832415CFd3BAE5E416FdF3230ce6F134", "type": "EvmExecutorContract" + }, + { + "chain": "polynomial_testnet", + "address": "0xf0a1b566B55e0A0CB5BeF52Eb2a57142617Bee67", + "type": "EvmExecutorContract" + }, + { + "chain": "polynomial", + "address": "0x23f0e8FAeE7bbb405E7A7C3d60138FCfd43d7509", + "type": "EvmExecutorContract" } -] \ No newline at end of file +] diff --git a/governance/remote_executor/programs/remote-executor/Cargo.toml b/governance/remote_executor/programs/remote-executor/Cargo.toml 
index 900299be31..d212422856 100644 --- a/governance/remote_executor/programs/remote-executor/Cargo.toml +++ b/governance/remote_executor/programs/remote-executor/Cargo.toml @@ -30,13 +30,14 @@ sonic_testnet = [] atlas_testnet = [] mantis_mainnet = [] sonic_mainnet = [] +fogo_testnet = [] [dependencies] -anchor-lang = {version = "0.30.1", features = ["init-if-needed"]} -wormhole-solana = { git = "https://github.com/guibescos/wormhole-solana", rev="f14b3b54c1e37e1aaf8c2ac2a5e236832ffdb3c2"} +anchor-lang = { version = "0.30.1", features = ["init-if-needed"] } +wormhole-solana = { git = "https://github.com/guibescos/wormhole-solana", rev = "f14b3b54c1e37e1aaf8c2ac2a5e236832ffdb3c2" } wormhole-sdk = { git = "https://github.com/wormhole-foundation/wormhole", tag = "v2.17.1" } -serde_wormhole = { git = "https://github.com/wormhole-foundation/wormhole", tag = "v2.17.1"} +serde_wormhole = { git = "https://github.com/wormhole-foundation/wormhole", tag = "v2.17.1" } boolinator = "2.4.0" [dev-dependencies] diff --git a/governance/remote_executor/programs/remote-executor/src/state/governance_payload.rs b/governance/remote_executor/programs/remote-executor/src/state/governance_payload.rs index ec74f34fa3..963e36aa25 100644 --- a/governance/remote_executor/programs/remote-executor/src/state/governance_payload.rs +++ b/governance/remote_executor/programs/remote-executor/src/state/governance_payload.rs @@ -19,6 +19,7 @@ pub const CHAIN_ID_ARRAY: &[(&str, u16)] = &[ ("atlas_testnet", 40007), ("mantis_mainnet", 40008), ("sonic_mainnet", 40009), + ("fogo_testnet", 40010), ]; #[cfg(any(feature = "pythnet", feature = "pythtest"))] @@ -51,6 +52,9 @@ pub const CHAIN_ID: u16 = 40008; #[cfg(feature = "sonic_mainnet")] pub const CHAIN_ID: u16 = 40009; +#[cfg(feature = "fogo_testnet")] +pub const CHAIN_ID: u16 = 40010; + #[derive(AnchorDeserialize, AnchorSerialize, Debug, PartialEq, Eq)] pub struct ExecutorPayload { pub header: GovernanceHeader, diff --git 
a/governance/remote_executor/programs/remote-executor/src/state/posted_vaa.rs b/governance/remote_executor/programs/remote-executor/src/state/posted_vaa.rs index 02747af01d..c987c1dadd 100644 --- a/governance/remote_executor/programs/remote-executor/src/state/posted_vaa.rs +++ b/governance/remote_executor/programs/remote-executor/src/state/posted_vaa.rs @@ -26,6 +26,7 @@ impl Owner for AnchorVaa { feature = "atlas_testnet", feature = "mantis_mainnet", feature = "sonic_mainnet", + feature = "fogo_testnet", ))] fn owner() -> Pubkey { Pubkey::from_str("HDwcJBJXjL9FpJ7UBsYBtaDjsBUhuLCUYoz3zr8SWWaQ").unwrap() diff --git a/governance/xc_admin/packages/xc_admin_common/src/chains.ts b/governance/xc_admin/packages/xc_admin_common/src/chains.ts index 70439d8042..147e15d061 100644 --- a/governance/xc_admin/packages/xc_admin_common/src/chains.ts +++ b/governance/xc_admin/packages/xc_admin_common/src/chains.ts @@ -33,6 +33,7 @@ export const RECEIVER_CHAINS = { atlas_testnet: 40007, mantis_mainnet: 40008, sonic_mainnet: 40009, + fogo_testnet: 40010, cronos: 60001, kcc: 60002, diff --git a/lazer/contracts/evm/script/PythLazerChangeOwnership.s.sol b/lazer/contracts/evm/script/PythLazerChangeOwnership.s.sol index 62bb1b7410..b9dcbc7773 100644 --- a/lazer/contracts/evm/script/PythLazerChangeOwnership.s.sol +++ b/lazer/contracts/evm/script/PythLazerChangeOwnership.s.sol @@ -1,36 +1,65 @@ // SPDX-License-Identifier: UNLICENSED pragma solidity ^0.8.13; +// --- Script Purpose --- +// This script transfers ownership of the deployed PythLazer contract (proxy) to a new owner contract (typically the governance executor contract). +// Usage: Run this script after deploying the new executor contract on the target chain. Ensure the executor address is correct and deployed. +// Preconditions: +// - The LAZER_PROXY_ADDRESS must point to the deployed PythLazer proxy contract. Currently set to 0xACeA761c27A909d4D3895128EBe6370FDE2dF481, which was made using createX. 
+// - The NEW_OWNER must be the deployed executor contract address on this chain. +// - The script must be run by the current owner (OLD_OWNER) of the PythLazer contract. +// - The DEPLOYER_PRIVATE_KEY environment variable must be set to the current owner's private key. +// +// Steps: +// 1. Log current and new owner addresses, and the proxy address. +// 2. Check the current owner matches the expected OLD_OWNER. +// 3. Transfer ownership to the NEW_OWNER (executor contract). +// 4. Log the new owner for verification. +// +// Note: This script is intended for use with Foundry (forge-std) tooling. + import {Script, console} from "forge-std/Script.sol"; import {PythLazer} from "../src/PythLazer.sol"; import {ERC1967Proxy} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol"; import {UUPSUpgradeable} from "@openzeppelin/contracts/proxy/utils/UUPSUpgradeable.sol"; +// Main script contract for ownership transfer contract PythLazerChangeOwnership is Script { + // Address of the deployed PythLazer proxy contract address public constant LAZER_PROXY_ADDRESS = address(0xACeA761c27A909d4D3895128EBe6370FDE2dF481); + // Private key of the current owner, loaded from environment variable uint256 public OLD_OWNER_PRIVATE_KEY = vm.envUint("DEPLOYER_PRIVATE_KEY"); + // Current owner address, derived from private key address public OLD_OWNER = vm.addr(OLD_OWNER_PRIVATE_KEY); - // EVM Executor Contract + // Address of the new owner (should be the deployed executor contract) address public NEW_OWNER = vm.envAddress("NEW_OWNER"); + // Entry point for the script function run() public { + // Log relevant addresses for traceability console.log("Old owner: %s", OLD_OWNER); console.log("New owner: %s", NEW_OWNER); console.log("Lazer proxy address: %s", LAZER_PROXY_ADDRESS); console.log("Lazer owner: %s", PythLazer(LAZER_PROXY_ADDRESS).owner()); console.log("Moving ownership from %s to %s", OLD_OWNER, NEW_OWNER); + // Get the PythLazer contract instance at the proxy address PythLazer 
lazer = PythLazer(LAZER_PROXY_ADDRESS); + // Start broadcasting transactions as the old owner vm.startBroadcast(OLD_OWNER_PRIVATE_KEY); + // Ensure the current owner matches the expected old owner require(lazer.owner() == OLD_OWNER, "Old owner mismatch"); + // Transfer ownership to the new owner (executor contract) lazer.transferOwnership(NEW_OWNER); console.log("Ownership transferred"); + // Log the new owner for verification console.log( "New Lazer owner: %s", PythLazer(LAZER_PROXY_ADDRESS).owner() ); + // Stop broadcasting vm.stopBroadcast(); } } diff --git a/lazer/contracts/solana/programs/pyth-lazer-solana-contract/Cargo.toml b/lazer/contracts/solana/programs/pyth-lazer-solana-contract/Cargo.toml index d7d359191e..6eee9d3173 100644 --- a/lazer/contracts/solana/programs/pyth-lazer-solana-contract/Cargo.toml +++ b/lazer/contracts/solana/programs/pyth-lazer-solana-contract/Cargo.toml @@ -22,7 +22,7 @@ no-log-ix-name = [] idl-build = ["anchor-lang/idl-build"] [dependencies] -pyth-lazer-protocol = { path = "../../../../sdk/rust/protocol", version = "0.9.1" } +pyth-lazer-protocol = { path = "../../../../sdk/rust/protocol", version = "0.10.0" } anchor-lang = "0.30.1" bytemuck = "1.20.0" diff --git a/lazer/publisher_sdk/rust/Cargo.toml b/lazer/publisher_sdk/rust/Cargo.toml index 2aad9db22c..4d85f56c3b 100644 --- a/lazer/publisher_sdk/rust/Cargo.toml +++ b/lazer/publisher_sdk/rust/Cargo.toml @@ -1,21 +1,16 @@ [package] name = "pyth-lazer-publisher-sdk" -version = "0.2.0" +version = "0.2.1" edition = "2021" description = "Pyth Lazer Publisher SDK types." 
license = "Apache-2.0" repository = "https://github.com/pyth-network/pyth-crosschain" [dependencies] -pyth-lazer-protocol = { version = "0.9.1", path = "../../sdk/rust/protocol" } +pyth-lazer-protocol = { version = "0.10.0", path = "../../sdk/rust/protocol" } anyhow = "1.0.98" protobuf = "3.7.2" -humantime = "2.2.0" -tracing = "0.1.41" -serde = { version = "1.0.219", features = ["derive"] } serde_json = "1.0.140" -derive_more = { version = "2.0.1", features = ["from"] } -hex = "0.4.3" [build-dependencies] fs-err = "3.1.0" diff --git a/lazer/sdk/rust/client/Cargo.toml b/lazer/sdk/rust/client/Cargo.toml index 7011711960..22491675e7 100644 --- a/lazer/sdk/rust/client/Cargo.toml +++ b/lazer/sdk/rust/client/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "pyth-lazer-client" -version = "0.1.3" +version = "2.0.0" edition = "2021" description = "A Rust client for Pyth Lazer" license = "Apache-2.0" [dependencies] -pyth-lazer-protocol = { path = "../protocol", version = "0.9.1" } +pyth-lazer-protocol = { path = "../protocol", version = "0.10.0" } tokio = { version = "1", features = ["full"] } tokio-tungstenite = { version = "0.20", features = ["native-tls"] } futures-util = "0.3" @@ -17,6 +17,9 @@ anyhow = "1.0" tracing = "0.1" url = "2.4" derive_more = { version = "1.0.0", features = ["from"] } +backoff = { version = "0.4.0", features = ["futures", "tokio"] } +ttl_cache = "0.5.1" + [dev-dependencies] bincode = "1.3.3" @@ -25,3 +28,4 @@ hex = "0.4.3" libsecp256k1 = "0.7.1" bs58 = "0.5.1" alloy-primitives = "0.8.19" +tracing-subscriber = { version = "0.3.19", features = ["env-filter", "json"] } diff --git a/lazer/sdk/rust/client/examples/subscribe_price_feeds.rs b/lazer/sdk/rust/client/examples/subscribe_price_feeds.rs index 30efd2a8b8..e39c4dc41b 100644 --- a/lazer/sdk/rust/client/examples/subscribe_price_feeds.rs +++ b/lazer/sdk/rust/client/examples/subscribe_price_feeds.rs @@ -1,6 +1,9 @@ +use std::time::Duration; + use base64::Engine; -use futures_util::StreamExt; -use 
pyth_lazer_client::{AnyResponse, LazerClient}; +use pyth_lazer_client::backoff::PythLazerExponentialBackoffBuilder; +use pyth_lazer_client::client::PythLazerClientBuilder; +use pyth_lazer_client::ws_connection::AnyResponse; use pyth_lazer_protocol::message::{ EvmMessage, LeEcdsaMessage, LeUnsignedMessage, Message, SolanaMessage, }; @@ -9,8 +12,10 @@ use pyth_lazer_protocol::router::{ Channel, DeliveryFormat, FixedRate, Format, JsonBinaryEncoding, PriceFeedId, PriceFeedProperty, SubscriptionParams, SubscriptionParamsRepr, }; -use pyth_lazer_protocol::subscription::{Request, Response, SubscribeRequest, SubscriptionId}; +use pyth_lazer_protocol::subscription::{Response, SubscribeRequest, SubscriptionId}; use tokio::pin; +use tracing::level_filters::LevelFilter; +use tracing_subscriber::EnvFilter; fn get_lazer_access_token() -> String { // Place your access token in your env at LAZER_ACCESS_TOKEN or set it here @@ -20,11 +25,32 @@ fn get_lazer_access_token() -> String { #[tokio::main] async fn main() -> anyhow::Result<()> { + tracing_subscriber::fmt() + .with_env_filter( + EnvFilter::builder() + .with_default_directive(LevelFilter::INFO.into()) + .from_env()?, + ) + .json() + .init(); + // Create and start the client - let mut client = LazerClient::new( - "wss://pyth-lazer.dourolabs.app/v1/stream", - &get_lazer_access_token(), - )?; + let mut client = PythLazerClientBuilder::new(get_lazer_access_token()) + // Optionally override the default endpoints + .with_endpoints(vec![ + "wss://pyth-lazer-0.dourolabs.app/v1/stream".parse()?, + "wss://pyth-lazer-1.dourolabs.app/v1/stream".parse()?, + ]) + // Optionally set the number of connections + .with_num_connections(4) + // Optionally set the backoff strategy + .with_backoff(PythLazerExponentialBackoffBuilder::default().build()) + // Optionally set the timeout for each connection + .with_timeout(Duration::from_secs(5)) + // Optionally set the channel capacity for responses + .with_channel_capacity(1000) + .build()?; + let 
stream = client.start().await?; pin!(stream); @@ -72,16 +98,16 @@ async fn main() -> anyhow::Result<()> { ]; for req in subscription_requests { - client.subscribe(Request::Subscribe(req)).await?; + client.subscribe(req).await?; } println!("Subscribed to price feeds. Waiting for updates..."); // Process the first few updates let mut count = 0; - while let Some(msg) = stream.next().await { + while let Some(msg) = stream.recv().await { // The stream gives us base64-encoded binary messages. We need to decode, parse, and verify them. - match msg? { + match msg { AnyResponse::Json(msg) => match msg { Response::StreamUpdated(update) => { println!("Received a JSON update for {:?}", update.subscription_id); @@ -189,8 +215,6 @@ async fn main() -> anyhow::Result<()> { println!("Unsubscribed from {sub_id:?}"); } - tokio::time::sleep(tokio::time::Duration::from_millis(500)).await; - client.close().await?; Ok(()) } diff --git a/lazer/sdk/rust/client/src/backoff.rs b/lazer/sdk/rust/client/src/backoff.rs new file mode 100644 index 0000000000..b3218b7abe --- /dev/null +++ b/lazer/sdk/rust/client/src/backoff.rs @@ -0,0 +1,118 @@ +//! Exponential backoff implementation for Pyth Lazer client. +//! +//! This module provides a wrapper around the [`backoff`] crate's exponential backoff functionality, +//! offering a simplified interface tailored for Pyth Lazer client operations. + +use std::time::Duration; + +use backoff::{ + default::{INITIAL_INTERVAL_MILLIS, MAX_INTERVAL_MILLIS, MULTIPLIER, RANDOMIZATION_FACTOR}, + ExponentialBackoff, ExponentialBackoffBuilder, +}; + +/// A wrapper around the backoff crate's exponential backoff configuration. +/// +/// This struct encapsulates the parameters needed to configure exponential backoff +/// behavior and can be converted into the backoff crate's [`ExponentialBackoff`] type. +#[derive(Debug)] +pub struct PythLazerExponentialBackoff { + /// The initial retry interval. 
+    initial_interval: Duration,
+    /// The randomization factor to use for creating a range around the retry interval.
+    ///
+    /// A randomization factor of 0.5 results in a random period ranging between 50% below and 50%
+    /// above the retry interval.
+    randomization_factor: f64,
+    /// The value to multiply the current interval with for each retry attempt.
+    multiplier: f64,
+    /// The maximum value of the back off period. Once the retry interval reaches this
+    /// value it stops increasing.
+    max_interval: Duration,
+}
+
+impl From<PythLazerExponentialBackoff> for ExponentialBackoff {
+    fn from(val: PythLazerExponentialBackoff) -> Self {
+        ExponentialBackoffBuilder::default()
+            .with_initial_interval(val.initial_interval)
+            .with_randomization_factor(val.randomization_factor)
+            .with_multiplier(val.multiplier)
+            .with_max_interval(val.max_interval)
+            .with_max_elapsed_time(None)
+            .build()
+    }
+}
+
+/// Builder for [`PythLazerExponentialBackoff`].
+///
+/// Provides a fluent interface for configuring exponential backoff parameters
+/// with sensible defaults from the backoff crate.
+#[derive(Debug)]
+pub struct PythLazerExponentialBackoffBuilder {
+    initial_interval: Duration,
+    randomization_factor: f64,
+    multiplier: f64,
+    max_interval: Duration,
+}
+
+impl Default for PythLazerExponentialBackoffBuilder {
+    fn default() -> Self {
+        Self {
+            initial_interval: Duration::from_millis(INITIAL_INTERVAL_MILLIS),
+            randomization_factor: RANDOMIZATION_FACTOR,
+            multiplier: MULTIPLIER,
+            max_interval: Duration::from_millis(MAX_INTERVAL_MILLIS),
+        }
+    }
+}
+
+impl PythLazerExponentialBackoffBuilder {
+    /// Creates a new builder with default values.
+    pub fn new() -> Self {
+        Default::default()
+    }
+
+    /// Sets the initial retry interval.
+    ///
+    /// This is the starting interval for the first retry attempt.
+ pub fn with_initial_interval(&mut self, initial_interval: Duration) -> &mut Self { + self.initial_interval = initial_interval; + self + } + + /// Sets the randomization factor to use for creating a range around the retry interval. + /// + /// A randomization factor of 0.5 results in a random period ranging between 50% below and 50% + /// above the retry interval. This helps avoid the "thundering herd" problem when multiple + /// clients retry at the same time. + pub fn with_randomization_factor(&mut self, randomization_factor: f64) -> &mut Self { + self.randomization_factor = randomization_factor; + self + } + + /// Sets the value to multiply the current interval with for each retry attempt. + /// + /// A multiplier of 2.0 means each retry interval will be double the previous one. + pub fn with_multiplier(&mut self, multiplier: f64) -> &mut Self { + self.multiplier = multiplier; + self + } + + /// Sets the maximum value of the back off period. + /// + /// Once the retry interval reaches this value it stops increasing, providing + /// an upper bound on the wait time between retries. + pub fn with_max_interval(&mut self, max_interval: Duration) -> &mut Self { + self.max_interval = max_interval; + self + } + + /// Builds the [`PythLazerExponentialBackoff`] configuration. + pub fn build(&self) -> PythLazerExponentialBackoff { + PythLazerExponentialBackoff { + initial_interval: self.initial_interval, + randomization_factor: self.randomization_factor, + multiplier: self.multiplier, + max_interval: self.max_interval, + } + } +} diff --git a/lazer/sdk/rust/client/src/client.rs b/lazer/sdk/rust/client/src/client.rs new file mode 100644 index 0000000000..ad19052df9 --- /dev/null +++ b/lazer/sdk/rust/client/src/client.rs @@ -0,0 +1,398 @@ +//! # Pyth Lazer Client +//! +//! This module provides a high-level client for connecting to Pyth Lazer data streams. +//! The client maintains multiple WebSocket connections for redundancy and provides +//! 
automatic deduplication of messages. +//! +//! ## Features +//! +//! - Multiple redundant WebSocket connections +//! - Automatic message deduplication +//! - Exponential backoff for reconnections +//! - Configurable timeouts and channel capacities +//! - Builder pattern for easy configuration +//! +//! ## Basic Usage +//! +//! ```rust,ignore +//! use pyth_lazer_client::PythLazerClientBuilder; +//! use pyth_lazer_protocol::subscription::SubscribeRequest; +//! +//! #[tokio::main] +//! async fn main() -> anyhow::Result<()> { +//! let mut client = PythLazerClientBuilder::new("your_access_token".to_string()) +//! .with_num_connections(2) +//! .build()?; +//! +//! let mut receiver = client.start().await?; +//! +//! // Subscribe to price feeds +//! let subscribe_request = SubscribeRequest { +//! // ... configure subscription +//! }; +//! client.subscribe(subscribe_request).await?; +//! +//! // Process incoming messages +//! while let Some(response) = receiver.recv().await { +//! println!("Received: {:?}", response); +//! } +//! +//! Ok(()) +//! } +//! ``` + +use std::time::Duration; + +use crate::{ + backoff::{PythLazerExponentialBackoff, PythLazerExponentialBackoffBuilder}, + resilient_ws_connection::PythLazerResilientWSConnection, + ws_connection::AnyResponse, + CHANNEL_CAPACITY, +}; +use anyhow::{bail, Result}; +use backoff::ExponentialBackoff; +use pyth_lazer_protocol::subscription::{SubscribeRequest, SubscriptionId}; +use tokio::sync::mpsc::{self, error::TrySendError}; +use tracing::{error, warn}; +use ttl_cache::TtlCache; +use url::Url; + +const DEDUP_CACHE_SIZE: usize = 100_000; +const DEDUP_TTL: Duration = Duration::from_secs(10); + +const DEFAULT_ENDPOINTS: [&str; 2] = [ + "wss://pyth-lazer-0.dourolabs.app/v1/stream", + "wss://pyth-lazer-1.dourolabs.app/v1/stream", +]; +const DEFAULT_NUM_CONNECTIONS: usize = 4; +const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); + +/// A high-performance client for connecting to Pyth Lazer data streams. 
+///
+/// The `PythLazerClient` maintains multiple WebSocket connections to Pyth Lazer endpoints
+/// for redundancy. It automatically handles connection management,
+/// message deduplication, and provides a unified stream of price updates.
+///
+/// ## Architecture
+///
+/// - Maintains multiple WebSocket connections to different endpoints
+/// - Uses a TTL cache for deduplicating messages across connections
+/// - Provides a single channel for consuming deduplicated messages
+/// - Handles connection failures with exponential backoff
+pub struct PythLazerClient {
+    endpoints: Vec<Url>,
+    access_token: String,
+    num_connections: usize,
+    ws_connections: Vec<PythLazerResilientWSConnection>,
+    backoff: ExponentialBackoff,
+    timeout: Duration,
+    channel_capacity: usize,
+}
+
+impl PythLazerClient {
+    /// Creates a new Pyth Lazer client instance.
+    ///
+    /// This is a low-level constructor. Consider using [`PythLazerClientBuilder`] for a more
+    /// convenient way to create clients with sensible defaults.
+    ///
+    /// # Arguments
+    ///
+    /// * `endpoints` - A vector of WebSocket endpoint URLs to connect to. Must not be empty.
+    /// * `access_token` - The authentication token for accessing Pyth Lazer services
+    /// * `num_connections` - The number of WebSocket connections to maintain for redundancy
+    /// * `backoff` - The exponential backoff configuration for connection retries
+    /// * `timeout` - The timeout duration for WebSocket operations
+    /// * `channel_capacity` - The capacity of the message channel
+    ///
+    /// # Returns
+    ///
+    /// Returns `Ok(PythLazerClient)` on success, or an error if the configuration is invalid.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if:
+    /// - The `endpoints` vector is empty
+    ///
+    pub fn new(
+        endpoints: Vec<Url>,
+        access_token: String,
+        num_connections: usize,
+        backoff: PythLazerExponentialBackoff,
+        timeout: Duration,
+        channel_capacity: usize,
+    ) -> Result<Self> {
+        if endpoints.is_empty() {
+            bail!("At least one endpoint must be provided");
+        }
+        Ok(Self {
+            endpoints,
+            access_token,
+            num_connections,
+            ws_connections: Vec::with_capacity(num_connections),
+            backoff: backoff.into(),
+            timeout,
+            channel_capacity,
+        })
+    }
+
+    /// Starts the client and begins establishing WebSocket connections.
+    ///
+    /// This method initializes all WebSocket connections and starts the message processing
+    /// loop. It returns a receiver channel that will yield deduplicated messages from
+    /// all connections.
+    ///
+    /// # Returns
+    ///
+    /// Returns a `Receiver` that yields deduplicated messages from all
+    /// WebSocket connections. The receiver will continue to yield messages until
+    /// all connections are closed or the client is dropped.
+    ///
+    /// # Errors
+    ///
+    /// This method itself doesn't return errors, but individual connection failures
+    /// are handled internally with automatic reconnection using the configured backoff
+    /// strategy.
+    ///
+    /// # Message Deduplication
+    ///
+    /// Messages are deduplicated using a TTL cache with a 10-second window. This ensures
+    /// that identical messages received from multiple connections are only delivered once.
+    ///
+    pub async fn start(&mut self) -> Result<mpsc::Receiver<AnyResponse>> {
+        let (sender, receiver) = mpsc::channel::<AnyResponse>(self.channel_capacity);
+        let (ws_connection_sender, mut ws_connection_receiver) =
+            mpsc::channel::<AnyResponse>(CHANNEL_CAPACITY);
+
+        for i in 0..self.num_connections {
+            let endpoint = self.endpoints[i % self.endpoints.len()].clone();
+            let connection = PythLazerResilientWSConnection::new(
+                endpoint,
+                self.access_token.clone(),
+                self.backoff.clone(),
+                self.timeout,
+                ws_connection_sender.clone(),
+            );
+            self.ws_connections.push(connection);
+        }
+
+        let mut seen_updates = TtlCache::new(DEDUP_CACHE_SIZE);
+
+        tokio::spawn(async move {
+            while let Some(response) = ws_connection_receiver.recv().await {
+                let cache_key = response.cache_key();
+                if seen_updates.contains_key(&cache_key) {
+                    continue;
+                }
+                seen_updates.insert(cache_key, response.clone(), DEDUP_TTL);
+
+                match sender.try_send(response) {
+                    Ok(_) => (),
+                    Err(TrySendError::Full(r)) => {
+                        warn!("Sender channel is full, responses will be delayed");
+                        if sender.send(r).await.is_err() {
+                            error!("Sender channel is closed, stopping client");
+                        }
+                    }
+                    Err(TrySendError::Closed(_)) => {
+                        error!("Sender channel is closed, stopping client");
+                    }
+                }
+            }
+        });
+
+        Ok(receiver)
+    }
+
+    /// Subscribes to data streams across all WebSocket connections.
+    ///
+    /// This method sends the subscription request to all active WebSocket connections,
+    /// ensuring redundancy. If any connection fails to subscribe,
+    /// an error is returned, but other connections may still be subscribed.
+    ///
+    /// # Arguments
+    ///
+    /// * `subscribe_request` - The subscription request specifying which data streams to subscribe to
+    ///
+    /// # Returns
+    ///
+    /// Returns `Ok(())` if the subscription was successfully sent to all connections,
+    /// or an error if any connection failed to process the subscription.
+    ///
+    pub async fn subscribe(&mut self, subscribe_request: SubscribeRequest) -> Result<()> {
+        for connection in &mut self.ws_connections {
+            connection.subscribe(subscribe_request.clone()).await?;
+        }
+        Ok(())
+    }
+
+    /// Unsubscribes from a specific data stream across all WebSocket connections.
+    ///
+    /// This method sends an unsubscribe request for the specified subscription ID
+    /// to all active WebSocket connections.
+    ///
+    /// # Arguments
+    ///
+    /// * `subscription_id` - The ID of the subscription to cancel
+    ///
+    /// # Returns
+    ///
+    /// Returns `Ok(())` if the unsubscribe request was successfully sent to all connections,
+    /// or an error if any connection failed to process the request.
+    ///
+    pub async fn unsubscribe(&mut self, subscription_id: SubscriptionId) -> Result<()> {
+        for connection in &mut self.ws_connections {
+            connection.unsubscribe(subscription_id).await?;
+        }
+        Ok(())
+    }
+}
+
+/// A builder for creating [`PythLazerClient`] instances with customizable configuration.
+///
+/// The builder provides a convenient way to configure a Pyth Lazer client with sensible
+/// defaults while allowing customization of all parameters. It follows the builder pattern
+/// for a fluent API.
+///
+/// ## Default Configuration
+///
+/// - **Endpoints**: Uses Pyth Lazer's default production endpoints
+/// - **Connections**: 4 concurrent WebSocket connections
+/// - **Timeout**: 5 seconds for WebSocket operations
+/// - **Backoff**: Exponential backoff with default settings
+/// - **Channel Capacity**: Uses the default 1000
+///
+pub struct PythLazerClientBuilder {
+    endpoints: Vec<Url>,
+    access_token: String,
+    num_connections: usize,
+    backoff: PythLazerExponentialBackoff,
+    timeout: Duration,
+    channel_capacity: usize,
+}
+
+impl PythLazerClientBuilder {
+    /// Creates a new builder with default configuration.
+    ///
+    /// This initializes the builder with sensible defaults for production use:
+    /// - Default Pyth Lazer endpoints
+    /// - 4 WebSocket connections
+    /// - 5-second timeout
+    ///
+    /// # Arguments
+    ///
+    /// * `access_token` - The authentication token for accessing Pyth Lazer services
+    ///
+    pub fn new(access_token: String) -> Self {
+        Self {
+            endpoints: DEFAULT_ENDPOINTS
+                .iter()
+                .map(|&s| s.parse().unwrap())
+                .collect(),
+            access_token,
+            num_connections: DEFAULT_NUM_CONNECTIONS,
+            backoff: PythLazerExponentialBackoffBuilder::default().build(),
+            timeout: DEFAULT_TIMEOUT,
+            channel_capacity: CHANNEL_CAPACITY,
+        }
+    }
+
+    /// Sets custom WebSocket endpoints for the client.
+    ///
+    /// By default, the client uses Pyth Lazer's production endpoints. Use this method
+    /// to connect to different environments (staging, local development) or to use
+    /// custom endpoint configurations.
+    ///
+    /// # Arguments
+    ///
+    /// * `endpoints` - A vector of WebSocket endpoint URLs. Must not be empty.
+    ///
+    pub fn with_endpoints(mut self, endpoints: Vec<Url>) -> Self {
+        self.endpoints = endpoints;
+        self
+    }
+
+    /// Sets the number of concurrent WebSocket connections to maintain.
+    ///
+    /// More connections provide better redundancy and can improve throughput,
+    /// but also consume more resources.
+    ///
+    /// # Arguments
+    ///
+    /// * `num_connections` - The number of WebSocket connections (must be > 0)
+    ///
+    pub fn with_num_connections(mut self, num_connections: usize) -> Self {
+        self.num_connections = num_connections;
+        self
+    }
+
+    /// Sets the exponential backoff configuration for connection retries.
+    ///
+    /// The backoff strategy determines how the client handles connection failures
+    /// and retries.
+ /// + /// # Arguments + /// + /// * `backoff` - The exponential backoff configuration + /// + pub fn with_backoff(mut self, backoff: PythLazerExponentialBackoff) -> Self { + self.backoff = backoff; + self + } + + /// Sets the timeout duration for WebSocket operations. + /// + /// This timeout applies to each WebSocket connection, + /// if no response is received within this duration, + /// the connection will be considered failed and retried. + /// + /// # Arguments + /// + /// * `timeout` - The timeout duration for each WebSocket + /// + pub fn with_timeout(mut self, timeout: Duration) -> Self { + self.timeout = timeout; + self + } + + /// Sets the capacity of the internal message channel. + /// + /// This determines how many messages can be buffered internally before + /// the client starts applying backpressure. + /// + /// # Arguments + /// + /// * `channel_capacity` - The channel capacity (number of messages) + /// + pub fn with_channel_capacity(mut self, channel_capacity: usize) -> Self { + self.channel_capacity = channel_capacity; + self + } + + /// Builds the configured [`PythLazerClient`] instance. + /// + /// This consumes the builder and creates a new client with the specified + /// configuration. The client is ready to use but connections are not + /// established until [`PythLazerClient::start`] is called. + /// + /// # Returns + /// + /// Returns `Ok(PythLazerClient)` on success, or an error if the configuration + /// is invalid. 
+ /// + /// # Errors + /// + /// Returns an error if: + /// - No endpoints are configured + /// - Any configuration parameter is invalid + /// + pub fn build(self) -> Result { + PythLazerClient::new( + self.endpoints, + self.access_token, + self.num_connections, + self.backoff, + self.timeout, + self.channel_capacity, + ) + } +} diff --git a/lazer/sdk/rust/client/src/lib.rs b/lazer/sdk/rust/client/src/lib.rs index 30c1df8902..c62eab1ff4 100644 --- a/lazer/sdk/rust/client/src/lib.rs +++ b/lazer/sdk/rust/client/src/lib.rs @@ -1,138 +1,6 @@ -use anyhow::Result; -use derive_more::From; -use futures_util::{SinkExt, StreamExt, TryStreamExt}; -use pyth_lazer_protocol::{ - binary_update::BinaryWsUpdate, - subscription::{ErrorResponse, Request, Response, SubscriptionId, UnsubscribeRequest}, -}; -use tokio_tungstenite::{connect_async, tungstenite::Message}; -use url::Url; +const CHANNEL_CAPACITY: usize = 1000; -/// A WebSocket client for consuming Pyth Lazer price feed updates -/// -/// This client provides a simple interface to: -/// - Connect to a Lazer WebSocket endpoint -/// - Subscribe to price feed updates -/// - Receive updates as a stream of messages -/// -pub struct LazerClient { - endpoint: Url, - access_token: String, - ws_sender: Option< - futures_util::stream::SplitSink< - tokio_tungstenite::WebSocketStream< - tokio_tungstenite::MaybeTlsStream, - >, - Message, - >, - >, -} - -#[derive(Debug, Clone, PartialEq, Eq, Hash, From)] -pub enum AnyResponse { - Json(Response), - Binary(BinaryWsUpdate), -} - -impl LazerClient { - /// Creates a new Lazer client instance - /// - /// # Arguments - /// * `endpoint` - The WebSocket URL of the Lazer service - /// * `access_token` - Access token for authentication - /// - /// # Returns - /// Returns a new client instance (not yet connected) - pub fn new(endpoint: &str, access_token: &str) -> Result { - let endpoint = Url::parse(endpoint)?; - let access_token = access_token.to_string(); - Ok(Self { - endpoint, - access_token, - 
ws_sender: None, - }) - } - - /// Starts the WebSocket connection - /// - /// # Returns - /// Returns a stream of responses from the server - pub async fn start(&mut self) -> Result>> { - let url = self.endpoint.clone(); - let mut request = - tokio_tungstenite::tungstenite::client::IntoClientRequest::into_client_request(url)?; - - request.headers_mut().insert( - "Authorization", - format!("Bearer {}", self.access_token).parse().unwrap(), - ); - - let (ws_stream, _) = connect_async(request).await?; - let (ws_sender, ws_receiver) = ws_stream.split(); - - self.ws_sender = Some(ws_sender); - let response_stream = - ws_receiver - .map_err(anyhow::Error::from) - .try_filter_map(|msg| async { - let r: Result> = match msg { - Message::Text(text) => { - Ok(Some(serde_json::from_str::(&text)?.into())) - } - Message::Binary(data) => { - Ok(Some(BinaryWsUpdate::deserialize_slice(&data)?.into())) - } - Message::Close(_) => Ok(Some( - Response::Error(ErrorResponse { - error: "WebSocket connection closed".to_string(), - }) - .into(), - )), - _ => Ok(None), - }; - r - }); - - Ok(response_stream) - } - - /// Subscribes to price feed updates - /// - /// # Arguments - /// * `request` - A subscription request containing feed IDs and parameters - pub async fn subscribe(&mut self, request: Request) -> Result<()> { - if let Some(sender) = &mut self.ws_sender { - let msg = serde_json::to_string(&request)?; - sender.send(Message::Text(msg)).await?; - Ok(()) - } else { - anyhow::bail!("WebSocket connection not started") - } - } - - /// Unsubscribes from a previously subscribed feed - /// - /// # Arguments - /// * `subscription_id` - The ID of the subscription to cancel - pub async fn unsubscribe(&mut self, subscription_id: SubscriptionId) -> Result<()> { - if let Some(sender) = &mut self.ws_sender { - let request = Request::Unsubscribe(UnsubscribeRequest { subscription_id }); - let msg = serde_json::to_string(&request)?; - sender.send(Message::Text(msg)).await?; - Ok(()) - } else { - 
anyhow::bail!("WebSocket connection not started") - } - } - - /// Closes the WebSocket connection - pub async fn close(&mut self) -> Result<()> { - if let Some(sender) = &mut self.ws_sender { - sender.send(Message::Close(None)).await?; - self.ws_sender = None; - Ok(()) - } else { - anyhow::bail!("WebSocket connection not started") - } - } -} +pub mod backoff; +pub mod client; +pub mod resilient_ws_connection; +pub mod ws_connection; diff --git a/lazer/sdk/rust/client/src/resilient_ws_connection.rs b/lazer/sdk/rust/client/src/resilient_ws_connection.rs new file mode 100644 index 0000000000..70385d5946 --- /dev/null +++ b/lazer/sdk/rust/client/src/resilient_ws_connection.rs @@ -0,0 +1,210 @@ +use std::time::Duration; + +use backoff::{backoff::Backoff, ExponentialBackoff}; +use futures_util::StreamExt; +use pyth_lazer_protocol::subscription::{ + Request, SubscribeRequest, SubscriptionId, UnsubscribeRequest, +}; +use tokio::{pin, select, sync::mpsc, time::Instant}; +use tracing::{error, info, warn}; +use url::Url; + +use crate::{ + ws_connection::{AnyResponse, PythLazerWSConnection}, + CHANNEL_CAPACITY, +}; +use anyhow::{bail, Context, Result}; + +const BACKOFF_RESET_DURATION: Duration = Duration::from_secs(10); + +pub struct PythLazerResilientWSConnection { + request_sender: mpsc::Sender, +} + +impl PythLazerResilientWSConnection { + /// Creates a new resilient WebSocket client instance + /// + /// # Arguments + /// * `endpoint` - The WebSocket URL of the Lazer service + /// * `access_token` - Access token for authentication + /// * `sender` - A sender to send responses back to the client + /// + /// # Returns + /// Returns a new client instance (not yet connected) + pub fn new( + endpoint: Url, + access_token: String, + backoff: ExponentialBackoff, + timeout: Duration, + sender: mpsc::Sender, + ) -> Self { + let (request_sender, mut request_receiver) = mpsc::channel(CHANNEL_CAPACITY); + let mut task = + PythLazerResilientWSConnectionTask::new(endpoint, access_token, 
backoff, timeout); + + tokio::spawn(async move { + if let Err(e) = task.run(sender, &mut request_receiver).await { + error!("Resilient WebSocket connection task failed: {}", e); + } + }); + + Self { request_sender } + } + + pub async fn subscribe(&mut self, request: SubscribeRequest) -> Result<()> { + self.request_sender + .send(Request::Subscribe(request)) + .await + .context("Failed to send subscribe request")?; + Ok(()) + } + + pub async fn unsubscribe(&mut self, subscription_id: SubscriptionId) -> Result<()> { + self.request_sender + .send(Request::Unsubscribe(UnsubscribeRequest { subscription_id })) + .await + .context("Failed to send unsubscribe request")?; + Ok(()) + } +} + +struct PythLazerResilientWSConnectionTask { + endpoint: Url, + access_token: String, + subscriptions: Vec, + backoff: ExponentialBackoff, + timeout: Duration, +} + +impl PythLazerResilientWSConnectionTask { + pub fn new( + endpoint: Url, + access_token: String, + backoff: ExponentialBackoff, + timeout: Duration, + ) -> Self { + Self { + endpoint, + access_token, + subscriptions: Vec::new(), + backoff, + timeout, + } + } + + pub async fn run( + &mut self, + response_sender: mpsc::Sender, + request_receiver: &mut mpsc::Receiver, + ) -> Result<()> { + loop { + let start_time = Instant::now(); + if let Err(e) = self.start(response_sender.clone(), request_receiver).await { + // If a connection was working for BACKOFF_RESET_DURATION + // and timeout + 1sec, it was considered successful therefore reset the backoff + if start_time.elapsed() > BACKOFF_RESET_DURATION + && start_time.elapsed() > self.timeout + Duration::from_secs(1) + { + self.backoff.reset(); + } + + let delay = self.backoff.next_backoff(); + match delay { + Some(d) => { + info!("WebSocket connection failed: {}. 
Retrying in {:?}", e, d); + tokio::time::sleep(d).await; + } + None => { + bail!( + "Max retries reached for WebSocket connection to {}, this should never happen, please contact developers", + self.endpoint + ); + } + } + } + } + } + + pub async fn start( + &mut self, + sender: mpsc::Sender, + request_receiver: &mut mpsc::Receiver, + ) -> Result<()> { + let mut ws_connection = + PythLazerWSConnection::new(self.endpoint.clone(), self.access_token.clone())?; + let stream = ws_connection.start().await?; + pin!(stream); + + for subscription in self.subscriptions.clone() { + ws_connection + .send_request(Request::Subscribe(subscription)) + .await?; + } + loop { + let timeout_response = tokio::time::timeout(self.timeout, stream.next()); + + select! { + response = timeout_response => { + match response { + Ok(Some(response)) => match response { + Ok(response) => { + sender + .send(response) + .await + .context("Failed to send response")?; + } + Err(e) => { + bail!("WebSocket stream error: {}", e); + } + }, + Ok(None) => { + bail!("WebSocket stream ended unexpectedly"); + } + Err(_elapsed) => { + bail!("WebSocket stream timed out"); + } + } + } + Some(request) = request_receiver.recv() => { + match request { + Request::Subscribe(request) => { + self.subscribe(&mut ws_connection, request).await?; + } + Request::Unsubscribe(request) => { + self.unsubscribe(&mut ws_connection, request).await?; + } + } + } + } + } + } + + pub async fn subscribe( + &mut self, + ws_connection: &mut PythLazerWSConnection, + request: SubscribeRequest, + ) -> Result<()> { + self.subscriptions.push(request.clone()); + ws_connection.subscribe(request).await + } + + pub async fn unsubscribe( + &mut self, + ws_connection: &mut PythLazerWSConnection, + request: UnsubscribeRequest, + ) -> Result<()> { + if let Some(index) = self + .subscriptions + .iter() + .position(|r| r.subscription_id == request.subscription_id) + { + self.subscriptions.remove(index); + } else { + warn!( + "Unsubscribe called for 
non-existent subscription: {:?}", + request.subscription_id + ); + } + ws_connection.unsubscribe(request).await + } +} diff --git a/lazer/sdk/rust/client/src/ws_connection.rs b/lazer/sdk/rust/client/src/ws_connection.rs new file mode 100644 index 0000000000..385bd2efd7 --- /dev/null +++ b/lazer/sdk/rust/client/src/ws_connection.rs @@ -0,0 +1,144 @@ +use std::hash::{DefaultHasher, Hash, Hasher}; + +use anyhow::Result; +use derive_more::From; +use futures_util::{SinkExt, StreamExt, TryStreamExt}; +use pyth_lazer_protocol::{ + binary_update::BinaryWsUpdate, + subscription::{ErrorResponse, Request, Response, SubscribeRequest, UnsubscribeRequest}, +}; +use tokio_tungstenite::{connect_async, tungstenite::Message}; +use url::Url; + +/// A WebSocket client for consuming Pyth Lazer price feed updates +/// +/// This client provides a simple interface to: +/// - Connect to a Lazer WebSocket endpoint +/// - Subscribe to price feed updates +/// - Receive updates as a stream of messages +/// +pub struct PythLazerWSConnection { + endpoint: Url, + access_token: String, + ws_sender: Option< + futures_util::stream::SplitSink< + tokio_tungstenite::WebSocketStream< + tokio_tungstenite::MaybeTlsStream, + >, + Message, + >, + >, +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, From)] +pub enum AnyResponse { + Json(Response), + Binary(BinaryWsUpdate), +} + +impl AnyResponse { + pub fn cache_key(&self) -> u64 { + let mut hasher = DefaultHasher::new(); + self.hash(&mut hasher); + hasher.finish() + } +} +impl PythLazerWSConnection { + /// Creates a new Lazer client instance + /// + /// # Arguments + /// * `endpoint` - The WebSocket URL of the Lazer service + /// * `access_token` - Access token for authentication + /// + /// # Returns + /// Returns a new client instance (not yet connected) + pub fn new(endpoint: Url, access_token: String) -> Result { + Ok(Self { + endpoint, + access_token, + ws_sender: None, + }) + } + + /// Starts the WebSocket connection + /// + /// # Returns + /// 
Returns a stream of responses from the server + pub async fn start(&mut self) -> Result>> { + let url = self.endpoint.clone(); + let mut request = + tokio_tungstenite::tungstenite::client::IntoClientRequest::into_client_request(url)?; + + request.headers_mut().insert( + "Authorization", + format!("Bearer {}", self.access_token).parse().unwrap(), + ); + + let (ws_stream, _) = connect_async(request).await?; + let (ws_sender, ws_receiver) = ws_stream.split(); + + self.ws_sender = Some(ws_sender); + let response_stream = + ws_receiver + .map_err(anyhow::Error::from) + .try_filter_map(|msg| async { + let r: Result> = match msg { + Message::Text(text) => { + Ok(Some(serde_json::from_str::(&text)?.into())) + } + Message::Binary(data) => { + Ok(Some(BinaryWsUpdate::deserialize_slice(&data)?.into())) + } + Message::Close(_) => Ok(Some( + Response::Error(ErrorResponse { + error: "WebSocket connection closed".to_string(), + }) + .into(), + )), + _ => Ok(None), + }; + r + }); + + Ok(response_stream) + } + + pub async fn send_request(&mut self, request: Request) -> Result<()> { + if let Some(sender) = &mut self.ws_sender { + let msg = serde_json::to_string(&request)?; + sender.send(Message::Text(msg)).await?; + Ok(()) + } else { + anyhow::bail!("WebSocket connection not started") + } + } + + /// Subscribes to price feed updates + /// + /// # Arguments + /// * `request` - A subscription request containing feed IDs and parameters + pub async fn subscribe(&mut self, request: SubscribeRequest) -> Result<()> { + let request = Request::Subscribe(request); + self.send_request(request).await + } + + /// Unsubscribes from a previously subscribed feed + /// + /// # Arguments + /// * `subscription_id` - The ID of the subscription to cancel + pub async fn unsubscribe(&mut self, request: UnsubscribeRequest) -> Result<()> { + let request = Request::Unsubscribe(request); + self.send_request(request).await + } + + /// Closes the WebSocket connection + pub async fn close(&mut self) -> 
Result<()> { + if let Some(sender) = &mut self.ws_sender { + sender.send(Message::Close(None)).await?; + self.ws_sender = None; + Ok(()) + } else { + anyhow::bail!("WebSocket connection not started") + } + } +} diff --git a/lazer/sdk/rust/protocol/Cargo.toml b/lazer/sdk/rust/protocol/Cargo.toml index 537dfed2a5..2ba075e1a6 100644 --- a/lazer/sdk/rust/protocol/Cargo.toml +++ b/lazer/sdk/rust/protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pyth-lazer-protocol" -version = "0.9.1" +version = "0.10.0" edition = "2021" description = "Pyth Lazer SDK - protocol types." license = "Apache-2.0" diff --git a/lazer/sdk/rust/protocol/src/jrpc.rs b/lazer/sdk/rust/protocol/src/jrpc.rs index 5dca3e6362..ff01907535 100644 --- a/lazer/sdk/rust/protocol/src/jrpc.rs +++ b/lazer/sdk/rust/protocol/src/jrpc.rs @@ -9,7 +9,7 @@ pub struct PythLazerAgentJrpcV1 { pub jsonrpc: JsonRpcVersion, #[serde(flatten)] pub params: JrpcCall, - pub id: i64, + pub id: Option, } #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] @@ -20,14 +20,14 @@ pub enum JrpcCall { GetMetadata(GetMetadataParams), } -#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] pub struct FeedUpdateParams { pub feed_id: PriceFeedId, pub source_timestamp: TimestampUs, pub update: UpdateParams, } -#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] #[serde(tag = "type")] pub enum UpdateParams { #[serde(rename = "price")] @@ -59,6 +59,7 @@ pub enum JsonRpcVersion { } #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] +#[serde(untagged)] pub enum JrpcResponse { Success(JrpcSuccessResponse), Error(JrpcErrorResponse), @@ -89,7 +90,8 @@ pub struct JrpcErrorObject { #[derive(Debug, Eq, PartialEq)] pub enum JrpcError { ParseError(String), - InternalError, + InternalError(String), + SendUpdateError(FeedUpdateParams), } // note: error codes can be found in the rfc 
https://www.jsonrpc.org/specification#error_object @@ -101,10 +103,15 @@ impl From for JrpcErrorObject { message: "Parse error".to_string(), data: Some(error_message.into()), }, - JrpcError::InternalError => JrpcErrorObject { + JrpcError::InternalError(error_message) => JrpcErrorObject { code: -32603, message: "Internal error".to_string(), - data: None, + data: Some(error_message.into()), + }, + JrpcError::SendUpdateError(feed_update_params) => JrpcErrorObject { + code: -32000, + message: "Internal error".to_string(), + data: Some(serde_json::to_value(feed_update_params).unwrap()), }, } } @@ -165,7 +172,47 @@ mod tests { best_ask_price: Some(Price::from_integer(1234567892, 0).unwrap()), }, }), - id: 1, + id: Some(1), + }; + + assert_eq!( + serde_json::from_str::(json).unwrap(), + expected + ); + } + + #[test] + fn test_push_update_price_without_id() { + let json = r#" + { + "jsonrpc": "2.0", + "method": "push_update", + "params": { + "feed_id": 1, + "source_timestamp": 745214124124, + + "update": { + "type": "price", + "price": 5432, + "best_bid_price": 5432, + "best_ask_price": 5432 + } + } + } + "#; + + let expected = PythLazerAgentJrpcV1 { + jsonrpc: JsonRpcVersion::V2, + params: PushUpdate(FeedUpdateParams { + feed_id: PriceFeedId(1), + source_timestamp: TimestampUs::from_micros(745214124124), + update: UpdateParams::PriceUpdate { + price: Price::from_integer(5432, 0).unwrap(), + best_bid_price: Some(Price::from_integer(5432, 0).unwrap()), + best_ask_price: Some(Price::from_integer(5432, 0).unwrap()), + }, + }), + id: None, }; assert_eq!( @@ -204,7 +251,7 @@ mod tests { best_ask_price: None, }, }), - id: 1, + id: Some(1), }; assert_eq!( @@ -243,7 +290,7 @@ mod tests { rate: Rate::from_integer(1234567891, 0).unwrap(), }, }), - id: 1, + id: Some(1), }; assert_eq!( @@ -280,7 +327,7 @@ mod tests { rate: Rate::from_integer(1234567891, 0).unwrap(), }, }), - id: 1, + id: Some(1), }; assert_eq!( @@ -309,7 +356,7 @@ mod tests { names: Some(vec!["BTC/USD".to_string()]), 
asset_types: Some(vec!["crypto".to_string()]), }), - id: 1, + id: Some(1), }; assert_eq!( @@ -335,7 +382,7 @@ mod tests { names: None, asset_types: None, }), - id: 1, + id: Some(1), }; assert_eq!( @@ -396,4 +443,52 @@ mod tests { } ); } + + #[test] + pub fn test_parse_response() { + let success_response = serde_json::from_str::>( + r#" + { + "jsonrpc": "2.0", + "id": 2, + "result": "success" + }"#, + ) + .unwrap(); + + assert_eq!( + success_response, + JrpcResponse::Success(JrpcSuccessResponse:: { + jsonrpc: JsonRpcVersion::V2, + result: "success".to_string(), + id: 2, + }) + ); + + let error_response = serde_json::from_str::>( + r#" + { + "jsonrpc": "2.0", + "id": 3, + "error": { + "code": -32603, + "message": "Internal error" + } + }"#, + ) + .unwrap(); + + assert_eq!( + error_response, + JrpcResponse::Error(JrpcErrorResponse { + jsonrpc: JsonRpcVersion::V2, + error: JrpcErrorObject { + code: -32603, + message: "Internal error".to_string(), + data: None, + }, + id: Some(3), + }) + ); + } } diff --git a/packages/known-publishers/src/icons/color/portofino.svg b/packages/known-publishers/src/icons/color/portofino.svg new file mode 100644 index 0000000000..49fefe9877 --- /dev/null +++ b/packages/known-publishers/src/icons/color/portofino.svg @@ -0,0 +1,4 @@ + + + + diff --git a/packages/known-publishers/src/icons/monochrome/portofino.svg b/packages/known-publishers/src/icons/monochrome/portofino.svg new file mode 100644 index 0000000000..d2bb9d54cb --- /dev/null +++ b/packages/known-publishers/src/icons/monochrome/portofino.svg @@ -0,0 +1,3 @@ + + + diff --git a/packages/known-publishers/src/index.tsx b/packages/known-publishers/src/index.tsx index 25a706d520..3ff9db3152 100644 --- a/packages/known-publishers/src/index.tsx +++ b/packages/known-publishers/src/index.tsx @@ -3,6 +3,7 @@ import finazonColor from "./icons/color/finazon.svg"; import lotechColor from "./icons/color/lotech.svg"; import nobiColor from "./icons/color/nobi.svg"; import orcaColor from 
"./icons/color/orca.svg"; +import portofinoColor from "./icons/color/portofino.svg"; import sentioColor from "./icons/color/sentio.svg"; import wooColor from "./icons/color/woo.svg"; import amberDark from "./icons/dark/amber.svg"; @@ -21,6 +22,7 @@ import lotechMonochrome from "./icons/monochrome/lotech.svg"; import ltpMonochrome from "./icons/monochrome/ltp.svg"; import nobiMonochrome from "./icons/monochrome/nobi.svg"; import orcaMonochrome from "./icons/monochrome/orca.svg"; +import portofinoMonochrome from "./icons/monochrome/portofino.svg"; import sentioMonochrome from "./icons/monochrome/sentio.svg"; import wooMonochrome from "./icons/monochrome/woo.svg"; @@ -120,6 +122,13 @@ export const knownPublishers = { light: amberLight, }, }, + UZZ1sH1jvTV5QPHtRcsA6inURuSoD5UFT6a2RBTNvXr: { + name: "Portofino", + icon: { + monochrome: portofinoMonochrome, + color: portofinoColor, + }, + }, } as const; export const lookup = (value: string) => diff --git a/target_chains/cosmwasm/examples/cw-contract/Cargo.lock b/target_chains/cosmwasm/examples/cw-contract/Cargo.lock index c255e5b155..b482f3fc0f 100644 --- a/target_chains/cosmwasm/examples/cw-contract/Cargo.lock +++ b/target_chains/cosmwasm/examples/cw-contract/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "ahash" @@ -485,7 +485,9 @@ dependencies = [ [[package]] name = "pyth-sdk-cw" -version = "0.1.0" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c04e9f2961bce1ef13b09afcdb5aee7d4ddde83669e5f9d2824ba422cb00de48" dependencies = [ "cosmwasm-schema", "cosmwasm-std", @@ -747,4 +749,4 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" name = "zeroize" version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619" \ No newline at end of file +checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619" diff --git a/target_chains/solana/sdk/js/pyth_solana_receiver/package.json b/target_chains/solana/sdk/js/pyth_solana_receiver/package.json index 7f32568be0..8f4e4bf2a2 100644 --- a/target_chains/solana/sdk/js/pyth_solana_receiver/package.json +++ b/target_chains/solana/sdk/js/pyth_solana_receiver/package.json @@ -1,6 +1,6 @@ { "name": "@pythnetwork/pyth-solana-receiver", - "version": "0.10.1", + "version": "0.10.2", "description": "Pyth solana receiver SDK", "homepage": "https://pyth.network", "main": "lib/index.js", diff --git a/target_chains/solana/sdk/js/pyth_solana_receiver/src/vaa.ts b/target_chains/solana/sdk/js/pyth_solana_receiver/src/vaa.ts index da4eb7ccce..bad88f630c 100644 --- a/target_chains/solana/sdk/js/pyth_solana_receiver/src/vaa.ts +++ b/target_chains/solana/sdk/js/pyth_solana_receiver/src/vaa.ts @@ -133,35 +133,39 @@ async function generateVaaInstructionGroups( // Second write and verify instructions const writeSecondPartAndVerifyInstructions: InstructionWithEphemeralSigners[] = - [ - { - instruction: await wormhole.methods - .writeEncodedVaa({ - index: VAA_SPLIT_INDEX, - data: vaa.subarray(VAA_SPLIT_INDEX), - }) - .accounts({ - draftVaa: encodedVaaKeypair.publicKey, - }) - .instruction(), - signers: [], - 
computeUnits: WRITE_ENCODED_VAA_COMPUTE_BUDGET, - }, - { - instruction: await wormhole.methods - .verifyEncodedVaaV1() - .accounts({ - guardianSet: getGuardianSetPda( - getGuardianSetIndex(vaa), - wormhole.programId, - ), - draftVaa: encodedVaaKeypair.publicKey, - }) - .instruction(), - signers: [], - computeUnits: VERIFY_ENCODED_VAA_COMPUTE_BUDGET, - }, - ]; + []; + + // The second write instruction is only needed if there are more bytes past the split index in the VAA + if (vaa.length > VAA_SPLIT_INDEX) { + writeSecondPartAndVerifyInstructions.push({ + instruction: await wormhole.methods + .writeEncodedVaa({ + index: VAA_SPLIT_INDEX, + data: vaa.subarray(VAA_SPLIT_INDEX), + }) + .accounts({ + draftVaa: encodedVaaKeypair.publicKey, + }) + .instruction(), + signers: [], + computeUnits: WRITE_ENCODED_VAA_COMPUTE_BUDGET, + }); + } + + writeSecondPartAndVerifyInstructions.push({ + instruction: await wormhole.methods + .verifyEncodedVaaV1() + .accounts({ + guardianSet: getGuardianSetPda( + getGuardianSetIndex(vaa), + wormhole.programId, + ), + draftVaa: encodedVaaKeypair.publicKey, + }) + .instruction(), + signers: [], + computeUnits: VERIFY_ENCODED_VAA_COMPUTE_BUDGET, + }); // Close instructions const closeInstructions: InstructionWithEphemeralSigners[] = [ diff --git a/target_chains/stylus/contracts/pyth-receiver/src/error.rs b/target_chains/stylus/contracts/pyth-receiver/src/error.rs index caeb2e4922..b195711098 100644 --- a/target_chains/stylus/contracts/pyth-receiver/src/error.rs +++ b/target_chains/stylus/contracts/pyth-receiver/src/error.rs @@ -20,11 +20,105 @@ pub enum PythReceiverError { PriceFeedNotFoundWithinRange, NoFreshUpdate, PriceFeedNotFound, + InvalidGovernanceMessage, + InvalidGovernanceTarget, + InvalidGovernanceAction, + InvalidGovernanceDataSource, + OldGovernanceMessage, + GovernanceMessageAlreadyExecuted, + InvalidWormholeAddressToSet, + WormholeUninitialized, } impl core::fmt::Debug for PythReceiverError { - fn fmt(&self, _: &mut 
core::fmt::Formatter<'_>) -> core::fmt::Result { - Ok(()) + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + PythReceiverError::PriceUnavailable => write!(f, "PriceUnavailable"), + PythReceiverError::InvalidUpdateData => write!(f, "InvalidUpdateData"), + PythReceiverError::VaaVerificationFailed => write!(f, "VaaVerificationFailed"), + PythReceiverError::InvalidVaa => write!(f, "InvalidVaa"), + PythReceiverError::InvalidWormholeMessage => write!(f, "InvalidWormholeMessage"), + PythReceiverError::InvalidMerkleProof => write!(f, "InvalidMerkleProof"), + PythReceiverError::InvalidAccumulatorMessage => write!(f, "InvalidAccumulatorMessage"), + PythReceiverError::InvalidMerkleRoot => write!(f, "InvalidMerkleRoot"), + PythReceiverError::InvalidMerklePath => write!(f, "InvalidMerklePath"), + PythReceiverError::InvalidUnknownSource => write!(f, "InvalidUnknownSource"), + PythReceiverError::NewPriceUnavailable => write!(f, "NewPriceUnavailable"), + PythReceiverError::InvalidAccumulatorMessageType => { + write!(f, "InvalidAccumulatorMessageType") + } + PythReceiverError::InsufficientFee => write!(f, "InsufficientFee"), + PythReceiverError::InvalidEmitterAddress => write!(f, "InvalidEmitterAddress"), + PythReceiverError::TooManyUpdates => write!(f, "TooManyUpdates"), + PythReceiverError::PriceFeedNotFoundWithinRange => { + write!(f, "PriceFeedNotFoundWithinRange") + } + PythReceiverError::NoFreshUpdate => write!(f, "NoFreshUpdate"), + PythReceiverError::PriceFeedNotFound => write!(f, "PriceFeedNotFound"), + PythReceiverError::InvalidGovernanceMessage => write!(f, "InvalidGovernanceMessage"), + PythReceiverError::InvalidGovernanceTarget => write!(f, "InvalidGovernanceTarget"), + PythReceiverError::InvalidGovernanceAction => write!(f, "InvalidGovernanceAction"), + PythReceiverError::InvalidGovernanceDataSource => { + write!(f, "InvalidGovernanceDataSource") + } + PythReceiverError::OldGovernanceMessage => write!(f, "OldGovernanceMessage"), 
+ PythReceiverError::GovernanceMessageAlreadyExecuted => { + write!(f, "GovernanceMessageAlreadyExecuted") + } + PythReceiverError::InvalidWormholeAddressToSet => { + write!(f, "InvalidWormholeAddressToSet") + } + PythReceiverError::WormholeUninitialized => { + write!(f, "Wormhole is uninitialized, please set the Wormhole address and initialize the contract first") + } + } + } +} + +impl core::fmt::Display for PythReceiverError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + PythReceiverError::PriceUnavailable => write!(f, "Price unavailable"), + PythReceiverError::InvalidUpdateData => write!(f, "Invalid update data"), + PythReceiverError::VaaVerificationFailed => write!(f, "VAA verification failed"), + PythReceiverError::InvalidVaa => write!(f, "Invalid VAA"), + PythReceiverError::InvalidWormholeMessage => write!(f, "Invalid Wormhole message"), + PythReceiverError::InvalidMerkleProof => write!(f, "Invalid Merkle proof"), + PythReceiverError::InvalidAccumulatorMessage => { + write!(f, "Invalid accumulator message") + } + PythReceiverError::InvalidMerkleRoot => write!(f, "Invalid Merkle root"), + PythReceiverError::InvalidMerklePath => write!(f, "Invalid Merkle path"), + PythReceiverError::InvalidUnknownSource => write!(f, "Invalid unknown source"), + PythReceiverError::NewPriceUnavailable => write!(f, "New price unavailable"), + PythReceiverError::InvalidAccumulatorMessageType => { + write!(f, "Invalid accumulator message type") + } + PythReceiverError::InsufficientFee => write!(f, "Insufficient fee"), + PythReceiverError::InvalidEmitterAddress => write!(f, "Invalid emitter address"), + PythReceiverError::TooManyUpdates => write!(f, "Too many updates"), + PythReceiverError::PriceFeedNotFoundWithinRange => { + write!(f, "Price feed not found within range") + } + PythReceiverError::NoFreshUpdate => write!(f, "No fresh update"), + PythReceiverError::PriceFeedNotFound => write!(f, "Price feed not found"), + 
PythReceiverError::InvalidGovernanceMessage => write!(f, "Invalid governance message"), + PythReceiverError::InvalidGovernanceTarget => write!(f, "Invalid governance target"), + PythReceiverError::InvalidGovernanceAction => write!(f, "Invalid governance action"), + PythReceiverError::InvalidGovernanceDataSource => { + write!(f, "Invalid governance data source") + } + PythReceiverError::OldGovernanceMessage => write!(f, "Old governance message"), + PythReceiverError::GovernanceMessageAlreadyExecuted => { + write!(f, "Governance message already executed") + } + PythReceiverError::InvalidWormholeAddressToSet => { + write!(f, "Invalid Wormhole address to set") + } + PythReceiverError::WormholeUninitialized => { + write!(f, "Wormhole is uninitialized, please set the Wormhole address and initialize the contract first") + } + } } } @@ -49,6 +143,14 @@ impl From for Vec { PythReceiverError::PriceFeedNotFoundWithinRange => 16, PythReceiverError::NoFreshUpdate => 17, PythReceiverError::PriceFeedNotFound => 18, + PythReceiverError::InvalidGovernanceMessage => 19, + PythReceiverError::InvalidGovernanceTarget => 20, + PythReceiverError::InvalidGovernanceAction => 21, + PythReceiverError::InvalidGovernanceDataSource => 22, + PythReceiverError::OldGovernanceMessage => 23, + PythReceiverError::GovernanceMessageAlreadyExecuted => 24, + PythReceiverError::InvalidWormholeAddressToSet => 25, + PythReceiverError::WormholeUninitialized => 26, }] } } diff --git a/target_chains/stylus/contracts/pyth-receiver/src/governance.rs b/target_chains/stylus/contracts/pyth-receiver/src/governance.rs new file mode 100644 index 0000000000..0da3cb48a2 --- /dev/null +++ b/target_chains/stylus/contracts/pyth-receiver/src/governance.rs @@ -0,0 +1,318 @@ +use alloc::vec::Vec; +use stylus_sdk::{ + alloy_primitives::{Address, FixedBytes, U16, U256, U32, U64}, + call::Call, + prelude::*, +}; + +use crate::{ + error::PythReceiverError, + governance_structs::{self, *}, + structs::DataSource, + 
IWormholeContract, PythReceiver, +}; +use wormhole_vaas::{Readable, Vaa, Writeable}; + +impl PythReceiver { + pub fn execute_governance_instruction_internal( + &mut self, + data: Vec, + ) -> Result<(), PythReceiverError> { + let wormhole: IWormholeContract = IWormholeContract::new(self.wormhole.get()); + let config = Call::new(); + + wormhole + .parse_and_verify_vm(config, Vec::from(data.clone())) + .map_err(|_| PythReceiverError::InvalidWormholeMessage)?; + + let vm = Vaa::read(&mut Vec::from(data.clone()).as_slice()) + .map_err(|_| PythReceiverError::InvalidVaa)?; + + verify_governance_vm(self, vm.clone())?; + + let instruction = governance_structs::parse_instruction(vm.body.payload.to_vec()) + .map_err(|_| PythReceiverError::InvalidGovernanceMessage)?; + + let chain_id_config = Call::new(); + + let wormhole_id = wormhole + .chain_id(chain_id_config) + .map_err(|_| PythReceiverError::WormholeUninitialized)?; + + if instruction.target_chain_id != 0 && instruction.target_chain_id != wormhole_id { + return Err(PythReceiverError::InvalidGovernanceTarget); + } + + match instruction.payload { + GovernancePayload::SetFee(payload) => { + self.set_fee(payload.value, payload.expo); + } + GovernancePayload::SetDataSources(payload) => { + set_data_sources(self, payload.sources); + } + GovernancePayload::SetWormholeAddress(payload) => { + self.set_wormhole_address(payload.address, data.clone())?; + } + GovernancePayload::RequestGovernanceDataSourceTransfer(_) => { + return Err(PythReceiverError::InvalidGovernanceMessage); + } + GovernancePayload::AuthorizeGovernanceDataSourceTransfer(payload) => { + self.authorize_governance_transfer(payload.claim_vaa)?; + } + GovernancePayload::UpgradeContract(_payload) => {} + GovernancePayload::SetValidPeriod(payload) => { + self.set_valid_period(payload.valid_time_period_seconds); + } + GovernancePayload::SetTransactionFee(payload) => { + self.set_transaction_fee(payload.value, payload.expo); + } + GovernancePayload::WithdrawFee(payload) 
=> { + self.withdraw_fee(payload.value, payload.expo, payload.target_address)?; + } + } + + Ok(()) + } + + fn upgrade_contract(&self, _new_implementation: FixedBytes<32>) { + unimplemented!("Upgrade contract not yet implemented"); + } + + fn set_fee(&mut self, value: u64, expo: u64) { + let new_fee = U256::from(value).saturating_mul(U256::from(10).pow(U256::from(expo))); + let old_fee = self.single_update_fee_in_wei.get(); + + self.single_update_fee_in_wei.set(new_fee); + + log(self.vm(), crate::FeeSet { old_fee, new_fee }); + } + + fn set_valid_period(&mut self, valid_time_period_seconds: u64) { + let old_valid_period = self.valid_time_period_seconds.get(); + let new_valid_period = U256::from(valid_time_period_seconds); + self.valid_time_period_seconds.set(new_valid_period); + + log( + self.vm(), + crate::ValidPeriodSet { + old_valid_period, + new_valid_period, + }, + ); + } + + fn set_wormhole_address( + &mut self, + address: Address, + data: Vec, + ) -> Result<(), PythReceiverError> { + let wormhole: IWormholeContract = IWormholeContract::new(address); + let config = Call::new(); + + wormhole + .parse_and_verify_vm(config, data.clone()) + .map_err(|_| PythReceiverError::InvalidVaa)?; + + let vm = Vaa::read(&mut data.as_slice()) + .map_err(|_| PythReceiverError::VaaVerificationFailed)?; + + if vm.body.emitter_chain != self.governance_data_source_chain_id.get().to::() { + return Err(PythReceiverError::InvalidGovernanceMessage); + } + + if vm.body.emitter_address.as_slice() + != self.governance_data_source_emitter_address.get().as_slice() + { + return Err(PythReceiverError::InvalidGovernanceMessage); + } + + if vm.body.sequence.to::() <= self.last_executed_governance_sequence.get().to::() + { + return Err(PythReceiverError::InvalidWormholeAddressToSet); + } + + let data = governance_structs::parse_instruction(vm.body.payload.to_vec()) + .map_err(|_| PythReceiverError::InvalidGovernanceMessage)?; + + match data.payload { + 
GovernancePayload::SetWormholeAddress(payload) => { + if payload.address != address { + return Err(PythReceiverError::InvalidWormholeAddressToSet); + } + } + _ => return Err(PythReceiverError::InvalidGovernanceMessage), + } + + self.wormhole.set(address); + Ok(()) + } + + fn authorize_governance_transfer( + &mut self, + claim_vaa: Vec, + ) -> Result<(), PythReceiverError> { + let wormhole: IWormholeContract = IWormholeContract::new(self.wormhole.get()); + let config = Call::new(); + wormhole + .parse_and_verify_vm(config, claim_vaa.clone()) + .map_err(|_| PythReceiverError::InvalidWormholeMessage)?; + + let claim_vm = Vaa::read(&mut Vec::from(claim_vaa).as_slice()) + .map_err(|_| PythReceiverError::VaaVerificationFailed)?; + + let instruction = governance_structs::parse_instruction(claim_vm.body.payload.to_vec()) + .map_err(|_| PythReceiverError::InvalidGovernanceMessage)?; + + let config2 = Call::new(); + if instruction.target_chain_id != 0 + && instruction.target_chain_id != wormhole.chain_id(config2).unwrap_or(0) + { + return Err(PythReceiverError::InvalidGovernanceTarget); + } + + let request_payload = match instruction.payload { + GovernancePayload::RequestGovernanceDataSourceTransfer(payload) => payload, + _ => return Err(PythReceiverError::InvalidGovernanceMessage), + }; + + let current_index = self.governance_data_source_index.get().to::(); + let new_index = request_payload.governance_data_source_index; + + if current_index >= new_index { + return Err(PythReceiverError::OldGovernanceMessage); + } + + self.governance_data_source_index.set(U32::from(new_index)); + let old_data_source_emitter_address = self.governance_data_source_emitter_address.get(); + + self.governance_data_source_chain_id + .set(U16::from(claim_vm.body.emitter_chain)); + let emitter_bytes: [u8; 32] = claim_vm + .body + .emitter_address + .as_slice() + .try_into() + .map_err(|_| PythReceiverError::InvalidEmitterAddress)?; + self.governance_data_source_emitter_address + 
.set(FixedBytes::from(emitter_bytes)); + + let last_executed_governance_sequence = claim_vm.body.sequence.to::(); + self.last_executed_governance_sequence + .set(U64::from(last_executed_governance_sequence)); + + log( + self.vm(), + crate::GovernanceDataSourceSet { + old_chain_id: current_index as u16, + old_emitter_address: old_data_source_emitter_address, + new_chain_id: claim_vm.body.emitter_chain, + new_emitter_address: FixedBytes::from(emitter_bytes), + initial_sequence: last_executed_governance_sequence, + }, + ); + + Ok(()) + } + + fn set_transaction_fee(&mut self, value: u64, expo: u64) { + let new_fee = U256::from(value).saturating_mul(U256::from(10).pow(U256::from(expo))); + let old_fee = self.transaction_fee_in_wei.get(); + + self.transaction_fee_in_wei.set(new_fee); + + log(self.vm(), crate::TransactionFeeSet { old_fee, new_fee }); + } + + fn withdraw_fee( + &mut self, + value: u64, + expo: u64, + target_address: Address, + ) -> Result<(), PythReceiverError> { + let fee_to_withdraw = + U256::from(value).saturating_mul(U256::from(10).pow(U256::from(expo))); + let current_balance = self.vm().balance(self.vm().contract_address()); + + if current_balance < fee_to_withdraw { + return Err(PythReceiverError::InsufficientFee); + } + + self.vm() + .transfer_eth(target_address, fee_to_withdraw) + .map_err(|_| PythReceiverError::InsufficientFee)?; + + log( + self.vm(), + crate::FeeWithdrawn { + target_address, + fee_amount: fee_to_withdraw, + }, + ); + + Ok(()) + } +} + +pub fn verify_governance_vm(receiver: &mut PythReceiver, vm: Vaa) -> Result<(), PythReceiverError> { + if vm.body.emitter_chain != receiver.governance_data_source_chain_id.get().to::() { + return Err(PythReceiverError::InvalidGovernanceMessage); + } + + if vm.body.emitter_address.as_slice() + != receiver + .governance_data_source_emitter_address + .get() + .as_slice() + { + return Err(PythReceiverError::InvalidGovernanceMessage); + } + + let current_sequence = vm.body.sequence.to::(); + let 
last_executed_sequence = receiver.last_executed_governance_sequence.get().to::(); + + if current_sequence <= last_executed_sequence { + return Err(PythReceiverError::GovernanceMessageAlreadyExecuted); + } + + receiver + .last_executed_governance_sequence + .set(U64::from(current_sequence)); + + Ok(()) +} + +pub fn set_data_sources(receiver: &mut PythReceiver, data_sources: Vec) { + let mut old_data_sources = Vec::new(); + for i in 0..receiver.valid_data_sources.len() { + if let Some(storage_data_source) = receiver.valid_data_sources.get(i) { + let data_source = DataSource { + chain_id: storage_data_source.chain_id.get(), + emitter_address: storage_data_source.emitter_address.get(), + }; + old_data_sources.push(data_source.emitter_address); + receiver.is_valid_data_source.setter(data_source).set(false); + } + } + + receiver.valid_data_sources.erase(); + + let mut new_data_sources = Vec::new(); + for data_source in data_sources { + let mut storage_data_source = receiver.valid_data_sources.grow(); + storage_data_source.chain_id.set(data_source.chain_id); + storage_data_source + .emitter_address + .set(data_source.emitter_address); + + new_data_sources.push(data_source.emitter_address); + receiver.is_valid_data_source.setter(data_source).set(true); + } + + log( + receiver.vm(), + crate::DataSourcesSet { + old_data_sources, + new_data_sources, + }, + ); +} diff --git a/target_chains/stylus/contracts/pyth-receiver/src/governance_structs.rs b/target_chains/stylus/contracts/pyth-receiver/src/governance_structs.rs new file mode 100644 index 0000000000..75ad67cdbf --- /dev/null +++ b/target_chains/stylus/contracts/pyth-receiver/src/governance_structs.rs @@ -0,0 +1,337 @@ +use crate::error::PythReceiverError; +use crate::structs::DataSource; +use alloc::vec::Vec; +use stylus_sdk::alloy_primitives::{Address, FixedBytes, U16}; + +// Magic is `PTGM` encoded as a 4 byte data: Pyth Governance Message +const MAGIC: u32 = 0x5054474d; +const MODULE_TARGET: u8 = 1; + +#[derive(Clone, 
Debug, PartialEq)] +pub enum GovernanceAction { + UpgradeContract, + AuthorizeGovernanceDataSourceTransfer, + SetDataSources, + SetFee, + SetValidPeriod, + RequestGovernanceDataSourceTransfer, + SetWormholeAddress, + SetTransactionFee, + WithdrawFee, +} + +impl TryFrom for GovernanceAction { + type Error = PythReceiverError; + + fn try_from(value: u8) -> Result { + match value { + 0 => Ok(GovernanceAction::UpgradeContract), + 1 => Ok(GovernanceAction::AuthorizeGovernanceDataSourceTransfer), + 2 => Ok(GovernanceAction::SetDataSources), + 3 => Ok(GovernanceAction::SetFee), + 4 => Ok(GovernanceAction::SetValidPeriod), + 5 => Ok(GovernanceAction::RequestGovernanceDataSourceTransfer), + 6 => Ok(GovernanceAction::SetWormholeAddress), + 8 => Ok(GovernanceAction::SetTransactionFee), + 9 => Ok(GovernanceAction::WithdrawFee), + _ => Err(PythReceiverError::InvalidGovernanceAction), + } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub struct GovernanceInstruction { + pub target_chain_id: u16, + pub payload: GovernancePayload, +} + +#[derive(Clone, Debug, PartialEq)] +pub enum GovernancePayload { + UpgradeContract(UpgradeContract), + AuthorizeGovernanceDataSourceTransfer(AuthorizeGovernanceDataSourceTransfer), + SetDataSources(SetDataSources), + SetFee(SetFee), + SetValidPeriod(SetValidPeriod), + RequestGovernanceDataSourceTransfer(RequestGovernanceDataSourceTransfer), + SetWormholeAddress(SetWormholeAddress), + SetTransactionFee(SetTransactionFee), + WithdrawFee(WithdrawFee), +} + +#[derive(Clone, Debug, PartialEq)] +pub struct SetFee { + pub value: u64, + pub expo: u64, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct SetValidPeriod { + pub valid_time_period_seconds: u64, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct SetTransactionFee { + pub value: u64, + pub expo: u64, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct WithdrawFee { + pub value: u64, + pub expo: u64, + pub target_address: Address, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct 
SetDataSources { + pub sources: Vec, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct SetWormholeAddress { + pub address: Address, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct RequestGovernanceDataSourceTransfer { + pub governance_data_source_index: u32, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct AuthorizeGovernanceDataSourceTransfer { + pub claim_vaa: Vec, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct UpgradeContract { + pub new_implementation: FixedBytes<32>, +} + +pub fn parse_instruction(payload: Vec) -> Result { + if payload.len() < 8 { + return Err(PythReceiverError::InvalidGovernanceMessage); + } + + let mut cursor = 0; + + let magic_bytes = payload + .get(cursor..cursor + 4) + .ok_or(PythReceiverError::InvalidGovernanceMessage)?; + + let magic = u32::from_be_bytes( + magic_bytes + .try_into() + .map_err(|_| PythReceiverError::InvalidGovernanceMessage)?, + ); + + cursor += 4; + + if magic != MAGIC { + return Err(PythReceiverError::InvalidGovernanceMessage); + } + + let module = payload[cursor]; + cursor += 1; + + if module != MODULE_TARGET { + return Err(PythReceiverError::InvalidGovernanceTarget); + } + + let action = GovernanceAction::try_from(payload[cursor])?; + cursor += 1; + + let target_chain_id = u16::from_be_bytes([payload[cursor], payload[cursor + 1]]); + cursor += 2; + + let governance_payload = match action { + GovernanceAction::UpgradeContract => { + if payload.len() < cursor + 32 { + return Err(PythReceiverError::InvalidGovernanceMessage); + } + let mut new_implementation = [0u8; 32]; + new_implementation.copy_from_slice(&payload[cursor..cursor + 32]); + cursor += 32; + GovernancePayload::UpgradeContract(UpgradeContract { + new_implementation: FixedBytes::from(new_implementation), + }) + } + GovernanceAction::AuthorizeGovernanceDataSourceTransfer => { + let claim_vaa = payload[cursor..].to_vec(); + cursor = payload.len(); + GovernancePayload::AuthorizeGovernanceDataSourceTransfer( + 
AuthorizeGovernanceDataSourceTransfer { claim_vaa }, + ) + } + GovernanceAction::RequestGovernanceDataSourceTransfer => { + if payload.len() < cursor + 4 { + return Err(PythReceiverError::InvalidGovernanceMessage); + } + let governance_data_source_bytes = payload + .get(cursor..cursor + 4) + .ok_or(PythReceiverError::InvalidGovernanceMessage)?; + + let governance_data_source_index = u32::from_be_bytes( + governance_data_source_bytes + .try_into() + .map_err(|_| PythReceiverError::InvalidGovernanceMessage)?, + ); + + cursor += 4; + GovernancePayload::RequestGovernanceDataSourceTransfer( + RequestGovernanceDataSourceTransfer { + governance_data_source_index, + }, + ) + } + GovernanceAction::SetDataSources => { + if payload.len() < cursor + 1 { + return Err(PythReceiverError::InvalidGovernanceMessage); + } + let num_sources = payload[cursor]; + cursor += 1; + + let mut sources = Vec::new(); + for _ in 0..num_sources { + if payload.len() < cursor + 34 { + return Err(PythReceiverError::InvalidGovernanceMessage); + } + let emitter_chain_id = u16::from_be_bytes([payload[cursor], payload[cursor + 1]]); + cursor += 2; + + let mut emitter_address = [0u8; 32]; + emitter_address.copy_from_slice(&payload[cursor..cursor + 32]); + cursor += 32; + + sources.push(DataSource { + chain_id: U16::from(emitter_chain_id), + emitter_address: FixedBytes::from(emitter_address), + }); + } + GovernancePayload::SetDataSources(SetDataSources { sources }) + } + GovernanceAction::SetFee => { + if payload.len() < cursor + 16 { + return Err(PythReceiverError::InvalidGovernanceMessage); + } + let fee_value_bytes = payload + .get(cursor..cursor + 8) + .ok_or(PythReceiverError::InvalidGovernanceMessage)?; + + let value = u64::from_be_bytes( + fee_value_bytes + .try_into() + .map_err(|_| PythReceiverError::InvalidGovernanceMessage)?, + ); + + cursor += 8; + + let expo_bytes = payload + .get(cursor..cursor + 8) + .ok_or(PythReceiverError::InvalidGovernanceMessage)?; + let expo = u64::from_be_bytes( + 
expo_bytes + .try_into() + .map_err(|_| PythReceiverError::InvalidGovernanceMessage)?, + ); + + cursor += 8; + GovernancePayload::SetFee(SetFee { value, expo }) + } + GovernanceAction::SetValidPeriod => { + let valid_period_bytes = payload + .get(cursor..cursor + 8) + .ok_or(PythReceiverError::InvalidGovernanceMessage)?; + let valid_time_period_seconds = u64::from_be_bytes( + valid_period_bytes + .try_into() + .map_err(|_| PythReceiverError::InvalidGovernanceMessage)?, + ); + cursor += 8; + GovernancePayload::SetValidPeriod(SetValidPeriod { + valid_time_period_seconds, + }) + } + GovernanceAction::SetWormholeAddress => { + let address_bytes: &[u8; 20] = payload + .get(cursor..cursor + 20) + .ok_or(PythReceiverError::InvalidGovernanceMessage)? + .try_into() + .map_err(|_| PythReceiverError::InvalidGovernanceMessage)?; + cursor += 20; + GovernancePayload::SetWormholeAddress(SetWormholeAddress { + address: Address::from(address_bytes), + }) + } + GovernanceAction::SetTransactionFee => { + let fee_value_bytes = payload + .get(cursor..cursor + 8) + .ok_or(PythReceiverError::InvalidGovernanceMessage)?; + + let value = u64::from_be_bytes( + fee_value_bytes + .try_into() + .map_err(|_| PythReceiverError::InvalidGovernanceMessage)?, + ); + + cursor += 8; + + let expo_bytes = payload + .get(cursor..cursor + 8) + .ok_or(PythReceiverError::InvalidGovernanceMessage)?; + let expo = u64::from_be_bytes( + expo_bytes + .try_into() + .map_err(|_| PythReceiverError::InvalidGovernanceMessage)?, + ); + + cursor += 8; + GovernancePayload::SetTransactionFee(SetTransactionFee { value, expo }) + } + GovernanceAction::WithdrawFee => { + if payload.len() < cursor + 28 { + return Err(PythReceiverError::InvalidGovernanceMessage); + } + + let mut target_address_bytes = [0u8; 20]; + target_address_bytes.copy_from_slice(&payload[cursor..cursor + 20]); + cursor += 20; + + let fee_value_bytes = payload + .get(cursor..cursor + 8) + .ok_or(PythReceiverError::InvalidGovernanceMessage)?; + + let value 
= u64::from_be_bytes( + fee_value_bytes + .try_into() + .map_err(|_| PythReceiverError::InvalidGovernanceMessage)?, + ); + + cursor += 8; + + let expo_bytes = payload + .get(cursor..cursor + 8) + .ok_or(PythReceiverError::InvalidGovernanceMessage)?; + let expo = u64::from_be_bytes( + expo_bytes + .try_into() + .map_err(|_| PythReceiverError::InvalidGovernanceMessage)?, + ); + + cursor += 8; + GovernancePayload::WithdrawFee(WithdrawFee { + value, + expo, + target_address: Address::from(target_address_bytes), + }) + } + }; + + if cursor != payload.len() { + return Err(PythReceiverError::InvalidGovernanceMessage); + } + + Ok(GovernanceInstruction { + target_chain_id, + payload: governance_payload, + }) +} diff --git a/target_chains/stylus/contracts/pyth-receiver/src/integration_tests.rs b/target_chains/stylus/contracts/pyth-receiver/src/integration_tests.rs index 33b7c5440e..99d8c9ca0d 100644 --- a/target_chains/stylus/contracts/pyth-receiver/src/integration_tests.rs +++ b/target_chains/stylus/contracts/pyth-receiver/src/integration_tests.rs @@ -42,6 +42,7 @@ mod test { Ok(get_total_fee(total_num_updates)) } + #[cfg(test)] fn get_total_fee(total_num_updates: u64) -> U256 { U256::from(total_num_updates).saturating_mul(SINGLE_UPDATE_FEE_IN_WEI) + TRANSACTION_FEE_IN_WEI diff --git a/target_chains/stylus/contracts/pyth-receiver/src/lib.rs b/target_chains/stylus/contracts/pyth-receiver/src/lib.rs index 91730e534d..ab88aa1428 100644 --- a/target_chains/stylus/contracts/pyth-receiver/src/lib.rs +++ b/target_chains/stylus/contracts/pyth-receiver/src/lib.rs @@ -6,8 +6,12 @@ extern crate alloc; mod error; +mod governance; +mod governance_structs; #[cfg(test)] mod integration_tests; +#[cfg(test)] +mod pyth_governance_test; mod structs; #[cfg(test)] mod test_data; @@ -18,6 +22,7 @@ use mock_instant::global::MockClock; use alloc::vec::Vec; use stylus_sdk::{ alloy_primitives::{Address, FixedBytes, I32, I64, U16, U256, U32, U64}, + alloy_sol_types::sol, call::Call, prelude::*, 
storage::{ @@ -42,13 +47,23 @@ use pythnet_sdk::{ use structs::{DataSource, DataSourceStorage, PriceFeedReturn, PriceFeedStorage, PriceReturn}; use wormhole_vaas::{Readable, Vaa, Writeable}; +sol! { + event FeeSet(uint256 indexed old_fee, uint256 indexed new_fee); + event TransactionFeeSet(uint256 indexed old_fee, uint256 indexed new_fee); + event FeeWithdrawn(address indexed target_address, uint256 fee_amount); + event ValidPeriodSet(uint256 indexed old_valid_period, uint256 indexed new_valid_period); + event DataSourcesSet(bytes32[] old_data_sources, bytes32[] new_data_sources); + event GovernanceDataSourceSet(uint16 old_chain_id, bytes32 old_emitter_address, uint16 new_chain_id, bytes32 new_emitter_address, uint64 initial_sequence); +} + sol_interface! { - interface IWormholeContract { - function initialize(address[] memory initial_guardians, uint16 chain_id, uint16 governance_chain_id, address governance_contract) external; - function getGuardianSet(uint32 index) external view returns (uint8[] memory); - function parseAndVerifyVm(uint8[] memory encoded_vaa) external view returns (uint8[] memory); - function quorum(uint32 num_guardians) external pure returns (uint32); - } + interface IWormholeContract { + function initialize(address[] memory initial_guardians, uint32 initial_guardian_set_index, uint16 chain_id, uint16 governance_chain_id, address governance_contract) external; + function getGuardianSet(uint32 index) external view returns (uint8[] memory); + function parseAndVerifyVm(uint8[] memory encoded_vaa) external view returns (uint8[] memory); + function quorum(uint32 num_guardians) external pure returns (uint32); + function chainId() external view returns (uint16); +} } #[storage] @@ -375,8 +390,8 @@ impl PythReceiver { max_allowed_publish_time: u64, check_uniqueness: bool, ) -> Result, PythReceiverError> { - let update_data_array: &[u8] = &update_data; // Check the first 4 bytes of the update_data_array for the magic header + let update_data_array: &[u8] 
= &update_data; if update_data_array.len() < 4 { return Err(PythReceiverError::InvalidUpdateData); } @@ -516,6 +531,13 @@ impl PythReceiver { price_feeds } + pub fn execute_governance_instruction( + &mut self, + data: Vec, + ) -> Result<(), PythReceiverError> { + self.execute_governance_instruction_internal(data) + } + fn is_no_older_than(&self, publish_time: U64, max_age: u64) -> bool { self.get_current_timestamp() .saturating_sub(publish_time.to::()) diff --git a/target_chains/stylus/contracts/pyth-receiver/src/pyth_governance_test.rs b/target_chains/stylus/contracts/pyth-receiver/src/pyth_governance_test.rs new file mode 100644 index 0000000000..11ee65c8a9 --- /dev/null +++ b/target_chains/stylus/contracts/pyth-receiver/src/pyth_governance_test.rs @@ -0,0 +1,529 @@ +#[cfg(test)] +mod test { + use crate::{ + DataSourcesSet, FeeSet, GovernanceDataSourceSet, PythReceiver, TransactionFeeSet, + ValidPeriodSet, + }; + use alloy_primitives::{address, Address, FixedBytes, U256}; + use hex::FromHex; + use motsu::prelude::*; + use wormhole_contract::WormholeContract; + + const PYTHNET_CHAIN_ID: u16 = 26; + const PYTHNET_EMITTER_ADDRESS: [u8; 32] = [ + 0xe1, 0x01, 0xfa, 0xed, 0xac, 0x58, 0x51, 0xe3, 0x2b, 0x9b, 0x23, 0xb5, 0xf9, 0x41, 0x1a, + 0x8c, 0x2b, 0xac, 0x4a, 0xae, 0x3e, 0xd4, 0xdd, 0x7b, 0x81, 0x1d, 0xd1, 0xa7, 0x2e, 0xa4, + 0xaa, 0x71, + ]; + + const CHAIN_ID: u16 = 2; + const GOVERNANCE_CONTRACT: U256 = U256::from_limbs([4, 0, 0, 0]); + + const SINGLE_UPDATE_FEE_IN_WEI: U256 = U256::from_limbs([100, 0, 0, 0]); + const TRANSACTION_FEE_IN_WEI: U256 = U256::from_limbs([32, 0, 0, 0]); + + const TEST_SIGNER1: Address = Address::new([ + 0xbe, 0xFA, 0x42, 0x9d, 0x57, 0xcD, 0x18, 0xb7, 0xF8, 0xA4, 0xd9, 0x1A, 0x2d, 0xa9, 0xAB, + 0x4A, 0xF0, 0x5d, 0x0F, 0xBe, + ]); + const TEST_SIGNER2: Address = Address::new([ + 0x4b, 0xa0, 0xC2, 0xdb, 0x9A, 0x26, 0x20, 0x8b, 0x3b, 0xB1, 0xa5, 0x0B, 0x01, 0xb1, 0x69, + 0x41, 0xc1, 0x0D, 0x76, 0xdb, + ]); + const GOVERNANCE_CHAIN_ID: u16 
= 1; + const GOVERNANCE_EMITTER: [u8; 32] = [ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x11, + ]; + const TEST_PYTH2_WORMHOLE_CHAIN_ID: u16 = 1; + const TEST_PYTH2_WORMHOLE_EMITTER: [u8; 32] = [ + 0x71, 0xf8, 0xdc, 0xb8, 0x63, 0xd1, 0x76, 0xe2, 0xc4, 0x20, 0xad, 0x66, 0x10, 0xcf, 0x68, + 0x73, 0x59, 0x61, 0x2b, 0x6f, 0xb3, 0x92, 0xe0, 0x64, 0x2b, 0x0c, 0xa6, 0xb1, 0xf1, 0x86, + 0xaa, 0x3b, + ]; + const TARGET_CHAIN_ID: u16 = 2; + + #[cfg(test)] + fn pyth_wormhole_init( + pyth_contract: &Contract, + wormhole_contract: &Contract, + alice: &Address, + guardian_set_index: u32, + ) { + let guardians = vec![address!("0x7e5f4552091a69125d5dfcb7b8c2659029395bdf")]; + + let governance_contract = + Address::from_slice(&GOVERNANCE_CONTRACT.to_be_bytes::<32>()[12..32]); + wormhole_contract + .sender(*alice) + .initialize( + guardians, + guardian_set_index, + CHAIN_ID, + GOVERNANCE_CHAIN_ID, + governance_contract, + ) + .unwrap(); + + let single_update_fee = SINGLE_UPDATE_FEE_IN_WEI; + let valid_time_period = U256::from(3600u64); + + let data_source_chain_ids = vec![PYTHNET_CHAIN_ID]; + let data_source_emitter_addresses = vec![PYTHNET_EMITTER_ADDRESS]; + + let governance_chain_id = 1u16; + let governance_initial_sequence = 0u64; + + pyth_contract.sender(*alice).initialize( + wormhole_contract.address(), + single_update_fee, + valid_time_period, + data_source_chain_ids, + data_source_emitter_addresses, + governance_chain_id, + GOVERNANCE_EMITTER, + governance_initial_sequence, + ); + } + + #[motsu::test] + fn test_set_data_sources( + pyth_contract: Contract, + wormhole_contract: Contract, + alice: Address, + ) { + pyth_wormhole_init(&pyth_contract, &wormhole_contract, &alice, 0); + + let hex_str = 
"0100000000010069825ef00344cf745b6e72a41d4f869d4e90de517849360c72bf94efc97681671d826e484747b21a80c8f1e7816021df9f55e458a6e7a717cb2bd2a1e85fd57100499602d200000000000100000000000000000000000000000000000000000000000000000000000000110000000000000001005054474d010200020100010000000000000000000000000000000000000000000000000000000000001111"; + let bytes = Vec::from_hex(hex_str).expect("Invalid hex string"); + + let result = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + if result.is_err() { + println!("SetDataSources Error: {:?}", result.as_ref().unwrap_err()); + } + assert!(result.is_ok()); + + let expected_event = DataSourcesSet { + old_data_sources: vec![FixedBytes::from(PYTHNET_EMITTER_ADDRESS)], + new_data_sources: vec![FixedBytes::from([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x11, 0x11, + ])], + }; + assert!( + pyth_contract.emitted(&expected_event), + "DataSourcesSet event should be emitted" + ); + + let result2 = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + assert!( + result2.is_err(), + "Second execution should fail due to sequence number check" + ); + } + + #[motsu::test] + fn test_set_valid_period( + pyth_contract: Contract, + wormhole_contract: Contract, + alice: Address, + ) { + pyth_wormhole_init(&pyth_contract, &wormhole_contract, &alice, 0); + let hex_str = "01000000000100c9effcab077af2f3f65a7abfd1883295529eab7c0d4434772ed1f2d10b1de3571c214af45e944a3fee65417c9f0c6024010dadc26d30bb361e05f552ca4de04d000000000100000000000100000000000000000000000000000000000000000000000000000000000000110000000000000001005054474d010400020000000000000003"; + let bytes = Vec::from_hex(hex_str).expect("Invalid hex string"); + + let result = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + + assert!( + result.is_ok(), + 
"SetValidPeriod governance instruction should succeed" + ); + + let expected_event = ValidPeriodSet { + old_valid_period: U256::from(3600), + new_valid_period: U256::from(3), + }; + + assert!( + pyth_contract.emitted(&expected_event), + "ValidPeriodSet event should be emitted" + ); + } + + #[motsu::test] + fn test_set_fee( + pyth_contract: Contract, + wormhole_contract: Contract, + alice: Address, + ) { + pyth_wormhole_init(&pyth_contract, &wormhole_contract, &alice, 0); + + let hex_str = "0100000000010057940f58a6a44c93606bd721701539e0da93d5ea1583a735fbb13ecbcf9c01fc70240de519ea76869af14d067d68c5f3f2230f565f41b7009f3c3e63749353ed000000000100000000000100000000000000000000000000000000000000000000000000000000000000110000000000000001005054474d0103000200000000000000050000000000000003"; + let bytes = Vec::from_hex(hex_str).expect("Invalid hex string"); + + let result = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + + assert!(result.is_ok()); + + let expected_new_fee = U256::from(5000); + let expected_event = FeeSet { + old_fee: SINGLE_UPDATE_FEE_IN_WEI, + new_fee: expected_new_fee, + }; + assert!( + pyth_contract.emitted(&expected_event), + "FeeSet event should be emitted" + ); + + let result2 = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + assert!( + result2.is_err(), + "Second execution should fail due to sequence number check" + ); + } + + // This test is commented out because it requires an already deployed new Wormhole contract. + // This function demonstrates the usage of this instruction, however. 
+ /* + #[motsu::test] + fn test_set_wormhole_address( + pyth_contract: Contract, + wormhole_contract: Contract, + wormhole_contract_2: Contract, + alice: Address, + ) { + pyth_wormhole_init(&pyth_contract, &wormhole_contract, &alice, 0); + + let guardians = vec![address!("0x7e5f4552091a69125d5dfcb7b8c2659029395bdf")]; + let governance_contract = + Address::from_slice(&GOVERNANCE_CONTRACT.to_be_bytes::<32>()[12..32]); + wormhole_contract_2 + .sender(alice) + .initialize( + guardians, + 0, + CHAIN_ID, + GOVERNANCE_CHAIN_ID, + governance_contract, + ) + .unwrap(); + + + + let hex_str = format!("010000000001001daf08e5e3799cbc6096a90c2361e43220325418f377620a7a73d6bece18322679f6ada9725d9081743805efb8bccecd51098f1d76f34cba8b835fae643bbd9c000000000100000000000100000000000000000000000000000000000000000000000000000000000000110000000000000001005054474d01060002{:040x}", wormhole_contract_2.address()); + let bytes = Vec::from_hex(&hex_str).expect("Invalid hex string"); + + let result = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + if result.is_err() { + println!( + "SetWormholeAddress Error: {:?}", + result.as_ref().unwrap_err() + ); + } + */ + + #[motsu::test] + fn test_authorize_governance_data_source_transfer( + pyth_contract: Contract, + wormhole_contract: Contract, + alice: Address, + ) { + pyth_wormhole_init(&pyth_contract, &wormhole_contract, &alice, 0); + let hex_str = "01000000000100eb6abceff17a900422cbe415bd4776aa6477ee6ec7f3f58d1635ea2071fb915e43c6ac312b34996d4a76c52de96a8c2cc1c50aacb45aa2013eb6c8d05a472f94010000000100000000000100000000000000000000000000000000000000000000000000000000000000110000000000000001005054474d01010002010000000001006fc27ac424b300c23a564bcabe1d7888a898cba92b8aec62468c35025baaf4a87056c50d443fbc172c3caa30d28ec57cefc0bbabf4590ffe98c44dff040d0e02000000000100000000000200000000000000000000000000000000000000000000000000000000000011110000000000000001005054474d0105000200000001"; + let bytes = 
Vec::from_hex(hex_str).expect("Invalid hex string"); + + let result = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + if result.is_err() { + println!( + "AuthorizeGovernanceDataSourceTransfer Error: {:?}", + result.as_ref().unwrap_err() + ); + } + assert!(result.is_ok()); + + const NEW_GOVERNANCE_EMITTER: [u8; 32] = [ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x11, 0x11, + ]; + + let expected_event = GovernanceDataSourceSet { + old_chain_id: 0, // Initial governance_data_source_index + old_emitter_address: FixedBytes::from(GOVERNANCE_EMITTER), // Initial governance emitter from pyth_wormhole_init + new_chain_id: 2, // claim_vm.body.emitter_chain from the VAA + new_emitter_address: FixedBytes::from(NEW_GOVERNANCE_EMITTER), // emitter_bytes from the VAA + initial_sequence: 1, // claim_vm.body.sequence from the VAA (0x64 = 100) + }; + assert!( + pyth_contract.emitted(&expected_event), + "GovernanceDataSourceSet event should be emitted" + ); + } + + #[motsu::test] + fn test_set_transaction_fee( + pyth_contract: Contract, + wormhole_contract: Contract, + alice: Address, + ) { + pyth_wormhole_init(&pyth_contract, &wormhole_contract, &alice, 0); + + let hex_str = "010000000001001554008232e74cb3ac74acc4527ead8a39637c537ec9b3d1fbb624c1f4f52e341e24ae89d978e033f5345e4af244df0ec61f380d9e33330f439d2b6764850270010000000100000000000100000000000000000000000000000000000000000000000000000000000000110000000000000001005054474d0108000200000000000000640000000000000003"; + let bytes = Vec::from_hex(hex_str).expect("Invalid hex string"); + + let result = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + if result.is_err() { + println!( + "SetTransactionFee Error: {:?}", + result.as_ref().unwrap_err() + ); + } + assert!(result.is_ok()); + + let expected_new_fee = 
U256::from(100000); + let expected_event = TransactionFeeSet { + old_fee: U256::ZERO, + new_fee: expected_new_fee, + }; + assert!( + pyth_contract.emitted(&expected_event), + "TransactionFeeSet event should be emitted" + ); + + let result2 = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + assert!( + result2.is_err(), + "Second execution should fail due to sequence number check" + ); + } + + // Fee transfers can't be done in the motsu testing framework. This commented test serves as an example for how to use the function, though. + + /* + #[motsu::test] + fn test_withdraw_fee( + pyth_contract: Contract, + wormhole_contract: Contract, + alice: Address, + ) { + pyth_wormhole_init(&pyth_contract, &wormhole_contract, &alice, 0); + + let hex_str = "0100000000010030f48904e130d76ee219bc59988f89526e5c9860e89efda3a74e33c3ab53d4e6036d1c67249d2f25a27e8c94d203609785839e3e4817d0a03214ea8bbf6a8415000000000100000000000100000000000000000000000000000000000000000000000000000000000000110000000000000001005054474d0109000270997970c51812dc3a010c7d01b50e0d17dc79c800000000000000640000000000000003"; + let bytes = Vec::from_hex(&hex_str).expect("Invalid hex string"); + + pyth_contract.address().fund(U256::from(200000u64)); + + let result = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + + if result.is_err() { + println!("WithdrawFee Error: {:?}", result.as_ref().unwrap_err()); + } + assert!(result.is_ok()); + } + */ + + #[motsu::test] + fn test_invalid_wormhole_vaa_signature_reverts( + pyth_contract: Contract, + wormhole_contract: Contract, + alice: Address, + ) { + pyth_wormhole_init(&pyth_contract, &wormhole_contract, &alice, 0); + + let hex_str = 
"0100000000010067940f58a6a44c93606bd721701539e0da93d5ea1583a735fbb13ecbcf9c01fc70240de519ea76869af14d067d68c5f3f2230f565f41b7009f3c3e63749353ed000000000100000000000100000000000000000000000000000000000000000000000000000000000000110000000000000001005054474d0103000200000000000000050000000000000003"; + let bytes = Vec::from_hex(&hex_str).expect("Invalid hex string"); + + let result = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + + assert!(result.is_err(), "Invalid VAA should revert the transaction"); + } + + #[motsu::test] + fn test_invalid_wormhole_vaa_magic_reverts( + pyth_contract: Contract, + wormhole_contract: Contract, + alice: Address, + ) { + pyth_wormhole_init(&pyth_contract, &wormhole_contract, &alice, 0); + + // Changed the magic signature to an invalid one (6064474d instead of 5054474d) + let hex_str = "0100000000010067940f58a6a44c93606bd721701539e0da93d5ea1583a735fbb13ecbcf9c01fc70240de519ea76869af14d067d68c5f3f2230f565f41b7009f3c3e63749353ed000000000100000000000100000000000000000000000000000000000000000000000000000000000000110000000000000001006064474d0103000200000000000000050000000000000003"; + let bytes = Vec::from_hex(&hex_str).expect("Invalid hex string"); + + let result = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + + assert!(result.is_err(), "Invalid VAA should revert the transaction"); + } + + #[motsu::test] + fn test_invalid_wormhole_vaa_random_byte_cut_reverts( + pyth_contract: Contract, + wormhole_contract: Contract, + alice: Address, + ) { + pyth_wormhole_init(&pyth_contract, &wormhole_contract, &alice, 0); + + let hex_str = "0100000000010067940f58a676869af14d067d68c5f3f2230f565f41b7009f3c3e63749353ed000000000100000000000100000000000000000000000000000000000000000000000000000000000000110000000000000001005054474d0103000200000000000000050000000000000003"; + let bytes = Vec::from_hex(&hex_str).expect("Invalid hex string"); + + let result = pyth_contract + .sender(alice) + 
.execute_governance_instruction(bytes.clone()); + + assert!(result.is_err(), "Invalid VAA should revert the transaction"); + } + + #[motsu::test] + fn test_invalid_wormhole_vaa_invalid_version_number_reverts( + pyth_contract: Contract, + wormhole_contract: Contract, + alice: Address, + ) { + pyth_wormhole_init(&pyth_contract, &wormhole_contract, &alice, 0); + + // Changed the version number to an invalid one (2 instead of 1) + let hex_str = "0200000000010067940f58a6a44c93606bd721701539e0da93d5ea1583a735fbb13ecbcf9c01fc70240de519ea76869af14d067d68c5f3f2230f565f41b7009f3c3e63749353ed000000000100000000000100000000000000000000000000000000000000000000000000000000000000110000000000000001005054474d0103000200000000000000050000000000000003"; + let bytes = Vec::from_hex(&hex_str).expect("Invalid hex string"); + + let result = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + + assert!(result.is_err(), "Invalid VAA should revert the transaction"); + } + + #[motsu::test] + fn test_different_emitter_chain_id_than_wormhole_reverts( + pyth_contract: Contract, + wormhole_contract: Contract, + alice: Address, + ) { + pyth_wormhole_init(&pyth_contract, &wormhole_contract, &alice, 0); + + // Changed the emitter chain ID to a different one (2 instead of 1) + let hex_str = "0100000000010057940f58a6a44c93606bd721701539e0da93d5ea1583a735fbb13ecbcf9c01fc70240de519ea76869af14d067d68c5f3f2230f565f41b7009f3c3e63749353ed000000000100000000000200000000000000000000000000000000000000000000000000000000000000110000000000000001005054474d0103000200000000000000050000000000000003"; + let bytes = Vec::from_hex(&hex_str).expect("Invalid hex string"); + + let result = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + + assert!(result.is_err(), "Invalid VAA should revert the transaction"); + } + + #[motsu::test] + fn test_different_emitter_chain_address_than_wormhole_reverts( + pyth_contract: Contract, + wormhole_contract: Contract, + alice: 
Address, + ) { + pyth_wormhole_init(&pyth_contract, &wormhole_contract, &alice, 0); + + // Changed the emitter chain ID to a different one (...0011 to ...0022) + let hex_str = "0100000000010057940f58a6a44c93606bd721701539e0da93d5ea1583a735fbb13ecbcf9c01fc70240de519ea76869af14d067d68c5f3f2230f565f41b7009f3c3e63749353ed000000000100000000000100000000000000000000000000000000000000000000000000000000000000220000000000000001005054474d0103000200000000000000050000000000000003"; + let bytes = Vec::from_hex(&hex_str).expect("Invalid hex string"); + + let result = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + + assert!(result.is_err(), "Invalid VAA should revert the transaction"); + } + + #[motsu::test] + fn test_sequence_number_greater_than_last_executed_reverts( + pyth_contract: Contract, + wormhole_contract: Contract, + alice: Address, + ) { + pyth_wormhole_init(&pyth_contract, &wormhole_contract, &alice, 0); + + let hex_str = "0100000000010057940f58a6a44c93606bd721701539e0da93d5ea1583a735fbb13ecbcf9c01fc70240de519ea76869af14d067d68c5f3f2230f565f41b7009f3c3e63749353ed000000000100000000000100000000000000000000000000000000000000000000000000000000000000110000000000000001005054474d0103000200000000000000050000000000000003"; + let bytes = Vec::from_hex(&hex_str).expect("Invalid hex string"); + + let result = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + + assert!(result.is_ok(), "This is a valid VAA, should go through"); + + let result = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + + assert!( + result.is_err(), + "Cannot execute the same sequence number again, should revert" + ); + } + + #[motsu::test] + fn test_target_chain_id_from_ethereum_to_solana_reverts( + pyth_contract: Contract, + wormhole_contract: Contract, + alice: Address, + ) { + pyth_wormhole_init(&pyth_contract, &wormhole_contract, &alice, 0); + + // This VAA is for a target chain ID of 1 (Solana), but 
the PythReceiver is on chain ID 2 (Ethereum) + let hex_str = "0100000000010057940f58a6a44c93606bd721701539e0da93d5ea1583a735fbb13ecbcf9c01fc70240de519ea76869af14d067d68c5f3f2230f565f41b7009f3c3e63749353ed000000000100000000000100000000000000000000000000000000000000000000000000000000000000110000000000000001005054474d0103000100000000000000050000000000000003"; + let bytes = Vec::from_hex(&hex_str).expect("Invalid hex string"); + + let result = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + + assert!( + result.is_err(), + "Incorrect target chain ID should revert the transaction" + ); + } + + #[motsu::test] + fn test_unexpected_governance_action_id_reverts( + pyth_contract: Contract, + wormhole_contract: Contract, + alice: Address, + ) { + pyth_wormhole_init(&pyth_contract, &wormhole_contract, &alice, 0); + + // Changes this action to be a SetDataSources action instead of a SetFee action + let hex_str = "0100000000010057940f58a6a44c93606bd721701539e0da93d5ea1583a735fbb13ecbcf9c01fc70240de519ea76869af14d067d68c5f3f2230f565f41b7009f3c3e63749353ed000000000100000000000100000000000000000000000000000000000000000000000000000000000000110000000000000001005054474d0102000200000000000000050000000000000003"; + let bytes = Vec::from_hex(&hex_str).expect("Invalid hex string"); + + let result = pyth_contract + .sender(alice) + .execute_governance_instruction(bytes.clone()); + + assert!( + result.is_err(), + "Wrong action expected should lead to bad parsing" + ); + } +} diff --git a/target_chains/stylus/contracts/pyth-receiver/src/structs.rs b/target_chains/stylus/contracts/pyth-receiver/src/structs.rs index a6bfdaa448..90504a6cc8 100644 --- a/target_chains/stylus/contracts/pyth-receiver/src/structs.rs +++ b/target_chains/stylus/contracts/pyth-receiver/src/structs.rs @@ -19,6 +19,13 @@ pub struct DataSourceStorage { pub emitter_address: StorageFixedBytes<32>, } +impl Erase for DataSourceStorage { + fn erase(&mut self) { + self.chain_id.erase(); + 
self.emitter_address.erase(); + } +} + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct DataSource { pub chain_id: U16, diff --git a/target_chains/stylus/contracts/wormhole/src/lib.rs b/target_chains/stylus/contracts/wormhole/src/lib.rs index ce638e0580..1d23eb2282 100644 --- a/target_chains/stylus/contracts/wormhole/src/lib.rs +++ b/target_chains/stylus/contracts/wormhole/src/lib.rs @@ -1,6 +1,5 @@ #![cfg_attr(not(any(test, feature = "export-abi")), no_main)] #![cfg_attr(not(any(test, feature = "export-abi")), no_std)] - #![macro_use] extern crate alloc; @@ -9,9 +8,9 @@ static ALLOC: mini_alloc::MiniAlloc = mini_alloc::MiniAlloc::INIT; use alloc::{vec, vec::Vec}; use stylus_sdk::{ - prelude::*, - storage::{StorageMap, StorageUint, StorageAddress, StorageBool}, alloy_primitives::{Address, FixedBytes, U256, keccak256}, + prelude::*, + storage::{StorageAddress, StorageBool, StorageMap, StorageUint}, }; use k256::ecdsa::{RecoveryId, Signature, VerifyingKey}; @@ -125,9 +124,11 @@ impl WormholeContract { if self.initialized.get() { return Err(WormholeError::AlreadyInitialized.into()); } - self.current_guardian_set_index.set(U256::from(initial_guardian_set_index)); + self.current_guardian_set_index + .set(U256::from(initial_guardian_set_index)); self.chain_id.set(U256::from(chain_id)); - self.governance_chain_id.set(U256::from(governance_chain_id)); + self.governance_chain_id + .set(U256::from(governance_chain_id)); self.governance_contract.set(governance_contract); self.store_gs(initial_guardian_set_index, initial_guardians, 0)?; @@ -148,7 +149,7 @@ impl WormholeContract { encoded.extend_from_slice(address.as_slice()); } Ok(encoded) - }, + } Err(e) => Err(e.into()), } } @@ -172,6 +173,9 @@ impl WormholeContract { fn quorum(num_guardians: u32) -> u32 { (num_guardians * 2) / 3 + 1 } + fn chain_id(&self) -> u16 { + self.chain_id.get().try_into().unwrap_or(0u16) + } } impl WormholeContract { @@ -185,118 +189,131 @@ impl WormholeContract { if encoded_vaa.len() < 
6 { return Err(WormholeError::InvalidVAAFormat); } - + let mut cursor = 0; - + // Get version - let version = encoded_vaa.get(cursor) + let version = encoded_vaa + .get(cursor) .ok_or(WormholeError::InvalidVAAFormat)?; cursor += 1; - + if *version != 1 { return Err(WormholeError::InvalidVAAFormat); } - + // Get guardian set index - let gsi_bytes = encoded_vaa.get(cursor..cursor + 4) + let gsi_bytes = encoded_vaa + .get(cursor..cursor + 4) .ok_or(WormholeError::InvalidVAAFormat)?; let guardian_set_index = u32::from_be_bytes( - gsi_bytes.try_into() - .map_err(|_| WormholeError::InvalidVAAFormat)? + gsi_bytes + .try_into() + .map_err(|_| WormholeError::InvalidVAAFormat)?, ); cursor += 4; - + // Get number of signatures - let len_signatures = *encoded_vaa.get(cursor) + let len_signatures = *encoded_vaa + .get(cursor) .ok_or(WormholeError::InvalidVAAFormat)?; cursor += 1; - + if len_signatures > 19 { return Err(WormholeError::InvalidVAAFormat); } - + let mut signatures = Vec::with_capacity(len_signatures as usize); - + for _ in 0..len_signatures { if cursor + 66 > encoded_vaa.len() { return Err(WormholeError::InvalidVAAFormat); } - - let guardian_index = *encoded_vaa.get(cursor) + + let guardian_index = *encoded_vaa + .get(cursor) .ok_or(WormholeError::InvalidVAAFormat)?; cursor += 1; - - let sig_bytes = encoded_vaa.get(cursor..cursor + 65) + + let sig_bytes = encoded_vaa + .get(cursor..cursor + 65) .ok_or(WormholeError::InvalidVAAFormat)?; let mut fixed_sig = [0u8; 65]; fixed_sig.copy_from_slice(sig_bytes); cursor += 65; - + signatures.push(GuardianSignature { guardian_index, signature: FixedBytes::from(fixed_sig), }); } - + if cursor + 51 > encoded_vaa.len() { return Err(WormholeError::InvalidVAAFormat); } - + // Get timestamp - let ts_bytes = encoded_vaa.get(cursor..cursor + 4) + let ts_bytes = encoded_vaa + .get(cursor..cursor + 4) .ok_or(WormholeError::InvalidVAAFormat)?; let timestamp = u32::from_be_bytes( - ts_bytes.try_into() - .map_err(|_| 
WormholeError::InvalidVAAFormat)? + ts_bytes + .try_into() + .map_err(|_| WormholeError::InvalidVAAFormat)?, ); cursor += 4; - + // Get nonce - let nonce_bytes = encoded_vaa.get(cursor..cursor + 4) + let nonce_bytes = encoded_vaa + .get(cursor..cursor + 4) .ok_or(WormholeError::InvalidVAAFormat)?; let nonce = u32::from_be_bytes( - nonce_bytes.try_into() - .map_err(|_| WormholeError::InvalidVAAFormat)? + nonce_bytes + .try_into() + .map_err(|_| WormholeError::InvalidVAAFormat)?, ); cursor += 4; - + // Get emitter chain ID - let emitter_chain_bytes = encoded_vaa.get(cursor..cursor + 2) + let emitter_chain_bytes = encoded_vaa + .get(cursor..cursor + 2) .ok_or(WormholeError::InvalidVAAFormat)?; - let emitter_chain_id = u16::from_be_bytes([ - emitter_chain_bytes[0], - emitter_chain_bytes[1], - ]); + let emitter_chain_id = u16::from_be_bytes([emitter_chain_bytes[0], emitter_chain_bytes[1]]); cursor += 2; - + // Get emitter address - let emitter_address_bytes = encoded_vaa.get(cursor..cursor + 32) + let emitter_address_bytes = encoded_vaa + .get(cursor..cursor + 32) .ok_or(WormholeError::InvalidVAAFormat)?; let mut fixed_emitter = [0u8; 32]; fixed_emitter.copy_from_slice(emitter_address_bytes); cursor += 32; - + // Get sequence - let sequence_bytes = encoded_vaa.get(cursor..cursor + 8) + let sequence_bytes = encoded_vaa + .get(cursor..cursor + 8) .ok_or(WormholeError::InvalidVAAFormat)?; let sequence = u64::from_be_bytes( - sequence_bytes.try_into() - .map_err(|_| WormholeError::InvalidVAAFormat)? + sequence_bytes + .try_into() + .map_err(|_| WormholeError::InvalidVAAFormat)?, ); cursor += 8; - + // Get consistency level - let consistency_level = *encoded_vaa.get(cursor) + let consistency_level = *encoded_vaa + .get(cursor) .ok_or(WormholeError::InvalidVAAFormat)?; cursor += 1; - + // Get payload - let payload = encoded_vaa.get(cursor..) + let payload = encoded_vaa + .get(cursor..) .ok_or(WormholeError::InvalidVAAFormat)? 
.to_vec(); - + let hash = keccak256(&encoded_vaa[cursor - 51..]); - + Ok(VerifiedVM { version: *version, guardian_set_index, @@ -311,20 +328,26 @@ impl WormholeContract { hash, }) } - fn verify_vm(&self, vaa: &VerifiedVM) -> Result<(), WormholeError> { let guardian_set = self.get_gs_internal(vaa.guardian_set_index)?; - if vaa.guardian_set_index != self.current_guardian_set_index.get().try_into().unwrap_or(0u32) - && guardian_set.expiration_time > 0 { - return Err(WormholeError::GuardianSetExpired) + let current_gsi = self.current_guardian_set_index.get().try_into().unwrap_or(0u32); + if vaa.guardian_set_index != current_gsi && guardian_set.expiration_time > 0 { + return Err(WormholeError::GuardianSetExpired); } - - let num_guardians : u32 = guardian_set.keys.len().try_into().map_err(|_| WormholeError::InvalidInput)?; + let num_guardians: u32 = guardian_set + .keys + .len() + .try_into() + .map_err(|_| WormholeError::InvalidInput)?; let required_signatures = Self::quorum(num_guardians); - let num_signatures : u32 = vaa.signatures.len().try_into().map_err(|_| WormholeError::InvalidInput)?; + let num_signatures: u32 = vaa + .signatures + .len() + .try_into() + .map_err(|_| WormholeError::InvalidInput)?; if num_signatures < required_signatures { return Err(WormholeError::InsufficientSignatures); @@ -353,7 +376,7 @@ impl WormholeContract { let hashed_vaa_hash: FixedBytes<32> = FixedBytes::from(keccak256(vaa.hash)); match self.verify_signature(&hashed_vaa_hash, &signature.signature, guardian_address) { - Ok(true) => {}, + Ok(true) => {} Ok(false) => return Err(WormholeError::InvalidSignature.into()), Err(e) => return Err(e), } @@ -367,16 +390,26 @@ impl WormholeContract { U256::from_be_bytes(keccak256(&key_data).0) } - fn store_gs(&mut self, set_index: u32, guardians: Vec
, expiration_time: u32) -> Result<(), WormholeError> { + fn store_gs( + &mut self, + set_index: u32, + guardians: Vec
, + expiration_time: u32, + ) -> Result<(), WormholeError> { if guardians.is_empty() { return Err(WormholeError::InvalidInput); } - self.guardian_set_sizes.setter(U256::from(set_index)).set(U256::from(guardians.len())); - self.guardian_set_expiry.setter(U256::from(set_index)).set(U256::from(expiration_time)); + self.guardian_set_sizes + .setter(U256::from(set_index)) + .set(U256::from(guardians.len())); + self.guardian_set_expiry + .setter(U256::from(set_index)) + .set(U256::from(expiration_time)); for (i, guardian) in guardians.iter().enumerate() { - let i_u8: u8 = i.try_into() + let i_u8: u8 = i + .try_into() .map_err(|_| WormholeError::InvalidGuardianIndex)?; let key = self.compute_gs_key(set_index, i_u8); self.guardian_keys.setter(key).set(*guardian); @@ -400,27 +433,33 @@ impl WormholeContract { RecoveryId::try_from(recovery_id_byte - 27) .map_err(|_| WormholeError::InvalidSignature)? } else { - RecoveryId::try_from(recovery_id_byte) - .map_err(|_| WormholeError::InvalidSignature)? + RecoveryId::try_from(recovery_id_byte).map_err(|_| WormholeError::InvalidSignature)? }; - let sig = Signature::try_from(&signature[..64]) - .map_err(|_| WormholeError::InvalidSignature)?; + let sig = + Signature::try_from(&signature[..64]).map_err(|_| WormholeError::InvalidSignature)?; - let verifying_key = VerifyingKey::recover_from_prehash(hash.as_slice().try_into().map_err(|_| WormholeError::InvalidInput)?, &sig, recovery_id) - .map_err(|_| WormholeError::InvalidSignature)?; + let verifying_key = VerifyingKey::recover_from_prehash( + hash.as_slice() + .try_into() + .map_err(|_| WormholeError::InvalidInput)?, + &sig, + recovery_id, + ) + .map_err(|_| WormholeError::InvalidSignature)?; let public_key_bytes = verifying_key.to_encoded_point(false); let public_key_slice = &public_key_bytes.as_bytes()[1..]; let address_hash = keccak256(public_key_slice); - let address_bytes: [u8; 20] = address_hash[12..].try_into() + let address_bytes: [u8; 20] = address_hash[12..] 
+ .try_into() .map_err(|_| WormholeError::InvalidAddressLength)?; Ok(Address::from(address_bytes) == guardian_address) } - fn get_gs_internal(&self, index: u32) -> Result { + fn get_gs_internal(&self, index: u32) -> Result { let size = self.guardian_set_sizes.getter(U256::from(index)).get(); if size.is_zero() { return Err(WormholeError::InvalidGuardianSetIndex); @@ -457,6 +496,7 @@ impl IWormhole for WormholeContract { let vaa = self.parse_vm(&encoded_vaa)?; self.verify_vm(&vaa)?; + Ok(vaa) } @@ -465,14 +505,16 @@ impl IWormhole for WormholeContract { } fn get_current_guardian_set_index(&self) -> u32 { - self.current_guardian_set_index.get().try_into().unwrap_or(0u32) + self.current_guardian_set_index + .get() + .try_into() + .unwrap_or(0u32) } fn governance_action_is_consumed(&self, hash: Vec) -> bool { self.consumed_governance_actions.get(hash) } - #[inline] fn chain_id(&self) -> u16 { self.chain_id.get().try_into().unwrap_or(0u16) } @@ -502,10 +544,10 @@ mod tests { use k256::ecdsa::SigningKey; use stylus_sdk::alloy_primitives::keccak256; - #[cfg(test)] - use base64::engine::general_purpose; #[cfg(test)] use base64::Engine; + #[cfg(test)] + use base64::engine::general_purpose; const CHAIN_ID: u16 = 60051; const GOVERNANCE_CHAIN_ID: u16 = 1; @@ -518,9 +560,7 @@ mod tests { #[cfg(test)] fn create_vaa_bytes(input_string: &str) -> Vec { - let vaa_bytes = general_purpose::STANDARD - .decode(input_string) - .unwrap(); + let vaa_bytes = general_purpose::STANDARD.decode(input_string).unwrap(); let vaa: Vec = vaa_bytes; vaa } @@ -528,20 +568,18 @@ mod tests { #[cfg(test)] fn test_guardian_secret1() -> [u8; 32] { [ - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, - 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 
0x1b, 0x1c, + 0x1d, 0x1e, 0x1f, 0x20, ] } #[cfg(test)] fn test_guardian_secret2() -> [u8; 32] { [ - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, - 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, - 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, - 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, + 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, + 0x3d, 0x3e, 0x3f, 0x40, ] } @@ -596,8 +634,6 @@ mod tests { ] } - - #[cfg(test)] fn test_guardian_address1() -> Address { let secret = test_guardian_secret1(); @@ -610,7 +646,6 @@ mod tests { Address::from(address_bytes) } - #[cfg(test)] fn test_guardian_address2() -> Address { let secret = test_guardian_secret2(); @@ -677,8 +712,14 @@ mod tests { #[cfg(test)] fn mock_guardian_set13() -> Vec
{ vec![ - Address::from([0x58, 0x93, 0xB5, 0xA7, 0x6c, 0x3f, 0x73, 0x96, 0x45, 0x64, 0x88, 0x85, 0xbD, 0xCc, 0xC0, 0x6c, 0xd7, 0x0a, 0x3C, 0xd3]), - Address::from([0xff, 0x6C, 0xB9, 0x52, 0x58, 0x9B, 0xDE, 0x86, 0x2c, 0x25, 0xEf, 0x43, 0x92, 0x13, 0x2f, 0xb9, 0xD4, 0xA4, 0x21, 0x57]), + Address::from([ + 0x58, 0x93, 0xB5, 0xA7, 0x6c, 0x3f, 0x73, 0x96, 0x45, 0x64, 0x88, 0x85, 0xbD, 0xCc, + 0xC0, 0x6c, 0xd7, 0x0a, 0x3C, 0xd3, + ]), + Address::from([ + 0xff, 0x6C, 0xB9, 0x52, 0x58, 0x9B, 0xDE, 0x86, 0x2c, 0x25, 0xEf, 0x43, 0x92, 0x13, + 0x2f, 0xb9, 0xD4, 0xA4, 0x21, 0x57, + ]), ] } @@ -711,7 +752,11 @@ mod tests { } } - fn create_test_vaa_with_emitter(guardian_set_index: u32, signatures: Vec, emitter: Address) -> VerifiedVM { + fn create_test_vaa_with_emitter( + guardian_set_index: u32, + signatures: Vec, + emitter: Address, + ) -> VerifiedVM { let mut emitter_bytes = [0u8; 32]; emitter_bytes[12..32].copy_from_slice(emitter.as_slice()); @@ -730,7 +775,10 @@ mod tests { } } - fn create_valid_guardian_signature(guardian_index: u8, hash: &FixedBytes<32>) -> Result { + fn create_valid_guardian_signature( + guardian_index: u8, + hash: &FixedBytes<32>, + ) -> Result { let secret_bytes = match guardian_index { 0 => test_guardian_secret1(), 1 => test_guardian_secret2(), @@ -740,10 +788,13 @@ mod tests { let signing_key = SigningKey::from_bytes(&secret_bytes.into()) .map_err(|_| WormholeError::InvalidInput)?; - let hash_array: [u8; 32] = hash.as_slice().try_into() + let hash_array: [u8; 32] = hash + .as_slice() + .try_into() .map_err(|_| WormholeError::InvalidInput)?; - let (signature, recovery_id) = signing_key.sign_prehash_recoverable(&hash_array) + let (signature, recovery_id) = signing_key + .sign_prehash_recoverable(&hash_array) .map_err(|_| WormholeError::InvalidInput)?; let mut sig_bytes = [0u8; 65]; @@ -1113,7 +1164,6 @@ mod tests { let gov_contract = wormhole_contract.sender(alice).governance_contract(); let expected = 
Address::from_slice(&GOVERNANCE_CONTRACT.to_be_bytes::<32>()[12..32]); assert_eq!(gov_contract, expected); - } #[motsu::test] @@ -1127,7 +1177,8 @@ mod tests { #[motsu::test] fn test_initialize_contract_like_shell_script(wormhole_contract: Contract, alice: Address) { let guardians = current_guardians(); - let governance_contract = Address::from_slice(&GOVERNANCE_CONTRACT.to_be_bytes::<32>()[12..32]); + let governance_contract = + Address::from_slice(&GOVERNANCE_CONTRACT.to_be_bytes::<32>()[12..32]); let result = wormhole_contract.sender(alice).initialize(guardians.clone(), 4, CHAIN_ID, GOVERNANCE_CHAIN_ID, governance_contract); assert!(result.is_ok(), "Contract initialization should succeed"); @@ -1136,13 +1187,17 @@ mod tests { #[motsu::test] fn test_quorum_calculation_integration_test(wormhole_contract: Contract, alice: Address) { let quorum_result = WormholeContract::quorum(3); - assert_eq!(quorum_result, 3, "Quorum calculation should work: (3 * 2) / 3 + 1 = 3"); + assert_eq!( + quorum_result, 3, + "Quorum calculation should work: (3 * 2) / 3 + 1 = 3" + ); } #[motsu::test] fn test_guardian_set_retrieval_current_guardians(wormhole_contract: Contract, alice: Address) { let guardians = current_guardians(); - let governance_contract = Address::from_slice(&GOVERNANCE_CONTRACT.to_be_bytes::<32>()[12..32]); + let governance_contract = + Address::from_slice(&GOVERNANCE_CONTRACT.to_be_bytes::<32>()[12..32]); let _ = wormhole_contract.sender(alice).initialize(guardians.clone(), 4, CHAIN_ID, GOVERNANCE_CHAIN_ID, governance_contract); @@ -1150,7 +1205,11 @@ mod tests { assert!(guardian_set_result.is_ok(), "Guardian set retrieval should work - contract is initialized"); let guardian_set_bytes = guardian_set_result.unwrap(); - assert_eq!(guardian_set_bytes.len(), 19 * 20, "Should have 19 guardian addresses (20 bytes each)"); + assert_eq!( + guardian_set_bytes.len(), + 19 * 20, + "Should have 19 guardian addresses (20 bytes each)" + ); 
assert_eq!(wormhole_contract.sender(alice).chain_id(), CHAIN_ID, "Chain ID should match shell script value"); @@ -1164,7 +1223,8 @@ mod tests { #[motsu::test] fn test_duplicate_verification(wormhole_contract: Contract, alice: Address) { let guardians = current_guardians_duplicate(); - let governance_contract = Address::from_slice(&GOVERNANCE_CONTRACT.to_be_bytes::<32>()[12..32]); + let governance_contract = + Address::from_slice(&GOVERNANCE_CONTRACT.to_be_bytes::<32>()[12..32]); let _ = wormhole_contract.sender(alice).initialize(guardians.clone(), 4, CHAIN_ID, GOVERNANCE_CHAIN_ID, governance_contract); @@ -1179,7 +1239,8 @@ mod tests { #[motsu::test] fn switch_guardian_set(wormhole_contract: Contract, alice: Address) { let guardians = current_guardians_duplicate(); - let governance_contract = Address::from_slice(&GOVERNANCE_CONTRACT.to_be_bytes::<32>()[12..32]); + let governance_contract = + Address::from_slice(&GOVERNANCE_CONTRACT.to_be_bytes::<32>()[12..32]); let _ = wormhole_contract.sender(alice).initialize(guardians.clone(), 3, CHAIN_ID, GOVERNANCE_CHAIN_ID, governance_contract); let test_vaa = 
create_vaa_bytes("AQAAAAQNAInUwKI1ItLfYeLaAibn9oXaouTs9BL3Aa9DKCFWrLu0KDaQQMQJlih0Qh7l7yH2o6kD/g9RCmRwZJ6q0OZE0t4AArCSH1wpX04N1U59tQmss2xXZilimAMKlogp7ErAhAo0LFkDogqB74+2By9rm3P5OUWlbC0lrFNut5CQQV38DGsAAxO+1nUTUc842P2afDSjdWcmjvJl2s8secQzuiW8zrdgPpbzhzWsiYXizLQBRKigDS8pWGD4vRk0fuR8H/ZkO/0BBOmDobl1BLNJx7+Pt+NWfuOUBipVFIXGxI9b3vxxH0BIec8hhxDN4m2Pd2I0klGEXKhv9plcR7VlzAsaC7ZE7QIABh4ff66tP7EHdVfZR4mTzv5B97agMcSB1eDeijpyl9JuBhbMupw7nExZNnZag/x2k6AUEWnQnfp8AoaCK7Av+icAB2Ouk9mPd1ybyju39Q8m7GMevt2f1nHVyWVsPRzdEcCuAbzjh5137DCLzVWuFUujTQJ7IJiznQb6cm2Ljk3WOXUACMa/JwRdpVKZf6eTD6O6tivqhdhMtbijlPBZX/kgVKk5Xuyv3h1SRTrNCwkMg5XOWegnCbXqjbUlo+F3qTjCalQBCxfp1itJskZmv+SXA47QivURKWzGa3mntNh0vcAXYi8FeChvoUYmfYpejmBlOkD1I73pmUsyrbYbetHa7qFu3eoBDZScdyrWp2dS5Y9L4b0who/PncVp5oFs/4J8ThHNQoXWXvys+nUc2aM+E+Fwazo2ODdI8XZz9YOGf/ZfE6iXFBYBDgckow8Nb2QD//C6MfP2Bz8zftqvt+D6Dko7v/Inb2OtCj342yjrxcvAMlCQ6lYoTIAMNemzNoqlfNyDMdB9yKoAEKebRtCm8QZSjLQ5uPk8aoQpmNwCpLhiHuzh2fqH55fcQrE6/KFttfw7VzeGUE7k3PF6xIMq0BPr3vkG2MedIh8BEQvpmYK4fChLY5JG26Kk6KuZ1eCkJAOQgdSjWasAvNgsSIlsb5mFjIkGwK9j20svLSl+OJ7I0olefXcZ2JywjgYAEu1jITMLHCMR1blXENulhApdhMfTef1aQ/USMqRVWNigausEzq49Hi2GtcQzHmZuhgnhBZEnjq9K8jsZwJk59iwBaFxZegAAAAAAATTNxrJiPzbWCugg6Vtg92ToHsLNO1e3fj+OJd3UOsNzAAAAAAATpFIAAVE6cNLnZT2Noq5nJ4VNRSf2KrRBNrlimFaXauHv3efDAAFm5RiKEwih25C20x8/vcqMPfJnjIES3909GSxaPMRXqAAAAAAAAAAAAAAAAFxIFHGlrpnuxd5M5WePQalLpUyHAB4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAALFcwAAAAAAAAAAAAAAAAaFxdzQAAAAAAAAAAAAAAAK3MabLDE8LWvGN6+AdUvFHJdm5RAAMAAAAAAAAAAAAAAADf0SJhChSsEtk0iYwC2+wfcnCBFg=="); @@ -1199,6 +1260,4 @@ mod tests { let result2 = wormhole_contract.sender(alice).parse_and_verify_vm(test_vaa.clone()); assert!(result2.is_ok()); } - - } diff --git a/target_chains/stylus/contracts/wormhole/tests/integration_test.rs b/target_chains/stylus/contracts/wormhole/tests/integration_test.rs index e03db007c6..d058f3783d 100644 --- a/target_chains/stylus/contracts/wormhole/tests/integration_test.rs +++ 
b/target_chains/stylus/contracts/wormhole/tests/integration_test.rs @@ -5,10 +5,12 @@ use core::str::FromStr; use stylus_sdk::alloy_primitives::{Address, FixedBytes, U256}; use motsu::prelude::Contract; use k256::ecdsa::SigningKey; +use motsu::prelude::DefaultStorage; use stylus_sdk::alloy_primitives::keccak256; +use wormhole_contract::*; -use base64::engine::general_purpose; use base64::Engine; +use base64::engine::general_purpose; const CHAIN_ID: u16 = 60051; const GOVERNANCE_CHAIN_ID: u16 = 1; @@ -19,28 +21,24 @@ fn test_wormhole_vaa() -> Vec { } fn create_vaa_bytes(input_string: &str) -> Vec { - let vaa_bytes = general_purpose::STANDARD - .decode(input_string) - .unwrap(); + let vaa_bytes = general_purpose::STANDARD.decode(input_string).unwrap(); let vaa: Vec = vaa_bytes; vaa } fn test_guardian_secret1() -> [u8; 32] { [ - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, - 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, - 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, - 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, + 0x1f, 0x20, ] } fn test_guardian_secret2() -> [u8; 32] { [ - 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, - 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, - 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, - 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, + 0x3f, 0x40, ] } @@ -104,8 +102,6 @@ fn test_guardian_address1() -> Address { Address::from(address_bytes) } - - fn test_guardian_address2() -> Address { let secret = test_guardian_secret2(); let signing_key = SigningKey::from_bytes(&secret.into()).expect("key"); @@ -145,12 +141,10 @@ fn 
deploy_with_mainnet_guardians(wormhole_contract: &Contract, wormhole_contract.sender(*alice).initialize(guardians, 0, CHAIN_ID, GOVERNANCE_CHAIN_ID, governance_contract).unwrap(); } - fn guardian_set0() -> Vec
{ vec![Address::from_str("0x58CC3AE5C097b213cE3c81979e1B9f9570746AA5").unwrap()] } - fn guardian_set4() -> Vec
{ vec![ Address::from_str("0x5893B5A76c3f739645648885bDCcC06cd70a3Cd3").unwrap(), @@ -159,15 +153,19 @@ fn guardian_set4() -> Vec
{ ] } - fn mock_guardian_set13() -> Vec
{ vec![ - Address::from([0x58, 0x93, 0xB5, 0xA7, 0x6c, 0x3f, 0x73, 0x96, 0x45, 0x64, 0x88, 0x85, 0xbD, 0xCc, 0xC0, 0x6c, 0xd7, 0x0a, 0x3C, 0xd3]), - Address::from([0xff, 0x6C, 0xB9, 0x52, 0x58, 0x9B, 0xDE, 0x86, 0x2c, 0x25, 0xEf, 0x43, 0x92, 0x13, 0x2f, 0xb9, 0xD4, 0xA4, 0x21, 0x57]), + Address::from([ + 0x58, 0x93, 0xB5, 0xA7, 0x6c, 0x3f, 0x73, 0x96, 0x45, 0x64, 0x88, 0x85, 0xbD, 0xCc, + 0xC0, 0x6c, 0xd7, 0x0a, 0x3C, 0xd3, + ]), + Address::from([ + 0xff, 0x6C, 0xB9, 0x52, 0x58, 0x9B, 0xDE, 0x86, 0x2c, 0x25, 0xEf, 0x43, 0x92, 0x13, + 0x2f, 0xb9, 0xD4, 0xA4, 0x21, 0x57, + ]), ] } - fn corrupted_vaa(mut real_data: Vec, pos: usize, random1: u8, random2: u8) -> Vec { if real_data.len() < 2 { return real_data; @@ -196,7 +194,11 @@ fn create_test_vaa(guardian_set_index: u32, signatures: Vec) } } -fn create_test_vaa_with_emitter(guardian_set_index: u32, signatures: Vec, emitter: Address) -> VerifiedVM { +fn create_test_vaa_with_emitter( + guardian_set_index: u32, + signatures: Vec, + emitter: Address, +) -> VerifiedVM { let mut emitter_bytes = [0u8; 32]; emitter_bytes[12..32].copy_from_slice(emitter.as_slice()); @@ -215,20 +217,26 @@ fn create_test_vaa_with_emitter(guardian_set_index: u32, signatures: Vec) -> Result { +fn create_valid_guardian_signature( + guardian_index: u8, + hash: &FixedBytes<32>, +) -> Result { let secret_bytes = match guardian_index { 0 => test_guardian_secret1(), 1 => test_guardian_secret2(), _ => test_guardian_secret1(), }; - let signing_key = SigningKey::from_bytes(&secret_bytes.into()) - .map_err(|_| WormholeError::InvalidInput)?; + let signing_key = + SigningKey::from_bytes(&secret_bytes.into()).map_err(|_| WormholeError::InvalidInput)?; - let hash_array: [u8; 32] = hash.as_slice().try_into() + let hash_array: [u8; 32] = hash + .as_slice() + .try_into() .map_err(|_| WormholeError::InvalidInput)?; - let (signature, recovery_id) = signing_key.sign_prehash_recoverable(&hash_array) + let (signature, recovery_id) = signing_key + 
.sign_prehash_recoverable(&hash_array) .map_err(|_| WormholeError::InvalidInput)?; let mut sig_bytes = [0u8; 65]; @@ -241,7 +249,6 @@ fn create_valid_guardian_signature(guardian_index: u8, hash: &FixedBytes<32>) -> }) } - fn create_guardian_signature(guardian_index: u8) -> GuardianSignature { GuardianSignature { guardian_index, @@ -353,7 +360,6 @@ fn test_chain_id_governance_values(wormhole_contract: Contract let gov_contract = wormhole_contract.sender(alice).governance_contract(); let expected = Address::from_slice(&GOVERNANCE_CONTRACT.to_be_bytes::<32>()[12..32]); assert_eq!(gov_contract, expected); - } #[motsu::test] @@ -384,7 +390,11 @@ fn test_guardian_set_retrieval_current_guardians(wormhole_contract: Contract, al let result = wormhole_contract.sender(alice).parse_and_verify_vm(test_vaa); println!("result: {:?}", result); assert!(result.is_err()); -} +} \ No newline at end of file diff --git a/target_chains/sui/sdk/js/README.md b/target_chains/sui/sdk/js/README.md index bbfbf8f68e..e2532e6064 100644 --- a/target_chains/sui/sdk/js/README.md +++ b/target_chains/sui/sdk/js/README.md @@ -22,7 +22,7 @@ Pyth prices and submit them to the network: ```typescript const connection = new SuiPriceServiceConnection( - "https://hermes-beta.pyth.network" + "https://hermes-beta.pyth.network", ); // See Hermes endpoints section below for other endpoints const priceIds = [ @@ -104,8 +104,9 @@ You can run this example with `pnpm turbo --filter @pythnetwork/pyth-sui-js run ```bash export SUI_KEY=YOUR_PRIV_KEY; -pnpm turbo --filter @pythnetwork/pyth-sui-js run example-relay -- --feed-id "5a035d5440f5c163069af66062bac6c79377bf88396fa27e6067bfca8096d280" \ ---price-service "https://hermes-beta.pyth.network" \ +pnpm turbo run example-relay --filter @pythnetwork/pyth-sui-js -- \ +--feed-id "5a035d5440f5c163069af66062bac6c79377bf88396fa27e6067bfca8096d280" \ +--hermes "https://hermes-beta.pyth.network" \ --full-node "https://fullnode.testnet.sui.io:443" \ --pyth-state-id 
"0xd3e79c2c083b934e78b3bd58a490ec6b092561954da6e7322e1e2b3c8abfddc0" \ --wormhole-state-id "0x31358d198147da50db32eda2562951d53973a0c0ad5ed738e9b17d88b213d790" @@ -136,7 +137,7 @@ This method is useful if you want to show continuously updating real-time prices // gets a price update. connection.subscribePriceFeedUpdates(priceIds, (priceFeed) => { console.log( - `Received update for ${priceFeed.id}: ${priceFeed.getPriceNoOlderThan(60)}` + `Received update for ${priceFeed.id}: ${priceFeed.getPriceNoOlderThan(60)}`, ); }); diff --git a/target_chains/sui/sdk/js/package.json b/target_chains/sui/sdk/js/package.json index 881cd69ebb..8a401ed5f1 100644 --- a/target_chains/sui/sdk/js/package.json +++ b/target_chains/sui/sdk/js/package.json @@ -1,6 +1,6 @@ { "name": "@pythnetwork/pyth-sui-js", - "version": "2.1.0", + "version": "2.2.0", "description": "Pyth Network Sui Utilities", "homepage": "https://pyth.network", "author": { diff --git a/target_chains/sui/sdk/js/src/client.ts b/target_chains/sui/sdk/js/src/client.ts index 1d49fa6ae9..a047c0e983 100644 --- a/target_chains/sui/sdk/js/src/client.ts +++ b/target_chains/sui/sdk/js/src/client.ts @@ -6,6 +6,10 @@ import { HexString } from "@pythnetwork/price-service-client"; import { Buffer } from "buffer"; const MAX_ARGUMENT_SIZE = 16 * 1024; +type NestedTransactionResult = { + $kind: "NestedResult"; + NestedResult: [number, number]; +}; export type ObjectId = string; export class SuiPythClient { @@ -104,20 +108,11 @@ export class SuiPythClient { return verifiedVaas; } - /** - * Adds the necessary commands for updating the pyth price feeds to the transaction block. 
- * @param tx transaction block to add commands to - * @param updates array of price feed updates received from the price service - * @param feedIds array of feed ids to update (in hex format) - */ - async updatePriceFeeds( + async verifyVaasAndGetHotPotato( tx: Transaction, updates: Buffer[], - feedIds: HexString[], - ): Promise { - const packageId = await this.getPythPackageId(); - - let priceUpdatesHotPotato; + packageId: string, + ): Promise { if (updates.length > 1) { throw new Error( "SDK does not support sending multiple accumulator messages in a single transaction", @@ -125,7 +120,7 @@ export class SuiPythClient { } const vaa = this.extractVaaBytesFromAccumulatorMessage(updates[0]); const verifiedVaas = await this.verifyVaas([vaa], tx); - [priceUpdatesHotPotato] = tx.moveCall({ + const [priceUpdatesHotPotato] = tx.moveCall({ target: `${packageId}::pyth::create_authenticated_price_infos_using_accumulator`, arguments: [ tx.object(this.pythStateId), @@ -141,13 +136,17 @@ export class SuiPythClient { tx.object(SUI_CLOCK_OBJECT_ID), ], }); + return priceUpdatesHotPotato; + } + async executePriceFeedUpdates( + tx: Transaction, + packageId: string, + feedIds: HexString[], + priceUpdatesHotPotato: any, + coins: NestedTransactionResult[], + ) { const priceInfoObjects: ObjectId[] = []; - const baseUpdateFee = await this.getBaseUpdateFee(); - const coins = tx.splitCoins( - tx.gas, - feedIds.map(() => tx.pure.u64(baseUpdateFee)), - ); let coinId = 0; for (const feedId of feedIds) { const priceInfoObjectId = await this.getPriceFeedObjectId(feedId); @@ -176,6 +175,69 @@ export class SuiPythClient { }); return priceInfoObjects; } + + /** + * Adds the necessary commands for updating the pyth price feeds to the transaction block. 
+ * @param tx transaction block to add commands to + * @param updates array of price feed updates received from the price service + * @param feedIds array of feed ids to update (in hex format) + */ + async updatePriceFeeds( + tx: Transaction, + updates: Buffer[], + feedIds: HexString[], + ): Promise { + const packageId = await this.getPythPackageId(); + const priceUpdatesHotPotato = await this.verifyVaasAndGetHotPotato( + tx, + updates, + packageId, + ); + + const baseUpdateFee = await this.getBaseUpdateFee(); + const coins = tx.splitCoins( + tx.gas, + feedIds.map(() => tx.pure.u64(baseUpdateFee)), + ); + + return await this.executePriceFeedUpdates( + tx, + packageId, + feedIds, + priceUpdatesHotPotato, + coins, + ); + } + + /** + * Updates price feeds using the coin input for payment. Coins can be generated by calling splitCoin on tx.gas. + * @param tx transaction block to add commands to + * @param updates array of price feed updates received from the price service + * @param feedIds array of feed ids to update (in hex format) + * @param coins array of Coins for payment of update operations + */ + async updatePriceFeedsWithCoins( + tx: Transaction, + updates: Buffer[], + feedIds: HexString[], + coins: NestedTransactionResult[], + ): Promise { + const packageId = await this.getPythPackageId(); + const priceUpdatesHotPotato = await this.verifyVaasAndGetHotPotato( + tx, + updates, + packageId, + ); + + return await this.executePriceFeedUpdates( + tx, + packageId, + feedIds, + priceUpdatesHotPotato, + coins, + ); + } + async createPriceFeed(tx: Transaction, updates: Buffer[]) { const packageId = await this.getPythPackageId(); if (updates.length > 1) {