diff --git a/Cargo.lock b/Cargo.lock index 00638f6973a..ffea732c3be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2726,9 +2726,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", "http", @@ -2755,9 +2755,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" dependencies = [ "bytes", "futures-channel", @@ -2770,7 +2770,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2", "tokio", "tower-service", "tracing", @@ -6020,16 +6020,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.5" @@ -6719,7 +6709,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.5", + "socket2", "tokio-macros", "windows-sys 0.48.0", ] @@ -9053,12 +9043,20 @@ version = "0.1.0" dependencies = [ "anyhow", "axum", + "chrono", + "hyper", + "multivm", + "serde_json", "tokio", + "tower", "tracing", + "zksync_basic_types", "zksync_config", + "zksync_contracts", "zksync_dal", "zksync_object_store", "zksync_prover_interface", + "zksync_tee_verifier", "zksync_types", ] @@ -9124,6 +9122,7 @@ dependencies = [ "chrono", "circuit_sequencer_api 0.1.50", "serde", + "serde_json", "serde_with", "strum", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 77af41c6372..de664288e15 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -116,6 +116,7 @@ google-cloud-storage = "0.15.0" governor = "0.4.2" hex = "0.4" http = "0.2.9" +hyper = "0.14.29" iai = "0.1" insta = "1.29.0" itertools = "0.10" diff --git a/checks-config/era.dic b/checks-config/era.dic index 3741e158dfa..a93a467f956 100644 --- a/checks-config/era.dic +++ b/checks-config/era.dic @@ -973,3 +973,11 @@ uncached untrimmed UNNEST semver +TeeRequestProcessor +l1_batch_number +RequestProcessorError +map_err +proof_inputs +submit_proofs +ready_to_be_proven +privkey diff --git a/core/lib/config/src/configs/proof_data_handler.rs b/core/lib/config/src/configs/proof_data_handler.rs index 06d672b40d7..de7f6969b05 100644 --- a/core/lib/config/src/configs/proof_data_handler.rs +++ b/core/lib/config/src/configs/proof_data_handler.rs @@ -6,6 +6,7 @@ use serde::Deserialize; pub struct ProofDataHandlerConfig { pub http_port: u16, pub proof_generation_timeout_in_secs: u16, + pub tee_support: bool, } impl ProofDataHandlerConfig { diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 87c3bd2a129..3feee2a29ec 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -626,6 +626,7 @@ impl Distribution<configs::ProofDataHandlerConfig> for EncodeDist { configs::ProofDataHandlerConfig { http_port: self.sample(rng), proof_generation_timeout_in_secs: self.sample(rng), + tee_support: self.sample(rng), } } } diff --git
a/core/lib/dal/.sqlx/query-37890022be6b5e893cf051266fa16618914d6dedb39a9a40d36484741e8b01f4.json b/core/lib/dal/.sqlx/query-37890022be6b5e893cf051266fa16618914d6dedb39a9a40d36484741e8b01f4.json new file mode 100644 index 00000000000..a39a1bdb07b --- /dev/null +++ b/core/lib/dal/.sqlx/query-37890022be6b5e893cf051266fa16618914d6dedb39a9a40d36484741e8b01f4.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n tee_attestations (pubkey, attestation)\n VALUES\n ($1, $2)\n ON CONFLICT (pubkey) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Bytea", + "Bytea" + ] + }, + "nullable": [] + }, + "hash": "37890022be6b5e893cf051266fa16618914d6dedb39a9a40d36484741e8b01f4" +} diff --git a/core/lib/dal/.sqlx/query-640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c.json b/core/lib/dal/.sqlx/query-640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c.json new file mode 100644 index 00000000000..f0603488f1e --- /dev/null +++ b/core/lib/dal/.sqlx/query-640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = 'Successful'\n AND proofs.status = 'ready_to_be_proven'\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "640d37aa1d6dc722b1651c74b7ea2fca14965083b0589c3b3efad02e37d55f0c" +} diff --git a/core/lib/dal/.sqlx/query-6b7f66422078e9880b002da3175270e25815ca2ab720a59567da3b3b5bcedd63.json b/core/lib/dal/.sqlx/query-6b7f66422078e9880b002da3175270e25815ca2ab720a59567da3b3b5bcedd63.json new file mode 100644 index 00000000000..b7b84c323b2 --- /dev/null +++ b/core/lib/dal/.sqlx/query-6b7f66422078e9880b002da3175270e25815ca2ab720a59567da3b3b5bcedd63.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE tee_proof_generation_details\n SET\n status = $1,\n updated_at = NOW()\n WHERE\n l1_batch_number = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "6b7f66422078e9880b002da3175270e25815ca2ab720a59567da3b3b5bcedd63" +} diff --git a/core/lib/dal/.sqlx/query-727d4dc6a8fdb39a6c54d4395124f8d103f12e51252c46a210a007e5e600d711.json b/core/lib/dal/.sqlx/query-727d4dc6a8fdb39a6c54d4395124f8d103f12e51252c46a210a007e5e600d711.json new file mode 100644 index 00000000000..8e210aade88 --- /dev/null +++ b/core/lib/dal/.sqlx/query-727d4dc6a8fdb39a6c54d4395124f8d103f12e51252c46a210a007e5e600d711.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE tee_proof_generation_details\n SET\n status = 'generated',\n signature = $1,\n pubkey = $2,\n proof = $3,\n tee_type = $4,\n updated_at = NOW()\n WHERE\n l1_batch_number = $5\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Bytea", + "Text", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "727d4dc6a8fdb39a6c54d4395124f8d103f12e51252c46a210a007e5e600d711" +} diff --git a/core/lib/dal/.sqlx/query-9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c.json b/core/lib/dal/.sqlx/query-9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c.json new file mode 
100644 index 00000000000..994bfcfbb5a --- /dev/null +++ b/core/lib/dal/.sqlx/query-9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n tee_proof_generation_details (l1_batch_number, status, created_at, updated_at)\n VALUES\n ($1, 'ready_to_be_proven', NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "9533a672ae82db344ae1070ae11b608d21dc70397b64ce500881a8b55953c59c" +} diff --git a/core/lib/dal/.sqlx/query-e2ff392b3aa7a22fc39d150d08b148b0f1c7e512dd43434062341eb263fe434f.json b/core/lib/dal/.sqlx/query-e2ff392b3aa7a22fc39d150d08b148b0f1c7e512dd43434062341eb263fe434f.json new file mode 100644 index 00000000000..4236e72fcca --- /dev/null +++ b/core/lib/dal/.sqlx/query-e2ff392b3aa7a22fc39d150d08b148b0f1c7e512dd43434062341eb263fe434f.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE tee_proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = 'Successful'\n AND (\n proofs.status = 'ready_to_be_proven'\n OR (\n proofs.status = 'picked_by_prover'\n AND proofs.prover_taken_at < NOW() - $1::INTERVAL\n )\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Interval" + ] + }, + "nullable": [ + false + ] + }, + "hash": "e2ff392b3aa7a22fc39d150d08b148b0f1c7e512dd43434062341eb263fe434f" +} diff --git a/core/lib/dal/migrations/20240523085604_add_tee_proof_generation_details_table.down.sql b/core/lib/dal/migrations/20240523085604_add_tee_proof_generation_details_table.down.sql new file mode 100644 index 00000000000..5b4f9958a8e --- /dev/null +++ b/core/lib/dal/migrations/20240523085604_add_tee_proof_generation_details_table.down.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS tee_attestations; +DROP TABLE IF EXISTS tee_proof_generation_details; + +DROP INDEX IF EXISTS idx_tee_proof_generation_details_status_prover_taken_at; diff --git a/core/lib/dal/migrations/20240523085604_add_tee_proof_generation_details_table.up.sql b/core/lib/dal/migrations/20240523085604_add_tee_proof_generation_details_table.up.sql new file mode 100644 index 00000000000..3a249c44346 --- /dev/null +++ b/core/lib/dal/migrations/20240523085604_add_tee_proof_generation_details_table.up.sql @@ -0,0 +1,22 @@ +CREATE TABLE IF NOT EXISTS tee_attestations +( + pubkey BYTEA PRIMARY KEY, + attestation BYTEA +); + +CREATE TABLE IF NOT EXISTS tee_proof_generation_details +( + l1_batch_number BIGINT PRIMARY KEY REFERENCES tee_verifier_input_producer_jobs (l1_batch_number) ON DELETE CASCADE, + status TEXT NOT NULL, + signature BYTEA, + pubkey BYTEA REFERENCES tee_attestations (pubkey) ON DELETE SET NULL, + proof BYTEA, + tee_type TEXT, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + prover_taken_at TIMESTAMP +); + +CREATE INDEX IF NOT EXISTS idx_tee_proof_generation_details_status_prover_taken_at + ON tee_proof_generation_details (prover_taken_at) + WHERE status = 
'picked_by_prover'; diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index 8b048a03512..45d1f94b486 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -20,7 +20,7 @@ use crate::{ snapshot_recovery_dal::SnapshotRecoveryDal, snapshots_creator_dal::SnapshotsCreatorDal, snapshots_dal::SnapshotsDal, storage_logs_dal::StorageLogsDal, storage_logs_dedup_dal::StorageLogsDedupDal, storage_web3_dal::StorageWeb3Dal, - sync_dal::SyncDal, system_dal::SystemDal, + sync_dal::SyncDal, system_dal::SystemDal, tee_proof_generation_dal::TeeProofGenerationDal, tee_verifier_input_producer_dal::TeeVerifierInputProducerDal, tokens_dal::TokensDal, tokens_web3_dal::TokensWeb3Dal, transactions_dal::TransactionsDal, transactions_web3_dal::TransactionsWeb3Dal, vm_runner_dal::VmRunnerDal, @@ -50,6 +50,7 @@ pub mod storage_logs_dedup_dal; pub mod storage_web3_dal; pub mod sync_dal; pub mod system_dal; +pub mod tee_proof_generation_dal; pub mod tee_verifier_input_producer_dal; pub mod tokens_dal; pub mod tokens_web3_dal; @@ -111,6 +112,8 @@ where fn proof_generation_dal(&mut self) -> ProofGenerationDal<'_, 'a>; + fn tee_proof_generation_dal(&mut self) -> TeeProofGenerationDal<'_, 'a>; + fn system_dal(&mut self) -> SystemDal<'_, 'a>; fn snapshots_dal(&mut self) -> SnapshotsDal<'_, 'a>; @@ -213,6 +216,10 @@ impl<'a> CoreDal<'a> for Connection<'a, Core> { ProofGenerationDal { storage: self } } + fn tee_proof_generation_dal(&mut self) -> TeeProofGenerationDal<'_, 'a> { + TeeProofGenerationDal { storage: self } + } + fn system_dal(&mut self) -> SystemDal<'_, 'a> { SystemDal { storage: self } } diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index 5c173475145..040b4246604 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -2,10 +2,13 @@ use std::time::Duration; use strum::{Display, EnumString}; -use zksync_db_connection::{connection::Connection, utils::pg_interval_from_duration}; +use zksync_db_connection::{ + connection::Connection, error::DalResult, instrument::Instrumented, + utils::pg_interval_from_duration, +}; use zksync_types::L1BatchNumber; -use crate::{Core, SqlxError}; +use crate::Core; #[derive(Debug)] pub struct ProofGenerationDal<'a, 'c> { @@ -28,7 +31,7 @@ impl ProofGenerationDal<'_, '_> { pub async fn get_next_block_to_be_proven( &mut self, processing_timeout: Duration, - ) -> Option<L1BatchNumber> { + ) -> DalResult<Option<L1BatchNumber>> { let processing_timeout = pg_interval_from_duration(processing_timeout); let result: Option<L1BatchNumber> = sqlx::query!( r#" @@ -66,15 +69,16 @@ .unwrap() .map(|row| L1BatchNumber(row.l1_batch_number as u32)); - result + Ok(result) } pub async fn save_proof_artifacts_metadata( &mut self, - block_number: L1BatchNumber, + batch_number: L1BatchNumber, proof_blob_url: &str, - ) -> Result<(), SqlxError> { - sqlx::query!( + ) -> DalResult<()> { + let batch_number = i64::from(batch_number.0); + let query = sqlx::query!( r#" UPDATE proof_generation_details SET @@ -85,22 +89,34 @@ l1_batch_number = $2 "#, proof_blob_url, - i64::from(block_number.0) - ) - .execute(self.storage.conn()) - .await?
- .rows_affected() - .eq(&1) - .then_some(()) - .ok_or(sqlx::Error::RowNotFound) + batch_number + ); + let instrumentation = Instrumented::new("save_proof_artifacts_metadata") + .with_arg("proof_blob_url", &proof_blob_url) + .with_arg("l1_batch_number", &batch_number); + let result = instrumentation + .clone() + .with(query) + .execute(self.storage) + .await?; + if result.rows_affected() == 0 { + let err = instrumentation.constraint_error(anyhow::anyhow!( + "Cannot save proof_blob_url for batch number {} that does not exist", + batch_number + )); + return Err(err); + } + + Ok(()) + } pub async fn insert_proof_generation_details( &mut self, block_number: L1BatchNumber, proof_gen_data_blob_url: &str, - ) { - sqlx::query!( + ) -> DalResult<()> { + let l1_batch_number = i64::from(block_number.0); + let query = sqlx::query!( r#" INSERT INTO proof_generation_details (l1_batch_number, status, proof_gen_data_blob_url, created_at, updated_at) @@ -108,19 +124,35 @@ ($1, 'ready_to_be_proven', $2, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO NOTHING "#, - i64::from(block_number.0), + l1_batch_number, proof_gen_data_blob_url, - ) - .execute(self.storage.conn()) - .await - .unwrap(); + ); + let instrumentation = Instrumented::new("insert_proof_generation_details") + .with_arg("l1_batch_number", &l1_batch_number) + .with_arg("proof_gen_data_blob_url", &proof_gen_data_blob_url); + let result = instrumentation + .clone() + .with(query) + .execute(self.storage) + .await?; + if result.rows_affected() == 0 { + let err = instrumentation.constraint_error(anyhow::anyhow!( + "Cannot insert proof generation details: batch number {} already has an entry", + l1_batch_number + )); + return Err(err); + } + + Ok(()) + } pub async fn mark_proof_generation_job_as_skipped( &mut self, block_number: L1BatchNumber, - ) -> Result<(), SqlxError> { - sqlx::query!( + ) -> DalResult<()> { + let status = ProofGenerationJobStatus::Skipped.to_string(); + let l1_batch_number = i64::from(block_number.0); + let query = sqlx::query!( r#" UPDATE proof_generation_details SET @@ -129,18 +161,29 @@ l1_batch_number = $2 "#, - ProofGenerationJobStatus::Skipped.to_string(), - i64::from(block_number.0) - ) - .execute(self.storage.conn()) - .await?
- .rows_affected() - .eq(&1) - .then_some(()) - .ok_or(sqlx::Error::RowNotFound) + status, + l1_batch_number + ); + let instrumentation = Instrumented::new("mark_proof_generation_job_as_skipped") + .with_arg("status", &status) + .with_arg("l1_batch_number", &l1_batch_number); + let result = instrumentation + .clone() + .with(query) + .execute(self.storage) + .await?; + if result.rows_affected() == 0 { + let err = instrumentation.constraint_error(anyhow::anyhow!( + "Cannot mark proof as skipped because batch number {} does not exist", + l1_batch_number + )); + return Err(err); + } + + Ok(()) + } - pub async fn get_oldest_unpicked_batch(&mut self) -> Option<L1BatchNumber> { + pub async fn get_oldest_unpicked_batch(&mut self) -> DalResult<Option<L1BatchNumber>> { let result: Option<L1BatchNumber> = sqlx::query!( r#" SELECT @@ -160,10 +203,10 @@ .unwrap() .map(|row| L1BatchNumber(row.l1_batch_number as u32)); - result + Ok(result) } - pub async fn get_oldest_not_generated_batch(&mut self) -> Option<L1BatchNumber> { + pub async fn get_oldest_not_generated_batch(&mut self) -> DalResult<Option<L1BatchNumber>> { let result: Option<L1BatchNumber> = sqlx::query!( r#" SELECT @@ -183,6 +226,6 @@ .unwrap() .map(|row| L1BatchNumber(row.l1_batch_number as u32)); - result + Ok(result) } } diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs new file mode 100644 index 00000000000..d5625935fa1 --- /dev/null +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -0,0 +1,211 @@ +use std::time::Duration; + +use strum::{Display, EnumString}; +use zksync_db_connection::{ + connection::Connection, + error::DalResult, + instrument::{InstrumentExt, Instrumented}, + utils::pg_interval_from_duration, +}; +use zksync_types::L1BatchNumber; + +use crate::Core; + +#[derive(Debug)] +pub struct TeeProofGenerationDal<'a, 'c> { + pub(crate) storage: &'a mut Connection<'c, Core>, +} + +#[derive(Debug, EnumString, Display)] +enum TeeProofGenerationJobStatus { + #[strum(serialize = "ready_to_be_proven")] + ReadyToBeProven, + #[strum(serialize = "picked_by_prover")] + PickedByProver, + #[strum(serialize = "generated")] + Generated, + #[strum(serialize = "skipped")] + Skipped, +} + +#[derive(Debug, EnumString, Display)] +pub enum TeeType { + #[strum(serialize = "sgx")] + Sgx, +} + +impl TeeProofGenerationDal<'_, '_> { + pub async fn get_next_block_to_be_proven( + &mut self, + processing_timeout: Duration, + ) -> DalResult<Option<L1BatchNumber>> { + let processing_timeout = pg_interval_from_duration(processing_timeout); + let result: Option<L1BatchNumber> = sqlx::query!( + r#" + UPDATE tee_proof_generation_details + SET + status = 'picked_by_prover', + updated_at = NOW(), + prover_taken_at = NOW() + WHERE + l1_batch_number = ( + SELECT + proofs.l1_batch_number + FROM + tee_proof_generation_details AS proofs + JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number + WHERE + inputs.status = 'Successful' + AND ( + proofs.status = 'ready_to_be_proven' + OR ( + proofs.status = 'picked_by_prover' + AND proofs.prover_taken_at < NOW() - $1::INTERVAL + ) + ) + ORDER BY + l1_batch_number ASC + LIMIT + 1 + FOR UPDATE + SKIP LOCKED + ) + RETURNING + tee_proof_generation_details.l1_batch_number + "#, + &processing_timeout, + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| L1BatchNumber(row.l1_batch_number as u32)); + + Ok(result) + } + + pub async fn save_proof_artifacts_metadata( + &mut self, + block_number: L1BatchNumber, + signature: &[u8], + pubkey: &[u8], + proof: &[u8], + tee_type: TeeType,
+ ) -> DalResult<()> { + let query = sqlx::query!( + r#" + UPDATE tee_proof_generation_details + SET + status = 'generated', + signature = $1, + pubkey = $2, + proof = $3, + tee_type = $4, + updated_at = NOW() + WHERE + l1_batch_number = $5 + "#, + signature, + pubkey, + proof, + tee_type.to_string(), + i64::from(block_number.0) + ); + let instrumentation = Instrumented::new("save_proof_artifacts_metadata") + .with_arg("signature", &signature) + .with_arg("pubkey", &pubkey) + .with_arg("proof", &proof) + .with_arg("tee_type", &tee_type); + let result = instrumentation + .clone() + .with(query) + .execute(self.storage) + .await?; + if result.rows_affected() == 0 { + let err = instrumentation.constraint_error(anyhow::anyhow!( + "Updating TEE proof for a non-existent batch number is not allowed" + )); + return Err(err); + } + + Ok(()) + } + + pub async fn insert_tee_proof_generation_job( + &mut self, + block_number: L1BatchNumber, + ) -> DalResult<()> { + let block_number = i64::from(block_number.0); + sqlx::query!( + r#" + INSERT INTO + tee_proof_generation_details (l1_batch_number, status, created_at, updated_at) + VALUES + ($1, 'ready_to_be_proven', NOW(), NOW()) + ON CONFLICT (l1_batch_number) DO NOTHING + "#, + block_number, + ) + .instrument("create_tee_proof_generation_details") + .with_arg("l1_batch_number", &block_number) + .report_latency() + .execute(self.storage) + .await?; + + Ok(()) + } + + pub async fn get_oldest_unpicked_batch(&mut self) -> DalResult<Option<L1BatchNumber>> { + let result: Option<L1BatchNumber> = sqlx::query!( + r#" + SELECT + proofs.l1_batch_number + FROM + tee_proof_generation_details AS proofs + JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number + WHERE + inputs.status = 'Successful' + AND proofs.status = 'ready_to_be_proven' + ORDER BY + proofs.l1_batch_number ASC + LIMIT + 1 + "#, + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| L1BatchNumber(row.l1_batch_number as u32)); + + Ok(result) + } + + pub async fn save_attestation(&mut self, pubkey: &[u8], attestation: &[u8]) -> DalResult<()> { + let query = sqlx::query!( + r#" + INSERT INTO + tee_attestations (pubkey, attestation) + VALUES + ($1, $2) + ON CONFLICT (pubkey) DO NOTHING + "#, + pubkey, + attestation + ); + let instrumentation = Instrumented::new("save_attestation") + .with_arg("pubkey", &pubkey) + .with_arg("attestation", &attestation); + let result = instrumentation + .clone() + .with(query) + .execute(self.storage) + .await?; + if result.rows_affected() == 0 { + let err = instrumentation.constraint_error(anyhow::anyhow!( + "Unable to insert TEE attestation: given pubkey already has an attestation assigned" + )); + return Err(err); + } + + Ok(()) + } +} diff --git a/core/lib/env_config/src/proof_data_handler.rs b/core/lib/env_config/src/proof_data_handler.rs index 53bbeb42ee6..f69aa1d6dc5 100644 --- a/core/lib/env_config/src/proof_data_handler.rs +++ b/core/lib/env_config/src/proof_data_handler.rs @@ -19,6 +19,7 @@ mod tests { ProofDataHandlerConfig { http_port: 3320, proof_generation_timeout_in_secs: 18000, + tee_support: true, } } @@ -27,6 +28,7 @@ mod tests { let config = r#" PROOF_DATA_HANDLER_PROOF_GENERATION_TIMEOUT_IN_SECS="18000" PROOF_DATA_HANDLER_HTTP_PORT="3320" + PROOF_DATA_HANDLER_TEE_SUPPORT="true" "#; let mut lock = MUTEX.lock(); lock.set_env(config); diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs index 8b99f976990..66cda57a0ab 100644 --- a/core/lib/object_store/src/raw.rs +++ b/core/lib/object_store/src/raw.rs
@@ -16,6 +16,7 @@ pub enum Bucket { NodeAggregationWitnessJobsFri, SchedulerWitnessJobsFri, ProofsFri, + ProofsTee, StorageSnapshot, TeeVerifierInput, } @@ -33,6 +34,7 @@ impl Bucket { Self::NodeAggregationWitnessJobsFri => "node_aggregation_witness_jobs_fri", Self::SchedulerWitnessJobsFri => "scheduler_witness_jobs_fri", Self::ProofsFri => "proofs_fri", + Self::ProofsTee => "proofs_tee", Self::StorageSnapshot => "storage_logs_snapshots", Self::TeeVerifierInput => "tee_verifier_inputs", } diff --git a/core/lib/protobuf_config/src/proof_data_handler.rs b/core/lib/protobuf_config/src/proof_data_handler.rs index d231e5b46b7..4b7bd2fd7c3 100644 --- a/core/lib/protobuf_config/src/proof_data_handler.rs +++ b/core/lib/protobuf_config/src/proof_data_handler.rs @@ -14,6 +14,9 @@ impl ProtoRepr for proto::ProofDataHandler { proof_generation_timeout_in_secs: required(&self.proof_generation_timeout_in_secs) .and_then(|x| Ok((*x).try_into()?)) .context("proof_generation_timeout_in_secs")?, + tee_support: required(&self.tee_support) + .copied() + .context("tee_support")?, }) } @@ -21,6 +24,7 @@ impl ProtoRepr for proto::ProofDataHandler { Self { http_port: Some(this.http_port.into()), proof_generation_timeout_in_secs: Some(this.proof_generation_timeout_in_secs.into()), + tee_support: Some(this.tee_support), } } } diff --git a/core/lib/protobuf_config/src/proto/config/prover.proto b/core/lib/protobuf_config/src/proto/config/prover.proto index d5d131fc157..1eaf8637522 100644 --- a/core/lib/protobuf_config/src/proto/config/prover.proto +++ b/core/lib/protobuf_config/src/proto/config/prover.proto @@ -99,4 +99,5 @@ message WitnessVectorGenerator { message ProofDataHandler { optional uint32 http_port = 1; // required; u16 optional uint32 proof_generation_timeout_in_secs = 2; // required; s + optional bool tee_support = 3; // required } diff --git a/core/lib/prover_interface/Cargo.toml b/core/lib/prover_interface/Cargo.toml index 216eec8b985..869338a8830 100644 --- a/core/lib/prover_interface/Cargo.toml +++ b/core/lib/prover_interface/Cargo.toml @@ -10,8 +10,8 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_types.workspace = true zksync_object_store.workspace = true +zksync_types.workspace = true # We can use the newest api to send proofs to L1. 
circuit_sequencer_api_1_5_0.workspace = true @@ -24,3 +24,4 @@ chrono = { workspace = true, features = ["serde"] } [dev-dependencies] tokio = { workspace = true, features = ["full"] } bincode.workspace = true +serde_json.workspace = true diff --git a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs index 0353c6f3924..fb96c62d38c 100644 --- a/core/lib/prover_interface/src/api.rs +++ b/core/lib/prover_interface/src/api.rs @@ -8,7 +8,12 @@ use zksync_types::{ L1BatchNumber, }; -use crate::{inputs::PrepareBasicCircuitsJob, outputs::L1BatchProofForL1}; +use crate::{ + inputs::PrepareBasicCircuitsJob, + outputs::{L1BatchProofForL1, L1BatchTeeProofForL1}, +}; + +// Structs for holding data returned in HTTP responses #[derive(Debug, Serialize, Deserialize)] pub struct ProofGenerationData { @@ -20,14 +25,29 @@ } #[derive(Debug, Serialize, Deserialize)] -pub struct ProofGenerationDataRequest {} +pub enum GenericProofGenerationDataResponse<T> { + Success(Option<Box<T>>), + Error(String), +} + +pub type ProofGenerationDataResponse = GenericProofGenerationDataResponse<ProofGenerationData>; #[derive(Debug, Serialize, Deserialize)] -pub enum ProofGenerationDataResponse { - Success(Option<Box<ProofGenerationData>>), +pub enum SimpleResponse { + Success, Error(String), } +pub type SubmitProofResponse = SimpleResponse; +pub type RegisterTeeAttestationResponse = SimpleResponse; + +// Structs to hold data necessary for making HTTP requests + +#[derive(Debug, Serialize, Deserialize)] +pub struct ProofGenerationDataRequest {} + +pub type TeeProofGenerationDataRequest = ProofGenerationDataRequest; + #[derive(Debug, Serialize, Deserialize)] pub enum SubmitProofRequest { Proof(Box<L1BatchProofForL1>), @@ -35,8 +55,11 @@ SkippedProofGeneration, } -#[derive(Debug, Serialize, Deserialize)] -pub enum SubmitProofResponse { - Success, - Error(String), +#[derive(Debug, PartialEq, Serialize, Deserialize)] +pub struct SubmitTeeProofRequest(pub Box<L1BatchTeeProofForL1>); + +#[derive(Debug, PartialEq, Serialize, Deserialize)] +pub struct RegisterTeeAttestationRequest { + pub attestation: Vec<u8>, + pub pubkey: Vec<u8>, } diff --git a/core/lib/prover_interface/src/outputs.rs b/core/lib/prover_interface/src/outputs.rs index 1ef9bb4bad2..a4035a21ec2 100644 --- a/core/lib/prover_interface/src/outputs.rs +++ b/core/lib/prover_interface/src/outputs.rs @@ -5,8 +5,7 @@ use serde::{Deserialize, Serialize}; use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; use zksync_types::{protocol_version::ProtocolSemanticVersion, L1BatchNumber}; -/// The only type of proof utilized by the core subsystem: a "final" proof that can be sent -/// to the L1 contract. +/// A "final" ZK proof that can be sent to the L1 contract. #[derive(Clone, Serialize, Deserialize)] pub struct L1BatchProofForL1 { pub aggregation_result_coords: [[u8; 32]; 4], @@ -14,6 +13,18 @@ pub protocol_version: ProtocolSemanticVersion, } +/// A "final" TEE proof that can be sent to the L1 contract.
+#[derive(Clone, PartialEq, Serialize, Deserialize)] +pub struct L1BatchTeeProofForL1 { + // signature generated within the TEE enclave, using the privkey corresponding to the pubkey + pub signature: Vec<u8>, + // pubkey used for signature verification; each key pair is attested by the TEE attestation + // stored in the db + pub pubkey: Vec<u8>, + // data that was signed + pub proof: Vec<u8>, +} + impl fmt::Debug for L1BatchProofForL1 { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter @@ -23,6 +34,14 @@ } } +impl fmt::Debug for L1BatchTeeProofForL1 { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter + .debug_struct("L1BatchTeeProofForL1") + .finish_non_exhaustive() + } +} + impl StoredObject for L1BatchProofForL1 { const BUCKET: Bucket = Bucket::ProofsFri; type Key<'a> = (L1BatchNumber, ProtocolSemanticVersion); @@ -35,3 +54,14 @@ serialize_using_bincode!(); } + +impl StoredObject for L1BatchTeeProofForL1 { + const BUCKET: Bucket = Bucket::ProofsTee; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("l1_batch_tee_proof_{key}.bin") + } + + serialize_using_bincode!(); +} diff --git a/core/lib/prover_interface/tests/job_serialization.rs b/core/lib/prover_interface/tests/job_serialization.rs index ffa6d18ef45..60a80f91ed8 100644 --- a/core/lib/prover_interface/tests/job_serialization.rs +++ b/core/lib/prover_interface/tests/job_serialization.rs @@ -1,12 +1,14 @@ //! Integration tests for object store serialization of job objects. +use circuit_sequencer_api_1_5_0::proof::FinalProof; use tokio::fs; use zksync_object_store::{Bucket, MockObjectStore}; use zksync_prover_interface::{ + api::{SubmitProofRequest, SubmitTeeProofRequest}, inputs::{PrepareBasicCircuitsJob, StorageLogMetadata}, - outputs::L1BatchProofForL1, + outputs::{L1BatchProofForL1, L1BatchTeeProofForL1}, }; -use zksync_types::L1BatchNumber; +use zksync_types::{protocol_version::ProtocolSemanticVersion, L1BatchNumber, ProtocolVersionId}; /// Tests compatibility of the `PrepareBasicCircuitsJob` serialization to the previously used /// one. @@ -66,7 +68,7 @@ assert_job_integrity(job_tuple.1, job_tuple.0); } -/// Simple test to check if we can succesfully parse the proof. +/// Simple test to check if we can successfully parse the proof.
#[tokio::test] async fn test_final_proof_deserialization() { let proof = fs::read("./tests/l1_batch_proof_1_0_24_0.bin") @@ -76,3 +78,102 @@ let results: L1BatchProofForL1 = bincode::deserialize(&proof).unwrap(); assert_eq!(results.aggregation_result_coords[0][0], 0); } + +#[test] +fn test_proof_request_serialization() { + let proof = SubmitProofRequest::Proof(Box::new(L1BatchProofForL1 { + aggregation_result_coords: [[0; 32]; 4], + scheduler_proof: FinalProof::empty(), + protocol_version: ProtocolSemanticVersion { + minor: ProtocolVersionId::Version25, + patch: 10.into(), + }, + })); + let encoded_obj = serde_json::to_string(&proof).unwrap(); + let encoded_json = r#"{ + "Proof": { + "aggregation_result_coords": [ + [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], + [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], + [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], + [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ] + ], + "scheduler_proof": { + "n": 0, + "inputs": [], + "state_polys_commitments": [], + "witness_polys_commitments": [], + "copy_permutation_grand_product_commitment": { + "x": [ 0, 0, 0, 0 ], + "y": [ 1, 0, 0, 0 ], + "infinity": true + }, + "lookup_s_poly_commitment": null, + "lookup_grand_product_commitment": null, + "quotient_poly_parts_commitments": [], + "state_polys_openings_at_z": [], + "state_polys_openings_at_dilations": [], + "witness_polys_openings_at_z": [], + "witness_polys_openings_at_dilations": [], + "gate_setup_openings_at_z": [], + "gate_selectors_openings_at_z": [], + "copy_permutation_polys_openings_at_z": [], + "copy_permutation_grand_product_opening_at_z_omega": [ 0, 0, 0, 0 ], + "lookup_s_poly_opening_at_z_omega": null, + "lookup_grand_product_opening_at_z_omega": null, + "lookup_t_poly_opening_at_z": null, + "lookup_t_poly_opening_at_z_omega": null, + "lookup_selector_poly_opening_at_z": null, + "lookup_table_type_poly_opening_at_z": null, + "quotient_poly_opening_at_z": [ 0, 0, 0, 0 ], + "linearization_poly_opening_at_z": [ 0, 0, 0, 0 ], + "opening_proof_at_z": { + "x": [ 0, 0, 0, 0 ], + "y": [ 1, 0, 0, 0 ], + "infinity": true + }, + "opening_proof_at_z_omega": { + "x": [ 0, 0, 0, 0 ], + "y": [ 1, 0, 0, 0 ], + "infinity": true + } + }, + "protocol_version": "0.25.10" + } + }"#; + let decoded_obj: SubmitProofRequest = serde_json::from_str(&encoded_obj).unwrap(); + let decoded_json: SubmitProofRequest = serde_json::from_str(encoded_json).unwrap(); + match (decoded_obj, decoded_json) { + (SubmitProofRequest::Proof(decoded_obj), SubmitProofRequest::Proof(decoded_json)) => { + assert_eq!( + decoded_obj.aggregation_result_coords, + decoded_json.aggregation_result_coords + ); + } + _ => panic!("Either decoded_obj or decoded_json is not SubmitProofRequest::Proof"), + } +} + +#[test] +fn test_tee_proof_request_serialization() { + let tee_proof_str = r#"{ + "signature": [ 0, 1, 2, 3, 4 ], + "pubkey": [ 5, 6, 7, 8, 9 ], + "proof": [ 10, 11, 12, 13, 14 ] + }"#; + let tee_proof_result = serde_json::from_str::<SubmitTeeProofRequest>(tee_proof_str).unwrap(); + let tee_proof_expected = SubmitTeeProofRequest(Box::new(L1BatchTeeProofForL1 { + signature: vec![0, 1, 2, 3, 4], + pubkey: vec![5, 6, 7, 8, 9], + proof: vec![10, 11, 12, 13, 14], + })); + assert_eq!(tee_proof_result, tee_proof_expected); +} diff --git
a/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs index 1ae03c74b45..04d823252af 100644 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs +++ b/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs @@ -98,7 +98,7 @@ impl PeriodicJob for FriProverQueueReporter { let oldest_unpicked_batch = match db_conn .proof_generation_dal() .get_oldest_unpicked_batch() - .await + .await? { Some(l1_batch_number) => l1_batch_number.0 as u64, // if there is no unpicked batch in database, we use sealed batch number as a result @@ -119,7 +119,7 @@ impl PeriodicJob for FriProverQueueReporter { if let Some(l1_batch_number) = db_conn .proof_generation_dal() .get_oldest_not_generated_batch() - .await + .await? { FRI_PROVER_METRICS .oldest_not_generated_batch diff --git a/core/node/metadata_calculator/src/updater.rs b/core/node/metadata_calculator/src/updater.rs index 94aa176e87d..8271865199a 100644 --- a/core/node/metadata_calculator/src/updater.rs +++ b/core/node/metadata_calculator/src/updater.rs @@ -150,7 +150,7 @@ impl TreeUpdater { storage .proof_generation_dal() .insert_proof_generation_details(l1_batch_number, object_key) - .await; + .await?; } save_postgres_latency.observe(); tracing::info!("Updated metadata for L1 batch #{l1_batch_number} in Postgres"); diff --git a/core/node/proof_data_handler/Cargo.toml b/core/node/proof_data_handler/Cargo.toml index 2e7141ea4d6..301ce0df6a8 100644 --- a/core/node/proof_data_handler/Cargo.toml +++ b/core/node/proof_data_handler/Cargo.toml @@ -14,9 +14,18 @@ zksync_config.workspace = true zksync_dal.workspace = true zksync_object_store.workspace = true zksync_prover_interface.workspace = true +zksync_tee_verifier.workspace = true zksync_types.workspace = true - -tracing.workspace = true anyhow.workspace = true axum.workspace = true tokio.workspace = true +tracing.workspace = true + +[dev-dependencies] +hyper.workspace = true +chrono.workspace = true +multivm.workspace = true +serde_json.workspace = true +tower.workspace = true +zksync_basic_types.workspace = true +zksync_contracts.workspace = true diff --git a/core/node/proof_data_handler/src/errors.rs b/core/node/proof_data_handler/src/errors.rs new file mode 100644 index 00000000000..f170b3b53e7 --- /dev/null +++ b/core/node/proof_data_handler/src/errors.rs @@ -0,0 +1,38 @@ +use axum::{ + http::StatusCode, + response::{IntoResponse, Response}, +}; +use zksync_dal::DalError; +use zksync_object_store::ObjectStoreError; + +pub(crate) enum RequestProcessorError { + ObjectStore(ObjectStoreError), + Dal(DalError), +} + +impl IntoResponse for RequestProcessorError { + fn into_response(self) -> Response { + let (status_code, message) = match self { + RequestProcessorError::ObjectStore(err) => { + tracing::error!("GCS error: {:?}", err); + ( + StatusCode::BAD_GATEWAY, + "Failed fetching/saving from GCS".to_owned(), + ) + } + RequestProcessorError::Dal(err) => { + tracing::error!("Sqlx error: {:?}", err); + match err.inner() { + zksync_dal::SqlxError::RowNotFound => { + (StatusCode::NOT_FOUND, "Non existing L1 batch".to_owned()) + } + _ => ( + StatusCode::BAD_GATEWAY, + "Failed fetching/saving from db".to_owned(), + ), + } + } + }; + (status_code, message).into_response() + } +} diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs index 4bd082b00dd..5a3cb2d95b6 100644 --- a/core/node/proof_data_handler/src/lib.rs +++ 
b/core/node/proof_data_handler/src/lib.rs @@ -2,29 +2,64 @@ use std::{net::SocketAddr, sync::Arc}; use anyhow::Context as _; use axum::{extract::Path, routing::post, Json, Router}; +use request_processor::RequestProcessor; +use tee_request_processor::TeeRequestProcessor; use tokio::sync::watch; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, Core}; use zksync_object_store::ObjectStore; -use zksync_prover_interface::api::{ProofGenerationDataRequest, SubmitProofRequest}; +use zksync_prover_interface::api::{ + ProofGenerationDataRequest, RegisterTeeAttestationRequest, SubmitProofRequest, + SubmitTeeProofRequest, TeeProofGenerationDataRequest, +}; use zksync_types::commitment::L1BatchCommitmentMode; -use crate::request_processor::RequestProcessor; +#[cfg(test)] +mod tests; +mod errors; mod request_processor; +mod tee_request_processor; pub async fn run_server( config: ProofDataHandlerConfig, blob_store: Arc<dyn ObjectStore>, - pool: ConnectionPool<Core>, + connection_pool: ConnectionPool<Core>, commitment_mode: L1BatchCommitmentMode, mut stop_receiver: watch::Receiver<bool>, ) -> anyhow::Result<()> { let bind_address = SocketAddr::from(([0, 0, 0, 0], config.http_port)); tracing::debug!("Starting proof data handler server on {bind_address}"); - let get_proof_gen_processor = RequestProcessor::new(blob_store, pool, config, commitment_mode); + let app = create_proof_processing_router(blob_store, connection_pool, config, commitment_mode); + + axum::Server::bind(&bind_address) + .serve(app.into_make_service()) + .with_graceful_shutdown(async move { + if stop_receiver.changed().await.is_err() { + tracing::warn!("Stop signal sender for proof data handler server was dropped without sending a signal"); + } + tracing::info!("Stop signal received, proof data handler server is shutting down"); + }) + .await + .context("Proof data handler server failed")?; + tracing::info!("Proof data handler server shut down"); + Ok(()) +} + +fn create_proof_processing_router( + blob_store: Arc<dyn ObjectStore>, + connection_pool: ConnectionPool<Core>, + config: ProofDataHandlerConfig, + commitment_mode: L1BatchCommitmentMode, +) -> Router { + let get_proof_gen_processor = RequestProcessor::new( + blob_store.clone(), + connection_pool.clone(), + config.clone(), + commitment_mode, + ); let submit_proof_processor = get_proof_gen_processor.clone(); - let app = Router::new() + let mut router = Router::new() .route( "/proof_generation_data", post( move |payload: Json<ProofGenerationDataRequest>| async move { @@ -48,16 +83,43 @@ ), ); - axum::Server::bind(&bind_address) - .serve(app.into_make_service()) - .with_graceful_shutdown(async move { - if stop_receiver.changed().await.is_err() { - tracing::warn!("Stop signal sender for proof data handler server was dropped without sending a signal"); - } - tracing::info!("Stop signal received, proof data handler server is shutting down"); - }) - .await - .context("Proof data handler server failed")?; - tracing::info!("Proof data handler server shut down"); - Ok(()) + if config.tee_support { + let get_tee_proof_gen_processor = + TeeRequestProcessor::new(blob_store, connection_pool, config.clone()); + let submit_tee_proof_processor = get_tee_proof_gen_processor.clone(); + let register_tee_attestation_processor = get_tee_proof_gen_processor.clone(); + + router = router.route( + "/tee/proof_inputs", + post( + move |payload: Json<TeeProofGenerationDataRequest>| async move { + get_tee_proof_gen_processor + .get_proof_generation_data(payload) + .await + }, + ), + ) + .route( + "/tee/submit_proofs/:l1_batch_number", + post( + move |l1_batch_number: Path<u32>, payload: Json<SubmitTeeProofRequest>| async move {
+ submit_tee_proof_processor + .submit_proof(l1_batch_number, payload) + .await + }, + ), + ) + .route( + "/tee/register_attestation", + post( + move |payload: Json<RegisterTeeAttestationRequest>| async move { + register_tee_attestation_processor + .register_tee_attestation(payload) + .await + }, + ), + ); + } + + router } diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs index 582cb78f70c..170b27bb971 100644 --- a/core/node/proof_data_handler/src/request_processor.rs +++ b/core/node/proof_data_handler/src/request_processor.rs @@ -1,14 +1,9 @@ use std::sync::Arc; -use axum::{ - extract::Path, - http::StatusCode, - response::{IntoResponse, Response}, - Json, -}; +use axum::{extract::Path, Json}; use zksync_config::configs::ProofDataHandlerConfig; -use zksync_dal::{ConnectionPool, Core, CoreDal, SqlxError}; -use zksync_object_store::{ObjectStore, ObjectStoreError}; +use zksync_dal::{ConnectionPool, Core, CoreDal}; +use zksync_object_store::ObjectStore; use zksync_prover_interface::api::{ ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, SubmitProofRequest, SubmitProofResponse, @@ -20,6 +15,8 @@ L1BatchNumber, H256, }; +use crate::errors::RequestProcessorError; + #[derive(Clone)] pub(crate) struct RequestProcessor { blob_store: Arc<dyn ObjectStore>, @@ -28,38 +25,6 @@ commitment_mode: L1BatchCommitmentMode, } -pub(crate) enum RequestProcessorError { - ObjectStore(ObjectStoreError), - Sqlx(SqlxError), -} - -impl IntoResponse for RequestProcessorError { - fn into_response(self) -> Response { - let (status_code, message) = match self { - RequestProcessorError::ObjectStore(err) => { - tracing::error!("GCS error: {:?}", err); - ( - StatusCode::BAD_GATEWAY, - "Failed fetching/saving from GCS".to_owned(), - ) - } - RequestProcessorError::Sqlx(err) => { - tracing::error!("Sqlx error: {:?}", err); - match err { - SqlxError::RowNotFound => { - (StatusCode::NOT_FOUND, "Non existing L1 batch".to_owned()) - } - _ => ( - StatusCode::BAD_GATEWAY, - "Failed fetching/saving from db".to_owned(), - ), - } - } - }; - (status_code, message).into_response() - } -} - impl RequestProcessor { pub(crate) fn new( blob_store: Arc<dyn ObjectStore>, @@ -88,7 +53,8 @@ .unwrap() .proof_generation_dal() .get_next_block_to_be_proven(self.config.proof_generation_timeout()) - .await; + .await + .map_err(RequestProcessorError::Dal)?; let l1_batch_number = match l1_batch_number_result { Some(number) => number, @@ -250,7 +216,7 @@ .proof_generation_dal() .save_proof_artifacts_metadata(l1_batch_number, &blob_url) .await - .map_err(RequestProcessorError::Sqlx)?; + .map_err(RequestProcessorError::Dal)?; } SubmitProofRequest::SkippedProofGeneration => { self.pool @@ -260,7 +226,7 @@ .proof_generation_dal() .mark_proof_generation_job_as_skipped(l1_batch_number) .await - .map_err(RequestProcessorError::Sqlx)?; + .map_err(RequestProcessorError::Dal)?; } } diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs new file mode 100644 index 00000000000..957d0ef085f --- /dev/null +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -0,0 +1,122 @@ +use std::sync::Arc; + +use axum::{extract::Path, Json}; +use zksync_config::configs::ProofDataHandlerConfig; +use zksync_dal::{tee_proof_generation_dal::TeeType, ConnectionPool, Core, CoreDal}; +use zksync_object_store::ObjectStore; +use
zksync_prover_interface::api::{ + GenericProofGenerationDataResponse, RegisterTeeAttestationRequest, + RegisterTeeAttestationResponse, SubmitProofResponse, SubmitTeeProofRequest, + TeeProofGenerationDataRequest, +}; +use zksync_tee_verifier::TeeVerifierInput; +use zksync_types::L1BatchNumber; + +use crate::errors::RequestProcessorError; + +pub type TeeProofGenerationDataResponse = GenericProofGenerationDataResponse<TeeVerifierInput>; + +#[derive(Clone)] +pub(crate) struct TeeRequestProcessor { + blob_store: Arc<dyn ObjectStore>, + pool: ConnectionPool<Core>, + config: ProofDataHandlerConfig, +} + +impl TeeRequestProcessor { + pub(crate) fn new( + blob_store: Arc<dyn ObjectStore>, + pool: ConnectionPool<Core>, + config: ProofDataHandlerConfig, + ) -> Self { + Self { + blob_store, + pool, + config, + } + } + + pub(crate) async fn get_proof_generation_data( + &self, + request: Json<TeeProofGenerationDataRequest>, + ) -> Result<Json<TeeProofGenerationDataResponse>, RequestProcessorError> { + tracing::info!("Received request for proof generation data: {:?}", request); + + let mut connection = self + .pool + .connection() + .await + .map_err(RequestProcessorError::Dal)?; + + let l1_batch_number_result = connection + .tee_proof_generation_dal() + .get_next_block_to_be_proven(self.config.proof_generation_timeout()) + .await + .map_err(RequestProcessorError::Dal)?; + let l1_batch_number = match l1_batch_number_result { + Some(number) => number, + None => return Ok(Json(TeeProofGenerationDataResponse::Success(None))), + }; + + let tee_verifier_input: TeeVerifierInput = self + .blob_store + .get(l1_batch_number) + .await + .map_err(RequestProcessorError::ObjectStore)?; + + Ok(Json(TeeProofGenerationDataResponse::Success(Some( + Box::new(tee_verifier_input), + )))) + } + + pub(crate) async fn submit_proof( + &self, + Path(l1_batch_number): Path<u32>, + Json(proof): Json<SubmitTeeProofRequest>, + ) -> Result<Json<SubmitProofResponse>, RequestProcessorError> { + let l1_batch_number = L1BatchNumber(l1_batch_number); + let mut connection = self + .pool + .connection() + .await + .map_err(RequestProcessorError::Dal)?; + let mut dal = connection.tee_proof_generation_dal(); + + tracing::info!( + "Received proof {:?} for block number: {:?}", + proof, + l1_batch_number + ); + dal.save_proof_artifacts_metadata( + l1_batch_number, + &proof.0.signature, + &proof.0.pubkey, + &proof.0.proof, + TeeType::Sgx, + ) + .await + .map_err(RequestProcessorError::Dal)?; + + Ok(Json(SubmitProofResponse::Success)) + } + + pub(crate) async fn register_tee_attestation( + &self, + Json(payload): Json<RegisterTeeAttestationRequest>, + ) -> Result<Json<RegisterTeeAttestationResponse>, RequestProcessorError> { + tracing::info!("Received attestation: {:?}", payload); + + let mut connection = self + .pool + .connection() + .await + .map_err(RequestProcessorError::Dal)?; + let mut dal = connection.tee_proof_generation_dal(); + + dal.save_attestation(&payload.pubkey, &payload.attestation) + .await + .map_err(RequestProcessorError::Dal)?; + + Ok(Json(RegisterTeeAttestationResponse::Success)) + } +} diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs new file mode 100644 index 00000000000..7047bd154c9 --- /dev/null +++ b/core/node/proof_data_handler/src/tests.rs @@ -0,0 +1,248 @@ +use std::time::Instant; + +use axum::{ + body::Body, + http::{self, Method, Request, StatusCode}, + response::Response, + Router, +}; +use hyper::body::HttpBody; +use multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; +use serde_json::json; +use tower::ServiceExt; +use zksync_basic_types::U256; +use zksync_config::configs::ProofDataHandlerConfig; +use zksync_contracts::{BaseSystemContracts, SystemContractCode}; +use
zksync_dal::{ConnectionPool, Core, CoreDal}; +use zksync_object_store::MockObjectStore; +use zksync_prover_interface::{api::SubmitTeeProofRequest, inputs::PrepareBasicCircuitsJob}; +use zksync_tee_verifier::TeeVerifierInput; +use zksync_types::{commitment::L1BatchCommitmentMode, L1BatchNumber, H256}; + +use crate::create_proof_processing_router; + +// Test the /tee/proof_inputs endpoint by: +// 1. Mocking an object store with a single batch blob containing TEE verifier input +// 2. Populating the SQL db with relevant information about the status of the TEE verifier input and +// TEE proof generation +// 3. Sending a request to the /tee/proof_inputs endpoint and asserting that the response +// matches the file from the object store +#[tokio::test] +async fn request_tee_proof_inputs() { + // prepare a sample mocked TEE verifier input + + let batch_number = L1BatchNumber::from(1); + let tvi = TeeVerifierInput::new( + PrepareBasicCircuitsJob::new(0), + vec![], + L1BatchEnv { + previous_batch_hash: Some(H256([1; 32])), + number: batch_number, + timestamp: 0, + fee_input: Default::default(), + fee_account: Default::default(), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 0, + timestamp: 0, + prev_block_hash: H256([1; 32]), + max_virtual_blocks_to_create: 0, + }, + }, + SystemEnv { + zk_porter_available: false, + version: Default::default(), + base_system_smart_contracts: BaseSystemContracts { + bootloader: SystemContractCode { + code: vec![U256([1; 4])], + hash: H256([1; 32]), + }, + default_aa: SystemContractCode { + code: vec![U256([1; 4])], + hash: H256([1; 32]), + }, + }, + bootloader_gas_limit: 0, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: 0, + chain_id: Default::default(), + }, + vec![(H256([1; 32]), vec![0, 1, 2, 3, 4])], + ); + + // populate mocked object store with a single batch blob + + let blob_store = MockObjectStore::arc(); + let object_path = blob_store.put(batch_number, &tvi).await.unwrap(); + + // get connection to the SQL db and mock the status of the TEE proof generation + + let db_conn_pool = ConnectionPool::test_pool().await; + mock_tee_batch_status(db_conn_pool.clone(), batch_number, &object_path).await; + + // test the /tee/proof_inputs endpoint; it should return the batch from the object store + + let app = create_proof_processing_router( + blob_store, + db_conn_pool, + ProofDataHandlerConfig { + http_port: 1337, + proof_generation_timeout_in_secs: 10, + tee_support: true, + }, + L1BatchCommitmentMode::Rollup, + ); + let req_body = Body::from(serde_json::to_vec(&json!({})).unwrap()); + let response = app + .oneshot( + Request::builder() + .method(Method::POST) + .uri("/tee/proof_inputs") + .header(http::header::CONTENT_TYPE, "application/json") + .body(req_body) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), StatusCode::OK); + + let body = response.into_body().collect().await.unwrap().to_bytes(); + let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); + let json = json + .get("Success") + .expect("Unexpected response format") + .clone(); + let deserialized: TeeVerifierInput = serde_json::from_value(json).unwrap(); + + assert_eq!(tvi, deserialized); +} + +// Test /tee/submit_proofs endpoint using a mocked TEE proof and verify response and db state +#[tokio::test] +async fn submit_tee_proof() { + let blob_store = MockObjectStore::arc(); + let db_conn_pool = ConnectionPool::test_pool().await; + let object_path = "mocked_object_path"; + let batch_number =
L1BatchNumber::from(1); + + mock_tee_batch_status(db_conn_pool.clone(), batch_number, object_path).await; + + // send a request to the /tee/submit_proofs endpoint, using a mocked TEE proof + + let tee_proof_request_str = r#"{ + "signature": [ 0, 1, 2, 3, 4 ], + "pubkey": [ 5, 6, 7, 8, 9 ], + "proof": [ 10, 11, 12, 13, 14 ] + }"#; + let tee_proof_request = + serde_json::from_str::<SubmitTeeProofRequest>(tee_proof_request_str).unwrap(); + let uri = format!("/tee/submit_proofs/{}", batch_number.0); + let app = create_proof_processing_router( + blob_store, + db_conn_pool.clone(), + ProofDataHandlerConfig { + http_port: 1337, + proof_generation_timeout_in_secs: 10, + tee_support: true, + }, + L1BatchCommitmentMode::Rollup, + ); + + // this should fail because we haven't saved the attestation for the pubkey yet + + let response = send_submit_tee_proof_request(&app, &uri, &tee_proof_request).await; + assert_eq!(response.status(), StatusCode::BAD_GATEWAY); + + // save the attestation for the pubkey + + let attestation = [15, 16, 17, 18, 19]; + let mut proof_dal = db_conn_pool.connection().await.unwrap(); + proof_dal + .tee_proof_generation_dal() + .save_attestation(&tee_proof_request.0.pubkey, &attestation) + .await + .expect("Failed to save attestation"); + + // resend the same request; this time, it should be successful. + + let response = send_submit_tee_proof_request(&app, &uri, &tee_proof_request).await; + assert_eq!(response.status(), StatusCode::OK); + + // there should not be any batches awaiting proof in the db anymore + + let mut proof_db_conn = db_conn_pool.connection().await.unwrap(); + let oldest_batch_number = proof_db_conn + .tee_proof_generation_dal() + .get_oldest_unpicked_batch() + .await + .unwrap(); + + assert!(oldest_batch_number.is_none()); +} + +// Mock SQL db with information about the status of the TEE proof generation +async fn mock_tee_batch_status( + db_conn_pool: ConnectionPool<Core>, + batch_number: L1BatchNumber, + object_path: &str, +) { + let mut proof_db_conn = db_conn_pool.connection().await.unwrap(); + let mut proof_dal = proof_db_conn.tee_proof_generation_dal(); + let mut input_db_conn = db_conn_pool.connection().await.unwrap(); + let mut input_producer_dal = input_db_conn.tee_verifier_input_producer_dal(); + + // there should not be any batches awaiting proof in the db yet + + let oldest_batch_number = proof_dal.get_oldest_unpicked_batch().await.unwrap(); + assert!(oldest_batch_number.is_none()); + + // mock SQL table with relevant information about the status of the TEE verifier input + + input_producer_dal + .create_tee_verifier_input_producer_job(batch_number) + .await + .expect("Failed to create tee_verifier_input_producer_job"); + + // pretend that the TEE verifier input blob file was fetched successfully + + input_producer_dal + .mark_job_as_successful(batch_number, Instant::now(), object_path) + .await + .expect("Failed to mark tee_verifier_input_producer_job job as successful"); + + // mock SQL table with relevant information about the status of TEE proof generation ('ready_to_be_proven') + + proof_dal + .insert_tee_proof_generation_job(batch_number) + .await + .expect("Failed to insert tee_proof_generation_job"); + + // now, there should be one batch in the db awaiting proof + + let oldest_batch_number = proof_dal + .get_oldest_unpicked_batch() + .await + .unwrap() + .unwrap(); + assert_eq!(oldest_batch_number, batch_number); +} + +async fn send_submit_tee_proof_request( + app: &Router, + uri: &str, + tee_proof_request: &SubmitTeeProofRequest, +) -> Response { + let req_body =
Body::from(serde_json::to_vec(tee_proof_request).unwrap()); + app.clone() + .oneshot( + Request::builder() + .method(Method::POST) + .uri(uri) + .header(http::header::CONTENT_TYPE, "application/json") + .body(req_body) + .unwrap(), + ) + .await + .unwrap() +} diff --git a/core/node/tee_verifier_input_producer/src/lib.rs b/core/node/tee_verifier_input_producer/src/lib.rs index 47ae9cd87c3..efa3c9e00b1 100644 --- a/core/node/tee_verifier_input_producer/src/lib.rs +++ b/core/node/tee_verifier_input_producer/src/lib.rs @@ -259,6 +259,10 @@ impl JobProcessor for TeeVerifierInputProducer { .mark_job_as_successful(job_id, started_at, &object_path) .await .context("failed to mark job as successful for TeeVerifierInputProducer")?; + transaction + .tee_proof_generation_dal() + .insert_tee_proof_generation_job(job_id) + .await?; transaction .commit() .await diff --git a/etc/env/base/proof_data_handler.toml b/etc/env/base/proof_data_handler.toml index 3ea1ee03aa6..7a1999a03c3 100644 --- a/etc/env/base/proof_data_handler.toml +++ b/etc/env/base/proof_data_handler.toml @@ -1,3 +1,4 @@ [proof_data_handler] -http_port=3320 -proof_generation_timeout_in_secs=18000 +http_port = 3320 +proof_generation_timeout_in_secs = 18000 +tee_support = true diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index de7914bd3e6..03cba74c97c 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -190,6 +190,7 @@ witness_vector_generator: data_handler: http_port: 3320 proof_generation_timeout_in_secs: 18000 + tee_support: true prover_gateway: api_url: http://127.0.0.1:3320 api_poll_duration_secs: 1000 diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 571bc59c18c..4bdd726e308 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -9180,6 +9180,7 @@ dependencies = [ "zksync_dal", "zksync_object_store", "zksync_prover_interface", + "zksync_tee_verifier", "zksync_types", ]
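To make the wiring above concrete: below is a minimal sketch (not part of the patch) of how an external TEE prover might drive the three new endpoints end to end. The paths, JSON shapes, and byte values mirror the handlers and tests in this diff; the `reqwest` client (with its `json` feature enabled), the `tee_prover_round` helper, and the hardcoded vectors are illustrative assumptions rather than code from this PR.

use serde_json::{json, Value};

async fn tee_prover_round(client: &reqwest::Client, base_url: &str) -> anyhow::Result<()> {
    // 1. Register the enclave's attestation for its public key; `save_attestation`
    //    rejects a pubkey that already has an attestation assigned.
    client
        .post(format!("{base_url}/tee/register_attestation"))
        .json(&json!({ "pubkey": [5, 6, 7, 8, 9], "attestation": [15, 16, 17, 18, 19] }))
        .send()
        .await?
        .error_for_status()?;

    // 2. Ask for the next batch to prove. The request body is the empty
    //    `TeeProofGenerationDataRequest` (`{}`); the response uses serde's external
    //    enum tagging: `{"Success": <TeeVerifierInput or null>}` or `{"Error": "..."}`.
    let response: Value = client
        .post(format!("{base_url}/tee/proof_inputs"))
        .json(&json!({}))
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    let Some(_input) = response.get("Success").filter(|v| !v.is_null()) else {
        return Ok(()); // no batch is currently awaiting a TEE proof
    };

    // 3. Re-execute the batch inside the enclave, sign the result with the attested
    //    key, and submit it; the handler persists it via `save_proof_artifacts_metadata`
    //    with `TeeType::Sgx`.
    let l1_batch_number = 1; // a real prover would take this from the fetched input
    client
        .post(format!("{base_url}/tee/submit_proofs/{l1_batch_number}"))
        .json(&json!({
            "signature": [0, 1, 2, 3, 4],
            "pubkey": [5, 6, 7, 8, 9],
            "proof": [10, 11, 12, 13, 14]
        }))
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}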