diff --git a/Cargo.lock b/Cargo.lock index b9b45849d3..dc27c931cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -32,16 +32,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aead" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" -dependencies = [ - "crypto-common", - "generic-array 0.14.7", -] - [[package]] name = "aes" version = "0.6.0" @@ -50,18 +40,7 @@ checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" dependencies = [ "aes-soft", "aesni", - "cipher 0.2.5", -] - -[[package]] -name = "aes" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" -dependencies = [ - "cfg-if 1.0.0", - "cipher 0.4.4", - "cpufeatures", + "cipher", ] [[package]] @@ -70,25 +49,11 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5278b5fabbb9bd46e24aa69b2fdea62c99088e0a950a9be40e3e0101298f88da" dependencies = [ - "aead 0.3.2", - "aes 0.6.0", - "cipher 0.2.5", - "ctr 0.6.0", - "ghash 0.3.1", - "subtle", -] - -[[package]] -name = "aes-gcm" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" -dependencies = [ - "aead 0.5.2", - "aes 0.8.4", - "cipher 0.4.4", - "ctr 0.9.2", - "ghash 0.5.0", + "aead", + "aes", + "cipher", + "ctr", + "ghash", "subtle", ] @@ -98,7 +63,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" dependencies = [ - "cipher 0.2.5", + "cipher", "opaque-debug", ] @@ -108,7 +73,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" 
dependencies = [ - "cipher 0.2.5", + "cipher", "opaque-debug", ] @@ -208,12 +173,6 @@ version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" -[[package]] -name = "arrayvec" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" - [[package]] name = "ascii" version = "1.1.0" @@ -504,18 +463,6 @@ version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" -[[package]] -name = "bitvec" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - [[package]] name = "block-buffer" version = "0.9.0" @@ -550,33 +497,12 @@ dependencies = [ "tracing", ] -[[package]] -name = "bs58" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" - -[[package]] -name = "bs58" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" -dependencies = [ - "tinyvec", -] - [[package]] name = "bumpalo" version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" -[[package]] -name = "byte-slice-cast" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" - [[package]] name = "byteorder" version = "1.5.0" @@ -645,16 +571,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "cipher" 
-version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" -dependencies = [ - "crypto-common", - "inout", -] - [[package]] name = "clap" version = "2.34.0" @@ -763,7 +679,7 @@ version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03a5d7b21829bc7b4bf4754a978a241ae54ea55a40f92bb20216e54096f4b951" dependencies = [ - "aes-gcm 0.8.0", + "aes-gcm", "base64 0.13.1", "hkdf", "hmac", @@ -875,12 +791,6 @@ version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - [[package]] name = "crypto-common" version = "0.1.6" @@ -888,7 +798,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array 0.14.7", - "rand_core 0.6.4", "typenum", ] @@ -929,16 +838,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" dependencies = [ - "cipher 0.2.5", -] - -[[package]] -name = "ctr" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" -dependencies = [ - "cipher 0.4.4", + "cipher", ] [[package]] @@ -1213,18 +1113,6 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" -[[package]] -name = "fixed-hash" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" -dependencies = [ - "byteorder", - "rand 0.8.5", - "rustc-hex", - "static_assertions", -] - [[package]] name = "fnv" version = "1.0.7" @@ -1256,12 +1144,6 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" -[[package]] -name = "funty" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" - [[package]] name = "futures" version = "0.3.30" @@ -1433,17 +1315,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" dependencies = [ "opaque-debug", - "polyval 0.4.5", -] - -[[package]] -name = "ghash" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" -dependencies = [ - "opaque-debug", - "polyval 0.6.1", + "polyval", ] [[package]] @@ -1554,12 +1426,6 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd5256b483761cd23699d0da46cc6fd2ee3be420bbe6d020ae4a091e70b7e9fd" -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - [[package]] name = "hkdf" version = "0.10.0" @@ -1580,15 +1446,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "home" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" -dependencies = [ - "windows-sys 0.52.0", -] - [[package]] name = "http" version = "0.2.11" @@ -1716,26 +1573,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "impl-codec" 
-version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "indexmap" version = "2.2.3" @@ -1752,15 +1589,6 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64e9829a50b42bb782c1df523f78d332fe371b10c661e78b7a3c34b0198e9fac" -[[package]] -name = "inout" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "instant" version = "0.1.12" @@ -1926,7 +1754,6 @@ dependencies = [ "stackslib", "thiserror", "tiny_http", - "wsts", ] [[package]] @@ -2190,54 +2017,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" -[[package]] -name = "p256k1" -version = "7.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40a031a559eb38c35a14096f21c366254501a06d41c4b327d2a7515d713a5b7" -dependencies = [ - "bitvec", - "bs58 0.4.0", - "cc", - "hex", - "itertools", - "num-traits", - "primitive-types", - "proc-macro2", - "quote", - "rand_core 0.6.4", - "rustfmt-wrapper", - "serde", - "sha2 0.10.8", - "syn 2.0.48", -] - -[[package]] -name = "parity-scale-codec" -version = "3.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" -dependencies = [ - "arrayvec", - "bitvec", - "byte-slice-cast", - 
"impl-trait-for-tuples", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "3.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "parking" version = "2.2.0" @@ -2397,7 +2176,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27abb6e4638dcecc65a92b50d7f1d87dd6dea987ba71db987b6bf881f4877e9d" dependencies = [ "num-traits", - "serde", ] [[package]] @@ -2408,19 +2186,7 @@ checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" dependencies = [ "cpuid-bool", "opaque-debug", - "universal-hash 0.4.0", -] - -[[package]] -name = "polyval" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" -dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "opaque-debug", - "universal-hash 0.5.1", + "universal-hash", ] [[package]] @@ -2445,26 +2211,6 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" -[[package]] -name = "primitive-types" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" -dependencies = [ - "fixed-hash", - "impl-codec", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" -dependencies = [ - "toml_edit 0.20.7", -] - [[package]] name = "proc-macro-error" version = "1.0.4" @@ -2542,12 +2288,6 @@ dependencies = [ "proc-macro2", ] -[[package]] -name = "radium" 
-version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" - [[package]] name = "rand" version = "0.7.3" @@ -2887,12 +2627,6 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - [[package]] name = "rustc_version" version = "0.2.3" @@ -2920,19 +2654,6 @@ dependencies = [ "semver 1.0.21", ] -[[package]] -name = "rustfmt-wrapper" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1adc9dfed5cc999077978cc7163b9282c5751c8d39827c4ea8c8c220ca5a440" -dependencies = [ - "serde", - "tempfile", - "thiserror", - "toml 0.8.10", - "toolchain_find", -] - [[package]] name = "rustix" version = "0.37.27" @@ -3138,15 +2859,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "serde_spanned" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" -dependencies = [ - "serde", -] - [[package]] name = "serde_stacker" version = "0.1.11" @@ -3395,7 +3107,6 @@ dependencies = [ "slog-term", "time 0.2.27", "winapi 0.3.9", - "wsts", ] [[package]] @@ -3432,12 +3143,11 @@ dependencies = [ "tikv-jemallocator", "tiny_http", "tokio", - "toml 0.5.11", + "toml", "tracing", "tracing-subscriber", "url", "warp", - "wsts", ] [[package]] @@ -3470,11 +3180,10 @@ dependencies = [ "stackslib", "thiserror", "tiny_http", - "toml 0.5.11", + "toml", "tracing", "tracing-subscriber", "url", - "wsts", ] [[package]] @@ -3525,7 +3234,6 @@ dependencies = [ "time 0.2.27", "url", "winapi 0.3.9", - "wsts", ] [[package]] @@ -3537,12 +3245,6 @@ dependencies 
= [ "version_check", ] -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - [[package]] name = "stdext" version = "0.3.2" @@ -3667,24 +3369,6 @@ dependencies = [ "libc", ] -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "tempfile" -version = "3.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" -dependencies = [ - "cfg-if 1.0.0", - "fastrand 2.0.1", - "rustix 0.38.31", - "windows-sys 0.52.0", -] - [[package]] name = "term" version = "0.7.0" @@ -3935,64 +3619,6 @@ dependencies = [ "serde", ] -[[package]] -name = "toml" -version = "0.8.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a9aad4a3066010876e8dcf5a8a06e70a558751117a145c6ce2b82c2e2054290" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit 0.22.5", -] - -[[package]] -name = "toml_datetime" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" -dependencies = [ - "serde", -] - -[[package]] -name = "toml_edit" -version = "0.20.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" -dependencies = [ - "indexmap", - "toml_datetime", - "winnow 0.5.40", -] - -[[package]] -name = "toml_edit" -version = "0.22.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e68c159e8f5ba8a28c4eb7b0c0c190d77bb479047ca713270048145a9ad28a" -dependencies = [ - "indexmap", - "serde", - "serde_spanned", - "toml_datetime", - "winnow 
0.6.1", -] - -[[package]] -name = "toolchain_find" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc8c9a7f0a2966e1acdaf0461023d0b01471eeead645370cf4c3f5cff153f2a" -dependencies = [ - "home", - "once_cell", - "regex", - "semver 1.0.21", - "walkdir", -] - [[package]] name = "tower-service" version = "0.3.2" @@ -4098,18 +3724,6 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" -[[package]] -name = "uint" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f64bba2c53b04fcab63c01a7d7427eadc821e3bc48c34dc9ba29c501164b52" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - [[package]] name = "unicase" version = "2.7.0" @@ -4156,16 +3770,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "universal-hash" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" -dependencies = [ - "crypto-common", - "subtle", -] - [[package]] name = "untrusted" version = "0.7.1" @@ -4560,24 +4164,6 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" -[[package]] -name = "winnow" -version = "0.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" -dependencies = [ - "memchr", -] - -[[package]] -name = "winnow" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d90f4e0f530c4c69f62b80d839e9ef3855edc9cba471a160c4d692deed62b401" -dependencies = [ - "memchr", -] - [[package]] name = "winreg" version = "0.50.0" @@ -4598,37 +4184,6 @@ dependencies = [ "winapi-build", ] -[[package]] -name = "wsts" 
-version = "9.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c80d57a61294350ed91e91eb20a6c34da084ec8f15d039bab79ce3efabbd1a4" -dependencies = [ - "aes-gcm 0.10.3", - "bs58 0.5.0", - "hashbrown", - "hex", - "num-traits", - "p256k1", - "polynomial", - "primitive-types", - "rand_core 0.6.4", - "serde", - "sha2 0.10.8", - "thiserror", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "wyz" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" -dependencies = [ - "tap", -] - [[package]] name = "zerocopy" version = "0.7.32" diff --git a/Cargo.toml b/Cargo.toml index 8ac168f1f7..10dc427e2e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,12 +15,11 @@ members = [ # Dependencies we want to keep the same between workspace members [workspace.dependencies] ed25519-dalek = { version = "2.1.1", features = ["serde", "rand_core"] } -hashbrown = "0.14.3" +hashbrown = { version = "0.14.3", features = ["serde"] } rand_core = "0.6" rand = "0.8" rand_chacha = "0.3.1" tikv-jemallocator = "0.5.4" -wsts = { version = "9.0.0", default-features = false } rusqlite = { version = "0.31.0", features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] } # Use a bit more than default optimization for diff --git a/libsigner/Cargo.toml b/libsigner/Cargo.toml index 7da9801674..63241d3256 100644 --- a/libsigner/Cargo.toml +++ b/libsigner/Cargo.toml @@ -32,7 +32,6 @@ stacks-common = { path = "../stacks-common" } stackslib = { path = "../stackslib"} thiserror = "1.0" tiny_http = "0.12" -wsts = { workspace = true } [dev-dependencies] mutants = "0.0.3" diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 4fb6d7a507..1de0e34f09 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -25,7 +25,7 @@ use std::time::SystemTime; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use 
blockstack_lib::chainstate::stacks::boot::{MINERS_NAME, SIGNERS_NAME}; use blockstack_lib::chainstate::stacks::events::StackerDBChunksEvent; -use blockstack_lib::chainstate::stacks::{StacksTransaction, ThresholdSignature}; +use blockstack_lib::chainstate::stacks::StacksTransaction; use blockstack_lib::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, ValidateRejectCode, }; @@ -48,12 +48,6 @@ use stacks_common::util::HexError; use tiny_http::{ Method as HttpMethod, Request as HttpRequest, Response as HttpResponse, Server as HttpServer, }; -use wsts::common::Signature; -use wsts::net::{ - DkgBegin, DkgEnd, DkgEndBegin, DkgPrivateBegin, DkgPrivateShares, DkgPublicShares, DkgStatus, - Message, NonceRequest, NonceResponse, Packet, SignatureShareRequest, SignatureShareResponse, -}; -use wsts::state_machine::signer; use crate::http::{decode_http_body, decode_http_request}; use crate::EventError; diff --git a/libsigner/src/libsigner.rs b/libsigner/src/libsigner.rs index 0da4e68a8f..878d428bfc 100644 --- a/libsigner/src/libsigner.rs +++ b/libsigner/src/libsigner.rs @@ -44,8 +44,6 @@ mod session; mod signer_set; /// v0 signer related code pub mod v0; -/// v1 signer related code -pub mod v1; use std::cmp::Eq; use std::fmt::Debug; diff --git a/libsigner/src/runloop.rs b/libsigner/src/runloop.rs index d4710f82e6..0a5ed49a6d 100644 --- a/libsigner/src/runloop.rs +++ b/libsigner/src/runloop.rs @@ -41,7 +41,7 @@ const STDERR: i32 = 2; /// Trait describing the needful components of a top-level runloop. /// This is where the signer business logic would go. /// Implement this, and you get all the multithreaded setup for free. -pub trait SignerRunLoop { +pub trait SignerRunLoop { /// Hint to set how long to wait for new events fn set_event_timeout(&mut self, timeout: Duration); /// Getter for the event poll timeout @@ -49,12 +49,7 @@ pub trait SignerRunLoop { /// Run one pass of the event loop, given new Signer events discovered since the last pass. 
/// Returns Some(R) if this is the final pass -- the runloop evaluated to R /// Returns None to keep running. - fn run_one_pass( - &mut self, - event: Option>, - cmd: Option, - res: &Sender, - ) -> Option; + fn run_one_pass(&mut self, event: Option>, res: &Sender) -> Option; /// This is the main loop body for the signer. It continuously receives events from /// `event_recv`, polling for up to `self.get_event_timeout()` units of time. Once it has @@ -66,7 +61,6 @@ pub trait SignerRunLoop { fn main_loop( &mut self, event_recv: Receiver>, - command_recv: Receiver, result_send: Sender, mut event_stop_signaler: EVST, ) -> Option { @@ -81,11 +75,7 @@ pub trait SignerRunLoop { return None; } }; - // Do not block for commands - let next_command_opt = command_recv.try_recv().ok(); - if let Some(final_state) = - self.run_one_pass(next_event_opt, next_command_opt, &result_send) - { + if let Some(final_state) = self.run_one_pass(next_event_opt, &result_send) { info!("Runloop exit; signaling event-receiver to stop"); event_stop_signaler.send(); return Some(final_state); @@ -95,13 +85,11 @@ pub trait SignerRunLoop { } /// The top-level signer implementation -pub struct Signer { +pub struct Signer { /// the runloop itself signer_loop: Option, /// the event receiver to use event_receiver: Option, - /// the command receiver to use - command_receiver: Option>, /// the result sender to use result_sender: Option>, /// phantom data for the codec @@ -193,18 +181,12 @@ pub fn set_runloop_signal_handler(mut st }).expect("FATAL: failed to set signal handler"); } -impl Signer { +impl Signer { /// Create a new signer with the given runloop and event receiver. 
- pub fn new( - runloop: SL, - event_receiver: EV, - command_receiver: Receiver, - result_sender: Sender, - ) -> Signer { + pub fn new(runloop: SL, event_receiver: EV, result_sender: Sender) -> Signer { Signer { signer_loop: Some(runloop), event_receiver: Some(event_receiver), - command_receiver: Some(command_receiver), result_sender: Some(result_sender), phantom_data: PhantomData, } @@ -212,12 +194,11 @@ impl Signer { } impl< - CMD: Send + 'static, R: Send + 'static, T: SignerEventTrait + 'static, - SL: SignerRunLoop + Send + 'static, + SL: SignerRunLoop + Send + 'static, EV: EventReceiver + Send + 'static, - > Signer + > Signer { /// This is a helper function to spawn both the runloop and event receiver in their own /// threads. Advanced signers may not need this method, and instead opt to run the receiver @@ -234,10 +215,6 @@ impl< .event_receiver .take() .ok_or(EventError::AlreadyRunning)?; - let command_receiver = self - .command_receiver - .take() - .ok_or(EventError::AlreadyRunning)?; let result_sender = self .result_sender .take() @@ -266,9 +243,7 @@ impl< let runloop_thread = thread::Builder::new() .name(format!("signer_runloop:{bind_port}")) .stack_size(THREAD_STACK_SIZE) - .spawn(move || { - signer_loop.main_loop(event_recv, command_receiver, result_sender, stop_signaler) - }) + .spawn(move || signer_loop.main_loop(event_recv, result_sender, stop_signaler)) .map_err(|e| { error!("SignerRunLoop failed to start: {:?}", &e); ret_stop_signaler.send(); diff --git a/libsigner/src/signer_set.rs b/libsigner/src/signer_set.rs index fdcb857faf..f47ac454aa 100644 --- a/libsigner/src/signer_set.rs +++ b/libsigner/src/signer_set.rs @@ -13,125 +13,77 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::collections::{BTreeMap, HashMap}; + use blockstack_lib::chainstate::stacks::boot::NakamotoSignerEntry; -use hashbrown::{HashMap, HashSet}; use stacks_common::types::chainstate::{StacksAddress, StacksPublicKey}; -use wsts::curve::ecdsa; -use wsts::curve::point::{Compressed, Point}; -use wsts::state_machine::PublicKeys; -/// A reward set parsed into the structures required by WSTS party members and coordinators. +/// A reward set parsed into relevant structures #[derive(Debug, Clone)] pub struct SignerEntries { - /// The signer addresses mapped to signer id - pub signer_ids: HashMap, - /// The signer ids mapped to public key and key ids mapped to public keys - pub public_keys: PublicKeys, - /// The signer ids mapped to key ids - pub signer_key_ids: HashMap>, - /// The signer ids mapped to wsts public keys - pub signer_public_keys: HashMap, - /// The signer ids mapped to a hash set of key ids - /// The wsts coordinator uses a hash set for each signer since it needs to do lots of lookups - pub coordinator_key_ids: HashMap>, + /// The signer addresses mapped to signer ID + pub signer_addr_to_id: HashMap, + /// The signer IDs mapped to addresses. Uses a BTreeMap to ensure *reward cycle order* + pub signer_id_to_addr: BTreeMap, + /// signer ID mapped to public key + pub signer_id_to_pk: HashMap, + /// public_key mapped to signer ID + pub signer_pk_to_id: HashMap, + /// The signer public keys + pub signer_pks: Vec, + /// The signer addresses + pub signer_addresses: Vec, + /// The signer address mapped to signing weight + pub signer_addr_to_weight: HashMap, } /// Parsing errors for `SignerEntries` #[derive(Debug)] pub enum Error { /// A member of the signing set has a signing key buffer - /// which does not represent a ecdsa public key. 
+ /// which does not represent a valid Stacks public key BadSignerPublicKey(String), /// The number of signers was greater than u32::MAX SignerCountOverflow, } impl SignerEntries { - /// Try to parse the reward set defined by `NakamotoSignEntry` into the structures required - /// by WSTS party members and coordinators. + /// Try to parse the reward set defined by `NakamotoSignEntry` into the SignerEntries struct pub fn parse(is_mainnet: bool, reward_set: &[NakamotoSignerEntry]) -> Result { - let mut weight_end = 1; - let mut signer_key_ids = HashMap::with_capacity(reward_set.len()); - let mut signer_public_keys = HashMap::with_capacity(reward_set.len()); - let mut coordinator_key_ids = HashMap::with_capacity(4000); - let mut signer_ids = HashMap::with_capacity(reward_set.len()); - let mut wsts_signers = HashMap::new(); - let mut wsts_key_ids = HashMap::new(); + let mut signer_pk_to_id = HashMap::with_capacity(reward_set.len()); + let mut signer_id_to_pk = HashMap::with_capacity(reward_set.len()); + let mut signer_addr_to_id = HashMap::with_capacity(reward_set.len()); + let mut signer_pks = Vec::with_capacity(reward_set.len()); + let mut signer_id_to_addr = BTreeMap::new(); + let mut signer_addr_to_weight = HashMap::new(); + let mut signer_addresses = Vec::with_capacity(reward_set.len()); for (i, entry) in reward_set.iter().enumerate() { let signer_id = u32::try_from(i).map_err(|_| Error::SignerCountOverflow)?; - let ecdsa_pk = - ecdsa::PublicKey::try_from(entry.signing_key.as_slice()).map_err(|e| { - Error::BadSignerPublicKey(format!( - "Failed to convert signing key to ecdsa::PublicKey: {e}" - )) - })?; - let signer_public_key = Point::try_from(&Compressed::from(ecdsa_pk.to_bytes())) - .map_err(|e| { - Error::BadSignerPublicKey(format!( - "Failed to convert signing key to wsts::Point: {e}" - )) - })?; - let stacks_public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) + let signer_public_key = 
StacksPublicKey::from_slice(entry.signing_key.as_slice()) .map_err(|e| { Error::BadSignerPublicKey(format!( "Failed to convert signing key to StacksPublicKey: {e}" )) })?; - let stacks_address = StacksAddress::p2pkh(is_mainnet, &stacks_public_key); - signer_ids.insert(stacks_address, signer_id); - - signer_public_keys.insert(signer_id, signer_public_key); - let weight_start = weight_end; - weight_end = weight_start + entry.weight; - let key_ids: HashSet = (weight_start..weight_end).collect(); - for key_id in key_ids.iter() { - wsts_key_ids.insert(*key_id, ecdsa_pk); - } - signer_key_ids.insert(signer_id, (weight_start..weight_end).collect()); - coordinator_key_ids.insert(signer_id, key_ids); - wsts_signers.insert(signer_id, ecdsa_pk); + let stacks_address = StacksAddress::p2pkh(is_mainnet, &signer_public_key); + signer_addr_to_id.insert(stacks_address, signer_id); + signer_id_to_pk.insert(signer_id, signer_public_key); + signer_pk_to_id.insert(signer_public_key, signer_id); + signer_pks.push(signer_public_key); + signer_id_to_addr.insert(signer_id, stacks_address); + signer_addr_to_weight.insert(stacks_address, entry.weight); + signer_addresses.push(stacks_address); } Ok(Self { - signer_ids, - public_keys: PublicKeys { - signers: wsts_signers, - key_ids: wsts_key_ids, - }, - signer_key_ids, - signer_public_keys, - coordinator_key_ids, + signer_addr_to_id, + signer_id_to_pk, + signer_pk_to_id, + signer_pks, + signer_id_to_addr, + signer_addr_to_weight, + signer_addresses, }) } - - /// Return the number of Key IDs in the WSTS group signature - pub fn count_keys(&self) -> Result { - self.public_keys - .key_ids - .len() - .try_into() - .map_err(|_| Error::SignerCountOverflow) - } - - /// Return the number of Key IDs in the WSTS group signature - pub fn count_signers(&self) -> Result { - self.public_keys - .signers - .len() - .try_into() - .map_err(|_| Error::SignerCountOverflow) - } - - /// Return the number of Key IDs required to sign a message with the WSTS group 
signature - pub fn get_signing_threshold(&self) -> Result { - let num_keys = self.count_keys()?; - Ok((num_keys as f64 * 7_f64 / 10_f64).ceil() as u32) - } - - /// Return the number of Key IDs required to sign a message with the WSTS group signature - pub fn get_dkg_threshold(&self) -> Result { - let num_keys = self.count_keys()?; - Ok((num_keys as f64 * 9_f64 / 10_f64).ceil() as u32) - } } diff --git a/libsigner/src/tests/http.rs b/libsigner/src/tests/http.rs index d0f3887b45..b31fb042e8 100644 --- a/libsigner/src/tests/http.rs +++ b/libsigner/src/tests/http.rs @@ -25,7 +25,7 @@ use crate::http::{decode_http_body, decode_http_request, decode_http_response, r #[test] fn test_decode_http_request_ok() { - let tests = vec![ + let tests = [ ("GET /foo HTTP/1.1\r\nHost: localhost:6270\r\n\r\n", ("GET", "/foo", vec![("host", "localhost:6270")])), ("POST asdf HTTP/1.1\r\nHost: core.blockstack.org\r\nFoo: Bar\r\n\r\n", @@ -61,7 +61,7 @@ fn test_decode_http_request_ok() { #[test] fn test_decode_http_request_err() { - let tests = vec![ + let tests = [ ( "GET /foo HTTP/1.1\r\n", EventError::Deserialize("".to_string()), @@ -99,7 +99,7 @@ fn test_decode_http_request_err() { #[test] fn test_decode_http_response_ok() { - let tests = vec![ + let tests = [ ("HTTP/1.1 200 OK\r\nContent-Type: application/octet-stream\r\nContent-Length: 123\r\nX-Request-ID: 0\r\n\r\n", vec![("content-type", "application/octet-stream"), ("content-length", "123"), ("x-request-id", "0")]), ("HTTP/1.1 200 Ok\r\nContent-Type: application/octet-stream\r\nTransfer-encoding: chunked\r\nX-Request-ID: 0\r\n\r\n", @@ -123,7 +123,7 @@ fn test_decode_http_response_ok() { #[test] fn test_decode_http_response_err() { - let tests = vec![ + let tests = [ ("HTTP/1.1 400 Bad Request\r\nContent-Type: application/json\r\nContent-Length: 456\r\nFoo: Bar\r\nX-Request-ID: 0\r\n\r\n", RPCError::HttpError(400)), ("HTTP/1.1 200", @@ -223,7 +223,7 @@ impl Write for MockHTTPSocket { #[test] fn test_run_http_request_with_body() { 
- let tests = vec![ + let tests = [ ("GET", "/test-no-content-type-and-no-body", None, vec![]), ( "GET", @@ -288,7 +288,7 @@ fn test_run_http_request_with_body() { #[test] fn test_run_http_request_no_body() { - let tests = vec![ + let tests = [ ("GET", "/test-no-content-type-and-no-body", None, vec![]), ( "GET", diff --git a/libsigner/src/tests/mod.rs b/libsigner/src/tests/mod.rs index fbe1e59089..8ef6d38eee 100644 --- a/libsigner/src/tests/mod.rs +++ b/libsigner/src/tests/mod.rs @@ -24,22 +24,26 @@ use std::time::Duration; use std::{mem, thread}; use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; +use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; use blockstack_lib::chainstate::stacks::boot::SIGNERS_NAME; use blockstack_lib::chainstate::stacks::events::StackerDBChunksEvent; use blockstack_lib::util_lib::boot::boot_code_id; +use clarity::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; +use clarity::util::hash::Sha512Trunc256Sum; +use clarity::util::secp256k1::MessageSignature; use clarity::vm::types::QualifiedContractIdentifier; use libstackerdb::StackerDBChunkData; +use stacks_common::bitvec::BitVec; use stacks_common::codec::{ read_next, read_next_at_most, read_next_exact, write_next, Error as CodecError, StacksMessageCodec, }; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::sleep_ms; -use wsts::net::{DkgBegin, Packet}; use crate::events::{SignerEvent, SignerEventTrait}; -use crate::v1::messages::SignerMessage; -use crate::{Signer, SignerEventReceiver, SignerRunLoop}; +use crate::v0::messages::{BlockRejection, SignerMessage}; +use crate::{BlockProposal, Signer, SignerEventReceiver, SignerRunLoop}; /// Simple runloop implementation. It receives `max_events` events and returns `events` from the /// last call to `run_one_pass` as its final state. 
@@ -63,7 +67,7 @@ enum Command { Empty, } -impl SignerRunLoop>, Command, T> for SimpleRunLoop { +impl SignerRunLoop>, T> for SimpleRunLoop { fn set_event_timeout(&mut self, timeout: Duration) { self.poll_timeout = timeout; } @@ -75,7 +79,6 @@ impl SignerRunLoop>, Command, T> for Sim fn run_one_pass( &mut self, event: Option>, - _cmd: Option, _res: &Sender>>, ) -> Option>> { debug!("Got event: {:?}", &event); @@ -99,16 +102,34 @@ impl SignerRunLoop>, Command, T> for Sim fn test_simple_signer() { let contract_id = NakamotoSigners::make_signers_db_contract_id(0, 0, false); let ev = SignerEventReceiver::new(false); - let (_cmd_send, cmd_recv) = channel(); let (res_send, _res_recv) = channel(); let max_events = 5; - let mut signer = Signer::new(SimpleRunLoop::new(max_events), ev, cmd_recv, res_send); + let mut signer = Signer::new(SimpleRunLoop::new(max_events), ev, res_send); let endpoint: SocketAddr = "127.0.0.1:30000".parse().unwrap(); let mut chunks = vec![]; + let block_proposal = BlockProposal { + block: NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: 10, + burn_spent: 10, + consensus_hash: ConsensusHash([0; 20]), + parent_block_id: StacksBlockId([0; 32]), + tx_merkle_root: Sha512Trunc256Sum([0; 32]), + state_index_root: TrieHash([0; 32]), + timestamp: 11, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::ones(1).unwrap(), + }, + txs: vec![], + }, + burn_height: 2, + reward_cycle: 1, + }; for i in 0..max_events { let privk = Secp256k1PrivateKey::new(); - let msg = wsts::net::Message::DkgBegin(DkgBegin { dkg_id: 0 }); - let message = SignerMessage::Packet(Packet { msg, sig: vec![] }); + let message = SignerMessage::BlockProposal(block_proposal.clone()); let message_bytes = message.serialize_to_vec(); let mut chunk = StackerDBChunkData::new(i as u32, 1, message_bytes); chunk.sign(&privk).unwrap(); @@ -178,10 +199,9 @@ fn test_simple_signer() { #[test] fn test_status_endpoint() { let ev = 
SignerEventReceiver::new(false); - let (_cmd_send, cmd_recv) = channel(); let (res_send, _res_recv) = channel(); let max_events = 1; - let mut signer = Signer::new(SimpleRunLoop::new(max_events), ev, cmd_recv, res_send); + let mut signer = Signer::new(SimpleRunLoop::new(max_events), ev, res_send); let endpoint: SocketAddr = "127.0.0.1:31000".parse().unwrap(); // simulate a node that's trying to push data diff --git a/libsigner/src/v0/messages.rs b/libsigner/src/v0/messages.rs index 7209398c1c..47d317992d 100644 --- a/libsigner/src/v0/messages.rs +++ b/libsigner/src/v0/messages.rs @@ -887,8 +887,8 @@ impl From for SignerMessage { mod test { use blockstack_lib::chainstate::nakamoto::NakamotoBlockHeader; use blockstack_lib::chainstate::stacks::{ - ThresholdSignature, TransactionAnchorMode, TransactionAuth, TransactionPayload, - TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, + TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, + TransactionSmartContract, TransactionVersion, }; use blockstack_lib::util_lib::strings::StacksString; use clarity::consts::CHAIN_ID_MAINNET; diff --git a/libsigner/src/v1/messages.rs b/libsigner/src/v1/messages.rs deleted file mode 100644 index b412d9a66f..0000000000 --- a/libsigner/src/v1/messages.rs +++ /dev/null @@ -1,1869 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
-// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Messages in the signer-miner interaction have a multi-level hierarchy. -//! Signers send messages to each other through Packet messages. These messages, -//! as well as `BlockResponse`, `Transactions`, and `DkgResults` messages are stored -//! StackerDBs based on the `MessageSlotID` for the particular message type. This is a -//! shared identifier space between the four message kinds and their subtypes. -//! -//! These four message kinds are differentiated with a `SignerMessageTypePrefix` -//! and the `SignerMessage` enum. - -use std::fmt::{Debug, Display}; -use std::io::{Read, Write}; -use std::net::{SocketAddr, TcpListener, TcpStream}; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::mpsc::Sender; -use std::sync::Arc; - -use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; -use blockstack_lib::chainstate::nakamoto::NakamotoBlock; -use blockstack_lib::chainstate::stacks::events::StackerDBChunksEvent; -use blockstack_lib::chainstate::stacks::{StacksTransaction, ThresholdSignature}; -use blockstack_lib::net::api::postblock_proposal::{ - BlockValidateReject, BlockValidateResponse, ValidateRejectCode, -}; -use blockstack_lib::util_lib::boot::boot_code_id; -use clarity::util::retry::BoundReader; -use clarity::vm::types::serialization::SerializationError; -use clarity::vm::types::QualifiedContractIdentifier; -use hashbrown::{HashMap, HashSet}; -use serde::{Deserialize, Serialize}; -use stacks_common::codec::{ - read_next, read_next_at_most, read_next_exact, write_next, Error as CodecError, - StacksMessageCodec, -}; -use stacks_common::consts::SIGNER_SLOTS_PER_USER; -use stacks_common::util::hash::Sha512Trunc256Sum; -use tiny_http::{ - Method as HttpMethod, Request as HttpRequest, Response as HttpResponse, Server as HttpServer, -}; -use wsts::common::{PolyCommitment, PublicNonce, Signature, SignatureShare, 
TupleProof}; -use wsts::curve::point::{Compressed, Point}; -use wsts::curve::scalar::Scalar; -use wsts::net::{ - BadPrivateShare, DkgBegin, DkgEnd, DkgEndBegin, DkgFailure, DkgPrivateBegin, DkgPrivateShares, - DkgPublicShares, DkgStatus, Message, NonceRequest, NonceResponse, Packet, - SignatureShareRequest, SignatureShareResponse, -}; -use wsts::schnorr::ID; -use wsts::state_machine::{signer, SignError}; - -use crate::http::{decode_http_body, decode_http_request}; -use crate::{EventError, MessageSlotID as MessageSlotIDTrait, SignerMessage as SignerMessageTrait}; - -define_u8_enum!( -/// Enum representing the stackerdb message identifier: this is -/// the contract index in the signers contracts (i.e., X in signers-0-X) -MessageSlotID { - /// DkgBegin message - DkgBegin = 0, - /// DkgPrivateBegin - DkgPrivateBegin = 1, - /// DkgEndBegin - DkgEndBegin = 2, - /// DkgEnd - DkgEnd = 3, - /// DkgPublicshares - DkgPublicShares = 4, - /// DkgPrivateShares - DkgPrivateShares = 5, - /// NonceRequest - NonceRequest = 6, - /// NonceResponse - NonceResponse = 7, - /// SignatureShareRequest - SignatureShareRequest = 8, - /// SignatureShareResponse - SignatureShareResponse = 9, - /// Block proposal responses for miners to observe - BlockResponse = 10, - /// Transactions list for miners and signers to observe - Transactions = 11, - /// DKG Results - DkgResults = 12, - /// Persisted encrypted signer state containing DKG shares - EncryptedSignerState = 13 -}); - -impl MessageSlotIDTrait for MessageSlotID { - fn stacker_db_contract(&self, mainnet: bool, reward_cycle: u64) -> QualifiedContractIdentifier { - NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) - } - fn all() -> &'static [Self] { - MessageSlotID::ALL - } -} - -impl SignerMessageTrait for SignerMessage { - fn msg_id(&self) -> Option { - Some(self.msg_id()) - } -} - -define_u8_enum!( -/// Enum representing the signer message type prefix -SignerMessageTypePrefix { - /// A block response message 
- BlockResponse = 0, - /// A wsts packet message - Packet = 1, - /// A list of transactions that a signer cares about - Transactions = 2, - /// The results of a successful DKG - DkgResults = 3, - /// The encrypted state of the signer to be persisted - EncryptedSignerState = 4 -}); - -#[cfg_attr(test, mutants::skip)] -impl MessageSlotID { - /// Return the StackerDB contract corresponding to messages of this type - pub fn stacker_db_contract( - &self, - mainnet: bool, - reward_cycle: u64, - ) -> QualifiedContractIdentifier { - NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) - } - - /// Return the u32 identifier for the message slot (used to index the contract that stores it) - pub fn to_u32(&self) -> u32 { - self.to_u8().into() - } -} - -impl Display for MessageSlotID { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}({})", self, self.to_u8()) - } -} - -impl TryFrom for SignerMessageTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown signer message type prefix: {value}")) - }) - } -} - -impl From<&SignerMessage> for SignerMessageTypePrefix { - #[cfg_attr(test, mutants::skip)] - fn from(message: &SignerMessage) -> Self { - match message { - SignerMessage::Packet(_) => SignerMessageTypePrefix::Packet, - SignerMessage::BlockResponse(_) => SignerMessageTypePrefix::BlockResponse, - SignerMessage::Transactions(_) => SignerMessageTypePrefix::Transactions, - SignerMessage::DkgResults { .. 
} => SignerMessageTypePrefix::DkgResults, - SignerMessage::EncryptedSignerState(_) => SignerMessageTypePrefix::EncryptedSignerState, - } - } -} - -define_u8_enum!( -/// Enum representing the message type prefix -MessageTypePrefix { - /// DkgBegin message - DkgBegin = 0, - /// DkgPrivateBegin message - DkgPrivateBegin = 1, - /// DkgEndBegin message - DkgEndBegin = 2, - /// DkgEnd message - DkgEnd = 3, - /// DkgPublicShares message - DkgPublicShares = 4, - /// DkgPrivateShares message - DkgPrivateShares = 5, - /// NonceRequest message - NonceRequest = 6, - /// NonceResponse message - NonceResponse = 7, - /// SignatureShareRequest message - SignatureShareRequest = 8, - /// SignatureShareResponse message - SignatureShareResponse = 9 -}); - -impl From<&Message> for MessageTypePrefix { - fn from(msg: &Message) -> Self { - match msg { - Message::DkgBegin(_) => MessageTypePrefix::DkgBegin, - Message::DkgPrivateBegin(_) => MessageTypePrefix::DkgPrivateBegin, - Message::DkgEndBegin(_) => MessageTypePrefix::DkgEndBegin, - Message::DkgEnd(_) => MessageTypePrefix::DkgEnd, - Message::DkgPublicShares(_) => MessageTypePrefix::DkgPublicShares, - Message::DkgPrivateShares(_) => MessageTypePrefix::DkgPrivateShares, - Message::NonceRequest(_) => MessageTypePrefix::NonceRequest, - Message::NonceResponse(_) => MessageTypePrefix::NonceResponse, - Message::SignatureShareRequest(_) => MessageTypePrefix::SignatureShareRequest, - Message::SignatureShareResponse(_) => MessageTypePrefix::SignatureShareResponse, - } - } -} - -impl TryFrom for MessageTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown packet type prefix: {value}")) - }) - } -} - -define_u8_enum!( -/// Enum representing the reject code type prefix -RejectCodeTypePrefix { - /// Validation failed - ValidationFailed = 0, - /// Signed rejection - SignedRejection = 1, - /// Insufficient signers - InsufficientSigners = 2, - 
/// Missing transactions - MissingTransactions = 3, - /// Connectivity issues - ConnectivityIssues = 4, - /// Nonce timeout - NonceTimeout = 5, - /// Aggregator error - AggregatorError = 6 -}); - -impl TryFrom for RejectCodeTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown reject code type prefix: {value}")) - }) - } -} - -impl From<&RejectCode> for RejectCodeTypePrefix { - fn from(reject_code: &RejectCode) -> Self { - match reject_code { - RejectCode::ValidationFailed(_) => RejectCodeTypePrefix::ValidationFailed, - RejectCode::SignedRejection(_) => RejectCodeTypePrefix::SignedRejection, - RejectCode::InsufficientSigners(_) => RejectCodeTypePrefix::InsufficientSigners, - RejectCode::MissingTransactions(_) => RejectCodeTypePrefix::MissingTransactions, - RejectCode::ConnectivityIssues => RejectCodeTypePrefix::ConnectivityIssues, - RejectCode::NonceTimeout(_) => RejectCodeTypePrefix::NonceTimeout, - RejectCode::AggregatorError(_) => RejectCodeTypePrefix::AggregatorError, - } - } -} - -/// The messages being sent through the stacker db contracts -#[derive(Clone, PartialEq, Serialize, Deserialize)] -pub enum SignerMessage { - /// The signed/validated Nakamoto block for miners to observe - BlockResponse(BlockResponse), - /// DKG and Signing round data for other signers to observe - Packet(Packet), - /// The list of transactions for miners and signers to observe that this signer cares about - Transactions(Vec), - /// The results of a successful DKG - DkgResults { - /// The aggregate key from the DKG round - aggregate_key: Point, - /// The polynomial commits used to construct the aggregate key - party_polynomials: Vec<(u32, PolyCommitment)>, - }, - /// The encrypted state of the signer to be persisted - EncryptedSignerState(Vec), -} - -impl Debug for SignerMessage { - #[cfg_attr(test, mutants::skip)] - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result { - match self { - Self::BlockResponse(b) => Debug::fmt(b, f), - Self::Packet(p) => Debug::fmt(p, f), - Self::Transactions(t) => f.debug_tuple("Transactions").field(t).finish(), - Self::DkgResults { - aggregate_key, - party_polynomials, - } => { - let party_polynomials: Vec<_> = party_polynomials - .iter() - .map(|(ix, commit)| (ix, commit.to_string())) - .collect(); - f.debug_struct("DkgResults") - .field("aggregate_key", &aggregate_key.to_string()) - .field("party_polynomials", &party_polynomials) - .finish() - } - Self::EncryptedSignerState(s) => { - f.debug_tuple("EncryptedSignerState").field(s).finish() - } - } - } -} - -impl SignerMessage { - /// Helper function to determine the slot ID for the provided stacker-db writer id - #[cfg_attr(test, mutants::skip)] - pub fn msg_id(&self) -> MessageSlotID { - match self { - Self::Packet(packet) => match packet.msg { - Message::DkgBegin(_) => MessageSlotID::DkgBegin, - Message::DkgPrivateBegin(_) => MessageSlotID::DkgPrivateBegin, - Message::DkgEndBegin(_) => MessageSlotID::DkgEndBegin, - Message::DkgEnd(_) => MessageSlotID::DkgEnd, - Message::DkgPublicShares(_) => MessageSlotID::DkgPublicShares, - Message::DkgPrivateShares(_) => MessageSlotID::DkgPrivateShares, - Message::NonceRequest(_) => MessageSlotID::NonceRequest, - Message::NonceResponse(_) => MessageSlotID::NonceResponse, - Message::SignatureShareRequest(_) => MessageSlotID::SignatureShareRequest, - Message::SignatureShareResponse(_) => MessageSlotID::SignatureShareResponse, - }, - Self::BlockResponse(_) => MessageSlotID::BlockResponse, - Self::Transactions(_) => MessageSlotID::Transactions, - Self::DkgResults { .. 
} => MessageSlotID::DkgResults, - Self::EncryptedSignerState(_) => MessageSlotID::EncryptedSignerState, - } - } -} - -impl SignerMessage { - /// Provide an interface for consensus serializing a DkgResults `SignerMessage` - /// without constructing the DkgResults struct (this eliminates a clone) - pub fn serialize_dkg_result<'a, W: Write, I>( - fd: &mut W, - aggregate_key: &Point, - party_polynomials: I, - ) -> Result<(), CodecError> - where - I: ExactSizeIterator + Iterator, - { - SignerMessageTypePrefix::DkgResults - .to_u8() - .consensus_serialize(fd)?; - Self::serialize_dkg_result_components(fd, aggregate_key, party_polynomials) - } - - /// Serialize the internal components of DkgResults (this eliminates a clone) - fn serialize_dkg_result_components<'a, W: Write, I>( - fd: &mut W, - aggregate_key: &Point, - party_polynomials: I, - ) -> Result<(), CodecError> - where - I: ExactSizeIterator + Iterator, - { - aggregate_key.inner_consensus_serialize(fd)?; - let polynomials_len: u32 = party_polynomials - .len() - .try_into() - .map_err(|_| CodecError::ArrayTooLong)?; - polynomials_len.consensus_serialize(fd)?; - for (party_id, polynomial) in party_polynomials { - party_id.consensus_serialize(fd)?; - polynomial.inner_consensus_serialize(fd)?; - } - Ok(()) - } -} - -impl StacksMessageCodec for SignerMessage { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(SignerMessageTypePrefix::from(self) as u8))?; - match self { - SignerMessage::Packet(packet) => { - packet.inner_consensus_serialize(fd)?; - } - SignerMessage::BlockResponse(block_response) => { - write_next(fd, block_response)?; - } - SignerMessage::Transactions(transactions) => { - write_next(fd, transactions)?; - } - SignerMessage::DkgResults { - aggregate_key, - party_polynomials, - } => { - Self::serialize_dkg_result_components( - fd, - aggregate_key, - party_polynomials.iter().map(|(a, b)| (a, b)), - )?; - } - SignerMessage::EncryptedSignerState(encrypted_state) => { - 
write_next(fd, encrypted_state)?; - } - }; - Ok(()) - } - - #[cfg_attr(test, mutants::skip)] - fn consensus_deserialize(fd: &mut R) -> Result { - let type_prefix_byte = read_next::(fd)?; - let type_prefix = SignerMessageTypePrefix::try_from(type_prefix_byte)?; - let message = match type_prefix { - SignerMessageTypePrefix::Packet => { - let packet = Packet::inner_consensus_deserialize(fd)?; - SignerMessage::Packet(packet) - } - SignerMessageTypePrefix::BlockResponse => { - let block_response = read_next::(fd)?; - SignerMessage::BlockResponse(block_response) - } - SignerMessageTypePrefix::Transactions => { - let transactions = read_next::, _>(fd)?; - SignerMessage::Transactions(transactions) - } - SignerMessageTypePrefix::DkgResults => { - let aggregate_key = Point::inner_consensus_deserialize(fd)?; - let party_polynomial_len = u32::consensus_deserialize(fd)?; - let mut party_polynomials = Vec::with_capacity( - party_polynomial_len - .try_into() - .expect("FATAL: u32 could not fit in usize"), - ); - for _ in 0..party_polynomial_len { - let party_id = u32::consensus_deserialize(fd)?; - let polynomial = PolyCommitment::inner_consensus_deserialize(fd)?; - party_polynomials.push((party_id, polynomial)); - } - Self::DkgResults { - aggregate_key, - party_polynomials, - } - } - SignerMessageTypePrefix::EncryptedSignerState => { - // Typically the size of the signer state is much smaller, but in the fully degenerate case the size of the persisted state is - // 2800 * 32 * 4 + C for some small constant C. 
- // To have some margin, we're expanding the left term with an additional factor 4 - let max_encrypted_state_size = 2800 * 32 * 4 * 4; - let mut bound_reader = BoundReader::from_reader(fd, max_encrypted_state_size); - let encrypted_state = read_next::<_, _>(&mut bound_reader)?; - SignerMessage::EncryptedSignerState(encrypted_state) - } - }; - Ok(message) - } -} - -/// Work around for the fact that a lot of the structs being desierialized are not defined in messages.rs -pub trait StacksMessageCodecExtensions: Sized { - /// Serialize the struct to the provided writer - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError>; - /// Deserialize the struct from the provided reader - fn inner_consensus_deserialize(fd: &mut R) -> Result; -} - -impl StacksMessageCodecExtensions for Scalar { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.to_bytes()) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let scalar_bytes: [u8; 32] = read_next(fd)?; - Ok(Scalar::from(scalar_bytes)) - } -} - -impl StacksMessageCodecExtensions for Point { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.compress().as_bytes().to_vec()) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let compressed_bytes: Vec = read_next(fd)?; - let compressed = Compressed::try_from(compressed_bytes.as_slice()) - .map_err(|e| CodecError::DeserializeError(e.to_string()))?; - Point::try_from(&compressed).map_err(|e| CodecError::DeserializeError(e.to_string())) - } -} - -impl StacksMessageCodecExtensions for PolyCommitment { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.id.inner_consensus_serialize(fd)?; - let commit_len: u32 = self - .poly - .len() - .try_into() - .map_err(|_| CodecError::ArrayTooLong)?; - commit_len.consensus_serialize(fd)?; - for poly in self.poly.iter() { - poly.inner_consensus_serialize(fd)?; - } - Ok(()) - } - 
- fn inner_consensus_deserialize(fd: &mut R) -> Result { - let id = ID::inner_consensus_deserialize(fd)?; - let commit_len = u32::consensus_deserialize(fd)?; - let mut poly = Vec::with_capacity( - commit_len - .try_into() - .expect("FATAL: u32 could not fit in usize"), - ); - for _ in 0..commit_len { - poly.push(Point::inner_consensus_deserialize(fd)?); - } - Ok(Self { id, poly }) - } -} - -impl StacksMessageCodecExtensions for ID { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.id.inner_consensus_serialize(fd)?; - self.kG.inner_consensus_serialize(fd)?; - self.kca.inner_consensus_serialize(fd) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let id = Scalar::inner_consensus_deserialize(fd)?; - let k_g = Point::inner_consensus_deserialize(fd)?; - let kca = Scalar::inner_consensus_deserialize(fd)?; - Ok(Self { id, kG: k_g, kca }) - } -} - -#[allow(non_snake_case)] -impl StacksMessageCodecExtensions for TupleProof { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.R.inner_consensus_serialize(fd)?; - self.rB.inner_consensus_serialize(fd)?; - self.z.inner_consensus_serialize(fd) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let R = Point::inner_consensus_deserialize(fd)?; - let rB = Point::inner_consensus_deserialize(fd)?; - let z = Scalar::inner_consensus_deserialize(fd)?; - Ok(Self { R, rB, z }) - } -} - -impl StacksMessageCodecExtensions for BadPrivateShare { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.shared_key.inner_consensus_serialize(fd)?; - self.tuple_proof.inner_consensus_serialize(fd) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let shared_key = Point::inner_consensus_deserialize(fd)?; - let tuple_proof = TupleProof::inner_consensus_deserialize(fd)?; - Ok(Self { - shared_key, - tuple_proof, - }) - } -} - -impl StacksMessageCodecExtensions for HashSet { - fn inner_consensus_serialize(&self, fd: 
&mut W) -> Result<(), CodecError> { - write_next(fd, &(self.len() as u32))?; - for i in self { - write_next(fd, i)?; - } - Ok(()) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let mut set = Self::new(); - let len = read_next::(fd)?; - for _ in 0..len { - let i = read_next::(fd)?; - set.insert(i); - } - Ok(set) - } -} - -define_u8_enum!( -/// Enum representing the DKG failure type prefix -DkgFailureTypePrefix { - /// Bad state - BadState = 0, - /// Missing public shares - MissingPublicShares = 1, - /// Bad public shares - BadPublicShares = 2, - /// Missing private shares - MissingPrivateShares = 3, - /// Bad private shares - BadPrivateShares = 4 -}); - -impl TryFrom for DkgFailureTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown DKG failure type prefix: {value}")) - }) - } -} - -impl From<&DkgFailure> for DkgFailureTypePrefix { - fn from(failure: &DkgFailure) -> Self { - match failure { - DkgFailure::BadState => DkgFailureTypePrefix::BadState, - DkgFailure::MissingPublicShares(_) => DkgFailureTypePrefix::MissingPublicShares, - DkgFailure::BadPublicShares(_) => DkgFailureTypePrefix::BadPublicShares, - DkgFailure::MissingPrivateShares(_) => DkgFailureTypePrefix::MissingPrivateShares, - DkgFailure::BadPrivateShares(_) => DkgFailureTypePrefix::BadPrivateShares, - } - } -} - -impl StacksMessageCodecExtensions for DkgFailure { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(DkgFailureTypePrefix::from(self) as u8))?; - match self { - DkgFailure::BadState => { - // No additional data to serialize - } - DkgFailure::MissingPublicShares(shares) => { - shares.inner_consensus_serialize(fd)?; - } - DkgFailure::BadPublicShares(shares) => { - shares.inner_consensus_serialize(fd)?; - } - DkgFailure::MissingPrivateShares(shares) => { - shares.inner_consensus_serialize(fd)?; - } - 
DkgFailure::BadPrivateShares(shares) => { - write_next(fd, &(shares.len() as u32))?; - for (id, share) in shares { - write_next(fd, id)?; - share.inner_consensus_serialize(fd)?; - } - } - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let failure_type_prefix_byte = read_next::(fd)?; - let failure_type_prefix = DkgFailureTypePrefix::try_from(failure_type_prefix_byte)?; - let failure_type = match failure_type_prefix { - DkgFailureTypePrefix::BadState => DkgFailure::BadState, - DkgFailureTypePrefix::MissingPublicShares => { - let set = HashSet::::inner_consensus_deserialize(fd)?; - DkgFailure::MissingPublicShares(set) - } - DkgFailureTypePrefix::BadPublicShares => { - let set = HashSet::::inner_consensus_deserialize(fd)?; - DkgFailure::BadPublicShares(set) - } - DkgFailureTypePrefix::MissingPrivateShares => { - let set = HashSet::::inner_consensus_deserialize(fd)?; - DkgFailure::MissingPrivateShares(set) - } - DkgFailureTypePrefix::BadPrivateShares => { - let mut map = HashMap::new(); - let len = read_next::(fd)?; - for _ in 0..len { - let i = read_next::(fd)?; - let bad_share = BadPrivateShare::inner_consensus_deserialize(fd)?; - map.insert(i, bad_share); - } - DkgFailure::BadPrivateShares(map) - } - }; - Ok(failure_type) - } -} - -impl StacksMessageCodecExtensions for DkgBegin { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - Ok(DkgBegin { dkg_id }) - } -} - -impl StacksMessageCodecExtensions for DkgPrivateBegin { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.signer_ids)?; - write_next(fd, &self.key_ids) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let signer_ids = read_next::, _>(fd)?; - let key_ids = read_next::, _>(fd)?; - Ok(DkgPrivateBegin { 
- dkg_id, - signer_ids, - key_ids, - }) - } -} - -impl StacksMessageCodecExtensions for DkgEndBegin { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.signer_ids)?; - write_next(fd, &self.key_ids) - } - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let signer_ids = read_next::, _>(fd)?; - let key_ids = read_next::, _>(fd)?; - Ok(DkgEndBegin { - dkg_id, - signer_ids, - key_ids, - }) - } -} - -define_u8_enum!( -/// Enum representing the DKG status type prefix -DkgStatusTypePrefix { - /// Success - Success = 0, - /// Failure - Failure = 1 -}); - -impl TryFrom for DkgStatusTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown DKG status type prefix: {value}")) - }) - } -} - -impl From<&DkgStatus> for DkgStatusTypePrefix { - fn from(status: &DkgStatus) -> Self { - match status { - DkgStatus::Success => DkgStatusTypePrefix::Success, - DkgStatus::Failure(_) => DkgStatusTypePrefix::Failure, - } - } -} - -impl StacksMessageCodecExtensions for DkgEnd { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.signer_id)?; - write_next(fd, &(DkgStatusTypePrefix::from(&self.status) as u8))?; - match &self.status { - DkgStatus::Success => { - // No additional data to serialize - } - DkgStatus::Failure(failure) => { - failure.inner_consensus_serialize(fd)?; - } - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let signer_id = read_next::(fd)?; - let status_type_prefix_byte = read_next::(fd)?; - let status_type_prefix = DkgStatusTypePrefix::try_from(status_type_prefix_byte)?; - let status = match status_type_prefix { - DkgStatusTypePrefix::Success => DkgStatus::Success, - DkgStatusTypePrefix::Failure => { - let 
failure = DkgFailure::inner_consensus_deserialize(fd)?; - DkgStatus::Failure(failure) - } - }; - Ok(DkgEnd { - dkg_id, - signer_id, - status, - }) - } -} - -impl StacksMessageCodecExtensions for DkgPublicShares { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.signer_id)?; - write_next(fd, &(self.comms.len() as u32))?; - for (id, comm) in &self.comms { - write_next(fd, id)?; - comm.id.id.inner_consensus_serialize(fd)?; - comm.id.kG.inner_consensus_serialize(fd)?; - comm.id.kca.inner_consensus_serialize(fd)?; - write_next(fd, &(comm.poly.len() as u32))?; - for poly in comm.poly.iter() { - poly.inner_consensus_serialize(fd)? - } - } - Ok(()) - } - - #[allow(non_snake_case)] - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let signer_id = read_next::(fd)?; - let num_shares = read_next::(fd)?; - let mut comms = Vec::new(); - for _ in 0..num_shares { - let id = read_next::(fd)?; - let scalar_id = Scalar::inner_consensus_deserialize(fd)?; - let kG = Point::inner_consensus_deserialize(fd)?; - let kca = Scalar::inner_consensus_deserialize(fd)?; - let num_poly_coeffs = read_next::(fd)?; - let mut poly = Vec::new(); - for _ in 0..num_poly_coeffs { - poly.push(Point::inner_consensus_deserialize(fd)?); - } - comms.push(( - id, - PolyCommitment { - id: ID { - id: scalar_id, - kG, - kca, - }, - poly, - }, - )); - } - Ok(DkgPublicShares { - dkg_id, - signer_id, - comms, - }) - } -} - -impl StacksMessageCodecExtensions for DkgPrivateShares { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.signer_id)?; - write_next(fd, &(self.shares.len() as u32))?; - for (id, share_map) in &self.shares { - write_next(fd, id)?; - write_next(fd, &(share_map.len() as u32))?; - for (id, share) in share_map { - write_next(fd, id)?; - write_next(fd, share)?; - } - } - Ok(()) - } - - fn 
inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let signer_id = read_next::(fd)?; - let num_shares = read_next::(fd)?; - let mut shares = Vec::new(); - for _ in 0..num_shares { - let id = read_next::(fd)?; - let num_share_map = read_next::(fd)?; - let mut share_map = HashMap::new(); - for _ in 0..num_share_map { - let id = read_next::(fd)?; - let share: Vec = read_next(fd)?; - share_map.insert(id, share); - } - shares.push((id, share_map)); - } - Ok(DkgPrivateShares { - dkg_id, - signer_id, - shares, - }) - } -} - -impl StacksMessageCodecExtensions for NonceRequest { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.sign_id)?; - write_next(fd, &self.sign_iter_id)?; - write_next(fd, &self.message)?; - write_next(fd, &(self.is_taproot as u8))?; - write_next(fd, &(self.merkle_root.is_some() as u8))?; - if let Some(merkle_root) = self.merkle_root { - write_next(fd, &merkle_root)?; - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let sign_id = read_next::(fd)?; - let sign_iter_id = read_next::(fd)?; - let message = read_next::, _>(fd)?; - let is_taproot = read_next::(fd)? != 0; - let has_merkle_root = read_next::(fd)? != 0; - let merkle_root = if has_merkle_root { - Some(read_next::<[u8; 32], _>(fd)?) 
- } else { - None - }; - - Ok(NonceRequest { - dkg_id, - sign_id, - sign_iter_id, - message, - is_taproot, - merkle_root, - }) - } -} - -impl StacksMessageCodecExtensions for NonceResponse { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.sign_id)?; - write_next(fd, &self.sign_iter_id)?; - write_next(fd, &self.signer_id)?; - write_next(fd, &self.key_ids)?; - write_next(fd, &(self.nonces.len() as u32))?; - for nonce in &self.nonces { - nonce.D.inner_consensus_serialize(fd)?; - nonce.E.inner_consensus_serialize(fd)?; - } - write_next(fd, &self.message)?; - Ok(()) - } - - #[allow(non_snake_case)] - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let sign_id = read_next::(fd)?; - let sign_iter_id = read_next::(fd)?; - let signer_id = read_next::(fd)?; - let key_ids = read_next::, _>(fd)?; - let num_nonces = read_next::(fd)?; - let mut nonces = Vec::new(); - for _ in 0..num_nonces { - let D = Point::inner_consensus_deserialize(fd)?; - let E = Point::inner_consensus_deserialize(fd)?; - nonces.push(PublicNonce { D, E }); - } - let message = read_next::, _>(fd)?; - - Ok(NonceResponse { - dkg_id, - sign_id, - sign_iter_id, - signer_id, - key_ids, - nonces, - message, - }) - } -} - -impl StacksMessageCodecExtensions for SignatureShareRequest { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.sign_id)?; - write_next(fd, &self.sign_iter_id)?; - write_next(fd, &(self.nonce_responses.len() as u32))?; - for nonce_response in &self.nonce_responses { - nonce_response.inner_consensus_serialize(fd)?; - } - write_next(fd, &self.message)?; - write_next(fd, &(self.is_taproot as u8))?; - write_next(fd, &(self.merkle_root.is_some() as u8))?; - if let Some(merkle_root) = self.merkle_root { - write_next(fd, &merkle_root)?; - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: 
&mut R) -> Result { - let dkg_id = read_next::(fd)?; - let sign_id = read_next::(fd)?; - let sign_iter_id = read_next::(fd)?; - let num_nonce_responses = read_next::(fd)?; - let mut nonce_responses = Vec::new(); - for _ in 0..num_nonce_responses { - nonce_responses.push(NonceResponse::inner_consensus_deserialize(fd)?); - } - let message = read_next::, _>(fd)?; - let is_taproot = read_next::(fd)? != 0; - let has_merkle_root = read_next::(fd)? != 0; - let merkle_root = if has_merkle_root { - Some(read_next::<[u8; 32], _>(fd)?) - } else { - None - }; - - Ok(SignatureShareRequest { - dkg_id, - sign_id, - sign_iter_id, - nonce_responses, - message, - is_taproot, - merkle_root, - }) - } -} - -impl StacksMessageCodecExtensions for SignatureShareResponse { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.dkg_id)?; - write_next(fd, &self.sign_id)?; - write_next(fd, &self.sign_iter_id)?; - write_next(fd, &self.signer_id)?; - write_next(fd, &(self.signature_shares.len() as u32))?; - for share in &self.signature_shares { - write_next(fd, &share.id)?; - share.z_i.inner_consensus_serialize(fd)?; - write_next(fd, &share.key_ids)?; - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let dkg_id = read_next::(fd)?; - let sign_id = read_next::(fd)?; - let sign_iter_id = read_next::(fd)?; - let signer_id = read_next::(fd)?; - let num_shares = read_next::(fd)?; - let mut signature_shares = Vec::new(); - for _ in 0..num_shares { - let id = read_next::(fd)?; - let z_i = Scalar::inner_consensus_deserialize(fd)?; - let key_ids = read_next::, _>(fd)?; - signature_shares.push(SignatureShare { id, z_i, key_ids }); - } - Ok(SignatureShareResponse { - dkg_id, - sign_id, - sign_iter_id, - signer_id, - signature_shares, - }) - } -} - -impl StacksMessageCodecExtensions for Message { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(MessageTypePrefix::from(self) as u8))?; - 
match self { - Message::DkgBegin(dkg_begin) => { - dkg_begin.inner_consensus_serialize(fd)?; - } - Message::DkgPrivateBegin(dkg_private_begin) => { - dkg_private_begin.inner_consensus_serialize(fd)?; - } - Message::DkgEndBegin(dkg_end_begin) => { - dkg_end_begin.inner_consensus_serialize(fd)?; - } - Message::DkgEnd(dkg_end) => { - dkg_end.inner_consensus_serialize(fd)?; - } - Message::DkgPublicShares(dkg_public_shares) => { - dkg_public_shares.inner_consensus_serialize(fd)?; - } - Message::DkgPrivateShares(dkg_private_shares) => { - dkg_private_shares.inner_consensus_serialize(fd)?; - } - Message::NonceRequest(nonce_request) => { - nonce_request.inner_consensus_serialize(fd)?; - } - Message::NonceResponse(nonce_response) => { - nonce_response.inner_consensus_serialize(fd)?; - } - Message::SignatureShareRequest(signature_share_request) => { - signature_share_request.inner_consensus_serialize(fd)?; - } - Message::SignatureShareResponse(signature_share_response) => { - signature_share_response.inner_consensus_serialize(fd)?; - } - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let type_prefix_byte = read_next::(fd)?; - let type_prefix = MessageTypePrefix::try_from(type_prefix_byte)?; - let message = match type_prefix { - MessageTypePrefix::DkgBegin => { - Message::DkgBegin(DkgBegin::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::DkgPrivateBegin => { - Message::DkgPrivateBegin(DkgPrivateBegin::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::DkgEndBegin => { - Message::DkgEndBegin(DkgEndBegin::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::DkgEnd => Message::DkgEnd(DkgEnd::inner_consensus_deserialize(fd)?), - MessageTypePrefix::DkgPublicShares => { - Message::DkgPublicShares(DkgPublicShares::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::DkgPrivateShares => { - Message::DkgPrivateShares(DkgPrivateShares::inner_consensus_deserialize(fd)?) 
- } - MessageTypePrefix::NonceRequest => { - Message::NonceRequest(NonceRequest::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::NonceResponse => { - Message::NonceResponse(NonceResponse::inner_consensus_deserialize(fd)?) - } - MessageTypePrefix::SignatureShareRequest => Message::SignatureShareRequest( - SignatureShareRequest::inner_consensus_deserialize(fd)?, - ), - MessageTypePrefix::SignatureShareResponse => Message::SignatureShareResponse( - SignatureShareResponse::inner_consensus_deserialize(fd)?, - ), - }; - Ok(message) - } -} - -impl StacksMessageCodecExtensions for Packet { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.msg.inner_consensus_serialize(fd)?; - write_next(fd, &self.sig)?; - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let msg = Message::inner_consensus_deserialize(fd)?; - let sig: Vec = read_next(fd)?; - Ok(Packet { msg, sig }) - } -} - -define_u8_enum!( -/// Enum representing the block response type prefix -BlockResponseTypePrefix { - /// Accepted - Accepted = 0, - /// Rejected - Rejected = 1 -}); - -impl TryFrom for BlockResponseTypePrefix { - type Error = CodecError; - fn try_from(value: u8) -> Result { - Self::from_u8(value).ok_or_else(|| { - CodecError::DeserializeError(format!("Unknown block response type prefix: {value}")) - }) - } -} - -impl From<&BlockResponse> for BlockResponseTypePrefix { - fn from(block_response: &BlockResponse) -> Self { - match block_response { - BlockResponse::Accepted(_) => BlockResponseTypePrefix::Accepted, - BlockResponse::Rejected(_) => BlockResponseTypePrefix::Rejected, - } - } -} - -/// The response that a signer sends back to observing miners -/// either accepting or rejecting a Nakamoto block with the corresponding reason -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub enum BlockResponse { - /// The Nakamoto block was accepted and therefore signed - Accepted((Sha512Trunc256Sum, ThresholdSignature)), - /// The 
Nakamoto block was rejected and therefore not signed - Rejected(BlockRejection), -} - -impl std::fmt::Display for BlockResponse { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - BlockResponse::Accepted(a) => { - write!( - f, - "BlockAccepted: signer_sighash = {}, signature = {}", - a.0, a.1 - ) - } - BlockResponse::Rejected(r) => { - write!( - f, - "BlockRejected: signer_sighash = {}, code = {}, reason = {}", - r.reason_code, r.reason, r.signer_signature_hash - ) - } - } - } -} - -impl BlockResponse { - /// Create a new accepted BlockResponse for the provided block signer signature hash and signature - pub fn accepted(hash: Sha512Trunc256Sum, sig: Signature) -> Self { - Self::Accepted((hash, ThresholdSignature(sig))) - } - - /// Create a new rejected BlockResponse for the provided block signer signature hash and signature - pub fn rejected(hash: Sha512Trunc256Sum, sig: Signature) -> Self { - Self::Rejected(BlockRejection::new( - hash, - RejectCode::SignedRejection(ThresholdSignature(sig)), - )) - } -} - -impl StacksMessageCodec for BlockResponse { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(BlockResponseTypePrefix::from(self) as u8))?; - match self { - BlockResponse::Accepted((hash, sig)) => { - write_next(fd, hash)?; - write_next(fd, sig)?; - } - BlockResponse::Rejected(rejection) => { - write_next(fd, rejection)?; - } - }; - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let type_prefix_byte = read_next::(fd)?; - let type_prefix = BlockResponseTypePrefix::try_from(type_prefix_byte)?; - let response = match type_prefix { - BlockResponseTypePrefix::Accepted => { - let hash = read_next::(fd)?; - let sig = read_next::(fd)?; - BlockResponse::Accepted((hash, sig)) - } - BlockResponseTypePrefix::Rejected => { - let rejection = read_next::(fd)?; - BlockResponse::Rejected(rejection) - } - }; - Ok(response) - } -} - -/// A rejection response from a signer for a 
proposed block -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct BlockRejection { - /// The reason for the rejection - pub reason: String, - /// The reason code for the rejection - pub reason_code: RejectCode, - /// The signer signature hash of the block that was rejected - pub signer_signature_hash: Sha512Trunc256Sum, -} - -impl BlockRejection { - /// Create a new BlockRejection for the provided block and reason code - pub fn new(signer_signature_hash: Sha512Trunc256Sum, reason_code: RejectCode) -> Self { - Self { - reason: reason_code.to_string(), - reason_code, - signer_signature_hash, - } - } -} - -impl StacksMessageCodec for BlockRejection { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &self.reason.as_bytes().to_vec())?; - write_next(fd, &self.reason_code)?; - write_next(fd, &self.signer_signature_hash)?; - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let reason_bytes = read_next::, _>(fd)?; - let reason = String::from_utf8(reason_bytes).map_err(|e| { - CodecError::DeserializeError(format!("Failed to decode reason string: {:?}", &e)) - })?; - let reason_code = read_next::(fd)?; - let signer_signature_hash = read_next::(fd)?; - Ok(Self { - reason, - reason_code, - signer_signature_hash, - }) - } -} - -impl From for BlockRejection { - fn from(reject: BlockValidateReject) -> Self { - Self { - reason: reject.reason, - reason_code: RejectCode::ValidationFailed(reject.reason_code), - signer_signature_hash: reject.signer_signature_hash, - } - } -} - -/// This enum is used to supply a `reason_code` for block rejections -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub enum RejectCode { - /// RPC endpoint Validation failed - ValidationFailed(ValidateRejectCode), - /// Signers signed a block rejection - SignedRejection(ThresholdSignature), - /// Nonce timeout was reached - NonceTimeout(Vec), - /// Insufficient signers agreed to sign the block - InsufficientSigners(Vec), 
- /// An internal error occurred in the signer when aggregating the signaure - AggregatorError(String), - /// Missing the following expected transactions - MissingTransactions(Vec), - /// The block was rejected due to connectivity issues with the signer - ConnectivityIssues, -} - -impl From<&SignError> for RejectCode { - fn from(err: &SignError) -> Self { - match err { - SignError::NonceTimeout(_valid_signers, malicious_signers) => { - Self::NonceTimeout(malicious_signers.clone()) - } - SignError::InsufficientSigners(malicious_signers) => { - Self::InsufficientSigners(malicious_signers.clone()) - } - SignError::Aggregator(e) => Self::AggregatorError(e.to_string()), - } - } -} - -impl StacksMessageCodec for RejectCode { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - write_next(fd, &(RejectCodeTypePrefix::from(self) as u8))?; - match self { - RejectCode::ValidationFailed(code) => write_next(fd, &(*code as u8))?, - RejectCode::SignedRejection(sig) => write_next(fd, sig)?, - RejectCode::InsufficientSigners(malicious_signers) - | RejectCode::NonceTimeout(malicious_signers) => write_next(fd, malicious_signers)?, - RejectCode::MissingTransactions(missing_transactions) => { - write_next(fd, missing_transactions)? - } - RejectCode::AggregatorError(reason) => write_next(fd, &reason.as_bytes().to_vec())?, - RejectCode::ConnectivityIssues => write_next(fd, &4u8)?, - }; - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - let type_prefix_byte = read_next::(fd)?; - let type_prefix = RejectCodeTypePrefix::try_from(type_prefix_byte)?; - let code = match type_prefix { - RejectCodeTypePrefix::ValidationFailed => RejectCode::ValidationFailed( - ValidateRejectCode::try_from(read_next::(fd)?).map_err(|e| { - CodecError::DeserializeError(format!( - "Failed to decode validation reject code: {:?}", - &e - )) - })?, - ), - RejectCodeTypePrefix::SignedRejection => { - RejectCode::SignedRejection(read_next::(fd)?) 
- } - RejectCodeTypePrefix::InsufficientSigners => { - RejectCode::InsufficientSigners(read_next::, _>(fd)?) - } - RejectCodeTypePrefix::MissingTransactions => { - RejectCode::MissingTransactions(read_next::, _>(fd)?) - } - RejectCodeTypePrefix::NonceTimeout => { - RejectCode::NonceTimeout(read_next::, _>(fd)?) - } - RejectCodeTypePrefix::ConnectivityIssues => RejectCode::ConnectivityIssues, - RejectCodeTypePrefix::AggregatorError => { - let reason_bytes = read_next::, _>(fd)?; - let reason = String::from_utf8(reason_bytes).map_err(|e| { - CodecError::DeserializeError(format!( - "Failed to decode reason string: {:?}", - &e - )) - })?; - RejectCode::AggregatorError(reason) - } - }; - Ok(code) - } -} - -impl std::fmt::Display for RejectCode { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - RejectCode::ValidationFailed(code) => write!(f, "Validation failed: {:?}", code), - RejectCode::SignedRejection(sig) => { - write!(f, "A threshold number of signers rejected the block with the following signature: {:?}.", sig) - } - RejectCode::InsufficientSigners(malicious_signers) => write!( - f, - "Insufficient signers agreed to sign the block. The following signers are malicious: {:?}", - malicious_signers - ), - RejectCode::NonceTimeout(malicious_signers) => write!( - f, - "Nonce timeout occurred signers. The following signers are malicious: {:?}", - malicious_signers - ), - RejectCode::MissingTransactions(missing_transactions) => write!( - f, - "Missing the following expected transactions: {:?}", - missing_transactions.iter().map(|tx| tx.txid()).collect::>() - ), - RejectCode::ConnectivityIssues => write!( - f, - "The block was rejected due to connectivity issues with the signer." 
- ), - RejectCode::AggregatorError(reason) => write!( - f, - "An internal error occurred in the signer when aggregating the signaure: {:?}", - reason - ), - } - } -} - -impl From for SignerMessage { - fn from(packet: Packet) -> Self { - Self::Packet(packet) - } -} - -impl From for SignerMessage { - fn from(block_response: BlockResponse) -> Self { - Self::BlockResponse(block_response) - } -} - -impl From for SignerMessage { - fn from(block_rejection: BlockRejection) -> Self { - Self::BlockResponse(BlockResponse::Rejected(block_rejection)) - } -} - -impl From for SignerMessage { - fn from(rejection: BlockValidateReject) -> Self { - Self::BlockResponse(BlockResponse::Rejected(rejection.into())) - } -} - -#[cfg(test)] -mod test { - use blockstack_lib::chainstate::stacks::{ - TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, - TransactionSmartContract, TransactionVersion, - }; - use blockstack_lib::util_lib::strings::StacksString; - use rand::Rng; - use rand_core::OsRng; - use stacks_common::consts::CHAIN_ID_TESTNET; - use stacks_common::types::chainstate::StacksPrivateKey; - use wsts::common::Signature; - - use super::{StacksMessageCodecExtensions, *}; - - #[test] - #[should_panic] - // V1 signer slots do not have enough slots in Epoch 2.5. Something will need to be updated! 
- fn signer_slots_count_is_sane() { - let slot_identifiers_len = MessageSlotID::ALL.len(); - assert!( - SIGNER_SLOTS_PER_USER as usize >= slot_identifiers_len, - "stacks_common::SIGNER_SLOTS_PER_USER ({}) must be >= slot identifiers ({})", - SIGNER_SLOTS_PER_USER, - slot_identifiers_len, - ); - } - - #[test] - fn serde_reject_code() { - let code = RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let code = RejectCode::SignedRejection(ThresholdSignature::empty()); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let code = RejectCode::InsufficientSigners(vec![0, 1, 2]); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let code = RejectCode::NonceTimeout(vec![0, 1, 2]); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let code = RejectCode::AggregatorError("Test Error".into()); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let sk = StacksPrivateKey::new(); - let tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: CHAIN_ID_TESTNET, - auth: TransactionAuth::from_p2pkh(&sk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - 
payload: TransactionPayload::SmartContract( - TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), - }; - let code = RejectCode::MissingTransactions(vec![tx]); - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - - let code = RejectCode::ConnectivityIssues; - let serialized_code = code.serialize_to_vec(); - let deserialized_code = read_next::(&mut &serialized_code[..]) - .expect("Failed to deserialize RejectCode"); - assert_eq!(code, deserialized_code); - } - - #[test] - fn serde_block_rejection() { - let rejection = BlockRejection::new( - Sha512Trunc256Sum([0u8; 32]), - RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), - ); - let serialized_rejection = rejection.serialize_to_vec(); - let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) - .expect("Failed to deserialize BlockRejection"); - assert_eq!(rejection, deserialized_rejection); - - let rejection = BlockRejection::new( - Sha512Trunc256Sum([1u8; 32]), - RejectCode::SignedRejection(ThresholdSignature::empty()), - ); - let serialized_rejection = rejection.serialize_to_vec(); - let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) - .expect("Failed to deserialize BlockRejection"); - assert_eq!(rejection, deserialized_rejection); - - let rejection = BlockRejection::new( - Sha512Trunc256Sum([2u8; 32]), - RejectCode::InsufficientSigners(vec![0, 1, 2]), - ); - let serialized_rejection = rejection.serialize_to_vec(); - let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) - .expect("Failed to deserialize BlockRejection"); - assert_eq!(rejection, deserialized_rejection); - - let rejection = BlockRejection::new( - Sha512Trunc256Sum([2u8; 32]), - RejectCode::NonceTimeout(vec![0, 1, 2]), - ); - let serialized_rejection = 
rejection.serialize_to_vec(); - let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) - .expect("Failed to deserialize BlockRejection"); - assert_eq!(rejection, deserialized_rejection); - - let rejection = BlockRejection::new( - Sha512Trunc256Sum([2u8; 32]), - RejectCode::AggregatorError("Test Error".into()), - ); - let serialized_rejection = rejection.serialize_to_vec(); - let deserialized_rejection = read_next::(&mut &serialized_rejection[..]) - .expect("Failed to deserialize BlockRejection"); - assert_eq!(rejection, deserialized_rejection); - } - - #[test] - fn serde_block_response() { - let response = - BlockResponse::Accepted((Sha512Trunc256Sum([0u8; 32]), ThresholdSignature::empty())); - let serialized_response = response.serialize_to_vec(); - let deserialized_response = read_next::(&mut &serialized_response[..]) - .expect("Failed to deserialize BlockResponse"); - assert_eq!(response, deserialized_response); - - let response = BlockResponse::Rejected(BlockRejection::new( - Sha512Trunc256Sum([1u8; 32]), - RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock), - )); - let serialized_response = response.serialize_to_vec(); - let deserialized_response = read_next::(&mut &serialized_response[..]) - .expect("Failed to deserialize BlockResponse"); - assert_eq!(response, deserialized_response); - } - - #[test] - fn serde_point_scalar() { - let mut rng = OsRng; - let scalar = Scalar::random(&mut rng); - let mut serialized_scalar = vec![]; - scalar - .inner_consensus_serialize(&mut serialized_scalar) - .expect("serialization to buffer failed."); - let deserialized_scalar = Scalar::inner_consensus_deserialize(&mut &serialized_scalar[..]) - .expect("Failed to deserialize Scalar"); - assert_eq!(scalar, deserialized_scalar); - - let point = Point::from(scalar); - let mut serialized_point = vec![]; - point - .inner_consensus_serialize(&mut serialized_point) - .expect("serialization to buffer failed."); - let deserialized_point = 
Point::inner_consensus_deserialize(&mut &serialized_point[..]) - .expect("Failed to deserialize Point"); - assert_eq!(point, deserialized_point); - } - - fn test_fixture_packet(msg: Message) { - let packet = Packet { - msg, - sig: vec![1u8; 20], - }; - let mut serialized_packet = vec![]; - packet - .inner_consensus_serialize(&mut serialized_packet) - .expect("serialization to buffer failed."); - let deserialized_packet = Packet::inner_consensus_deserialize(&mut &serialized_packet[..]) - .expect("Failed to deserialize Packet"); - assert_eq!(packet, deserialized_packet); - } - - #[test] - fn serde_packet() { - // Test DKG begin Packet - test_fixture_packet(Message::DkgBegin(DkgBegin { dkg_id: 0 })); - - let dkg_id = rand::thread_rng().gen(); - let signer_id = rand::thread_rng().gen(); - let sign_id = rand::thread_rng().gen(); - let sign_iter_id = rand::thread_rng().gen(); - let mut signer_ids = [0u32; 100]; - rand::thread_rng().fill(&mut signer_ids[..]); - - let mut key_ids = [0u32; 100]; - rand::thread_rng().fill(&mut key_ids[..]); - let nmb_items = rand::thread_rng().gen_range(1..100); - - // Test DKG private begin Packet - test_fixture_packet(Message::DkgPrivateBegin(DkgPrivateBegin { - dkg_id, - signer_ids: signer_ids.to_vec(), - key_ids: key_ids.to_vec(), - })); - - // Test DKG end begin Packet - test_fixture_packet(Message::DkgEndBegin(DkgEndBegin { - dkg_id, - signer_ids: signer_ids.to_vec(), - key_ids: key_ids.to_vec(), - })); - - // Test DKG end Packet Success - test_fixture_packet(Message::DkgEnd(DkgEnd { - dkg_id, - signer_id, - status: DkgStatus::Success, - })); - - // Test DKG end Packet Failure - test_fixture_packet(Message::DkgEnd(DkgEnd { - dkg_id, - signer_id, - status: DkgStatus::Failure(DkgFailure::BadState), - })); - - // Test DKG public shares Packet - let rng = &mut OsRng; - let comms = (0..nmb_items) - .map(|i| { - ( - i, - PolyCommitment { - id: ID { - id: Scalar::random(rng), - kG: Point::from(Scalar::random(rng)), - kca: Scalar::random(rng), 
- }, - poly: vec![ - Point::from(Scalar::random(rng)), - Point::from(Scalar::random(rng)), - ], - }, - ) - }) - .collect(); - test_fixture_packet(Message::DkgPublicShares(DkgPublicShares { - dkg_id, - signer_id, - comms, - })); - - // Test DKG private shares Packet - let mut shares = vec![]; - for i in 0..nmb_items { - let mut shares_map = HashMap::new(); - for i in 0..nmb_items { - let mut bytes = [0u8; 20]; - rng.fill(&mut bytes[..]); - shares_map.insert(i, bytes.to_vec()); - } - shares.push((i, shares_map)); - } - test_fixture_packet(Message::DkgPrivateShares(DkgPrivateShares { - dkg_id, - signer_id, - shares, - })); - - // Test Nonce request Packet with merkle root - let mut message = [0u8; 40]; - rng.fill(&mut message[..]); - let mut merkle_root_bytes = [0u8; 32]; - rng.fill(&mut merkle_root_bytes[..]); - let merkle_root = Some(merkle_root_bytes); - - test_fixture_packet(Message::NonceRequest(NonceRequest { - dkg_id, - sign_id, - sign_iter_id, - message: message.to_vec(), - is_taproot: true, - merkle_root, - })); - - // Test Nonce request Packet with no merkle root - test_fixture_packet(Message::NonceRequest(NonceRequest { - dkg_id, - sign_id, - sign_iter_id, - message: message.to_vec(), - is_taproot: false, - merkle_root: None, - })); - - // Test Nonce response Packet - let mut nonces = vec![]; - for _ in 0..nmb_items { - nonces.push(PublicNonce { - D: Point::from(Scalar::random(rng)), - E: Point::from(Scalar::random(rng)), - }); - } - let nonce_response = NonceResponse { - dkg_id, - sign_id, - sign_iter_id, - signer_id, - key_ids: key_ids.to_vec(), - nonces, - message: message.to_vec(), - }; - test_fixture_packet(Message::NonceResponse(nonce_response.clone())); - - // Test Signature share request Packet with merkle root and nonce response - test_fixture_packet(Message::SignatureShareRequest(SignatureShareRequest { - dkg_id, - sign_id, - sign_iter_id, - nonce_responses: vec![nonce_response], - message: message.to_vec(), - is_taproot: true, - merkle_root, - 
})); - - // Test Signature share request Packet with no merkle root and nonce response - test_fixture_packet(Message::SignatureShareRequest(SignatureShareRequest { - dkg_id, - sign_id, - sign_iter_id, - nonce_responses: vec![], - message: message.to_vec(), - is_taproot: false, - merkle_root: None, - })); - - // Test Signature share response Packet - let mut signature_shares = vec![]; - for i in 0..nmb_items { - let mut key_ids = vec![]; - for i in 0..nmb_items { - key_ids.push(i); - } - signature_shares.push(SignatureShare { - id: i, - z_i: Scalar::random(rng), - key_ids, - }); - } - test_fixture_packet(Message::SignatureShareResponse(SignatureShareResponse { - dkg_id, - sign_id, - sign_iter_id, - signer_id, - signature_shares, - })); - } - - #[test] - fn serde_signer_message() { - let rng = &mut OsRng; - let signer_message = SignerMessage::Packet(Packet { - msg: Message::DkgBegin(DkgBegin { dkg_id: 0 }), - sig: vec![1u8; 20], - }); - - let serialized_signer_message = signer_message.serialize_to_vec(); - let deserialized_signer_message = - read_next::(&mut &serialized_signer_message[..]) - .expect("Failed to deserialize SignerMessage"); - assert_eq!(signer_message, deserialized_signer_message); - - let signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted(( - Sha512Trunc256Sum([2u8; 32]), - ThresholdSignature(Signature { - R: Point::from(Scalar::random(rng)), - z: Scalar::random(rng), - }), - ))); - let serialized_signer_message = signer_message.serialize_to_vec(); - let deserialized_signer_message = - read_next::(&mut &serialized_signer_message[..]) - .expect("Failed to deserialize SignerMessage"); - assert_eq!(signer_message, deserialized_signer_message); - - let sk = StacksPrivateKey::new(); - let tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: CHAIN_ID_TESTNET, - auth: TransactionAuth::from_p2pkh(&sk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - 
post_conditions: vec![], - payload: TransactionPayload::SmartContract( - TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), - }; - let signer_message = SignerMessage::Transactions(vec![tx]); - let serialized_signer_message = signer_message.serialize_to_vec(); - let deserialized_signer_message = - read_next::(&mut &serialized_signer_message[..]) - .expect("Failed to deserialize SignerMessage"); - assert_eq!(signer_message, deserialized_signer_message); - } -} diff --git a/libsigner/src/v1/mod.rs b/libsigner/src/v1/mod.rs deleted file mode 100644 index e5a691efb2..0000000000 --- a/libsigner/src/v1/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -/// Messages for the v1 signer -pub mod messages; diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index 75692d83c6..81b4326d4c 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -31,7 +31,6 @@ slog-term = "2.6.0" slog-json = { version = "2.3.0", optional = true } chrono = "0.4.19" libc = "0.2.82" -wsts = { workspace = true } hashbrown = { workspace = true } rusqlite = { workspace = true, optional = true } diff --git a/stacks-common/src/util/secp256k1.rs b/stacks-common/src/util/secp256k1.rs index 034a5a4941..c3b80acac5 100644 --- a/stacks-common/src/util/secp256k1.rs +++ b/stacks-common/src/util/secp256k1.rs @@ -26,9 +26,6 @@ use secp256k1::{ use serde::de::{Deserialize, Error as de_Error}; use serde::ser::Error as ser_Error; use serde::Serialize; -use wsts::common::Signature as WSTSSignature; -use wsts::curve::point::{Compressed, Point}; -use wsts::curve::scalar::Scalar; use super::hash::Sha256Sum; use crate::impl_byte_array_message_codec; @@ -713,83 +710,4 @@ mod tests { runtime_verify - runtime_recover ); } - - /* - #[test] - fn test_schnorr_signature_serde() { - use wsts::traits::Aggregator; - - // Test that an empty conversion fails. - let empty_signature = SchnorrSignature::default(); - assert!(empty_signature.to_wsts_signature().is_none()); - - // Generate a random Signature and ensure it successfully converts - let mut rng = rand_core::OsRng::default(); - let msg = - "You Idiots! These Are Not Them! 
You\'ve Captured Their Stunt Doubles!".as_bytes(); - - let num_keys = 10; - let threshold = 7; - let party_key_ids: Vec> = - vec![vec![0, 1, 2], vec![3, 4], vec![5, 6, 7], vec![8, 9]]; - let num_parties = party_key_ids.len().try_into().unwrap(); - - // Create the parties - let mut signers: Vec = party_key_ids - .iter() - .enumerate() - .map(|(pid, pkids)| { - wsts::v2::Party::new( - pid.try_into().unwrap(), - pkids, - num_parties, - num_keys, - threshold, - &mut rng, - ) - }) - .collect(); - - // Generate an aggregate public key - let comms = match wsts::v2::test_helpers::dkg(&mut signers, &mut rng) { - Ok(comms) => comms, - Err(secret_errors) => { - panic!("Got secret errors from DKG: {:?}", secret_errors); - } - }; - let aggregate_public_key = comms - .iter() - .fold(Point::default(), |s, comm| s + comm.poly[0]); - - // signers [0,1,3] have "threshold" keys - { - let mut signers = [signers[0].clone(), signers[1].clone(), signers[3].clone()].to_vec(); - let mut sig_agg = wsts::v2::Aggregator::new(num_keys, threshold); - - sig_agg.init(comms.clone()).expect("aggregator init failed"); - - let (nonces, sig_shares, key_ids) = - wsts::v2::test_helpers::sign(msg, &mut signers, &mut rng); - let original_signature = sig_agg - .sign(msg, &nonces, &sig_shares, &key_ids) - .expect("aggregator sig failed"); - // Serialize the signature and verify the results - let schnorr_signature = SchnorrSignature::from(&original_signature); - assert_eq!( - schnorr_signature[..33], - original_signature.R.compress().data[..] 
- );
-         assert_eq!(schnorr_signature[33..], original_signature.z.to_bytes());
-
-         // Deserialize the signature and verify the results
-         let reverted_signature = schnorr_signature
-             .to_wsts_signature()
-             .expect("Failed to convert schnorr signature to wsts signature");
-         assert_eq!(reverted_signature.R, original_signature.R);
-         assert_eq!(reverted_signature.z, original_signature.z);
-         assert!(original_signature.verify(&aggregate_public_key, msg));
-         assert!(reverted_signature.verify(&aggregate_public_key, msg));
-     }
- }
- */
 }
diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml
index 64e3cd5ca9..da94cc10de 100644
--- a/stacks-signer/Cargo.toml
+++ b/stacks-signer/Cargo.toml
@@ -43,7 +43,6 @@ tiny_http = { version = "0.12", optional = true }
 toml = "0.5.6"
 tracing = "0.1.37"
 tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
-wsts = { workspace = true }
 rand = { workspace = true }
 url = "2.1.0"
 rusqlite = { workspace = true }
diff --git a/stacks-signer/README.md b/stacks-signer/README.md
index b3c287d9e3..6e9e0be760 100644
--- a/stacks-signer/README.md
+++ b/stacks-signer/README.md
@@ -1,6 +1,6 @@
 # stacks-signer: Stacks Signer CLI
-stacks-signer is a command-line interface (CLI) for executing DKG (Distributed Key Generation) rounds, signing transactions and blocks, and more within the Stacks blockchain ecosystem. This tool provides various subcommands to interact with the StackerDB contract, perform cryptographic operations, and run a Stacks compliant signer.
+stacks-signer is a command-line interface (CLI) for operating a Stacks compliant signer. This tool provides various subcommands to interact with the StackerDB contract, generate SIP voting and stacking signatures, and monitor the Signer network for expected behaviour.
 ## Installation
@@ -25,18 +25,92 @@ To use stacks-signer, you need to build and install the Rust program. You can do
    ./target/release/stacks-signer --help
    ```
+4. 
**Build with Prometheus Metrics Enabled**: You can optionally build and run the stacks-signer with monitoring metrics enabled. + + ```bash + cd stacks-signer + cargo build --release --features "monitoring_prom" + cargo run --features "monitoring_prom" -p stacks-signer run --config + ``` + +You must specify the "metrics_endpoint" option in the config file to serve these metrics. +See [metrics documentation](TODO) for a complete breakdown of the available metrics. + ## Usage The stacks-signer CLI provides the following subcommands: +### `run` + +Start the signer and handle requests to sign Stacks block proposals. + +```bash +./stacks-signer run --config + +``` + +### `monitor-signers` + +Periodically query the current reward cycle's signers' StackerDB slots to verify their operation. + +```bash +./stacks-signer monitor-signers --host --interval --max-age + +``` +- `--host`: The Stacks node to connect to. +- `--interval`: The polling interval in seconds for querying stackerDB. +- `--max-age`: The max age in seconds before a signer message is considered stale. + +### `generate-stacking-signature` + +Generate a signature for stacking. + +```bash +./stacks-signer generate-stacking-signature --config --pox-address
--reward-cycle  --period  --max-amount  --auth-id 
+
+```
+- `--config`: The path to the signer configuration file.
+- `--pox-address`: The BTC address used to receive rewards
+- `--reward-cycle`: The reward cycle during which this signature is used
+- `--method`: Stacking method that can be used
+- `--period`: Number of cycles used as a lock period. Use `1` for stack-aggregation-commit method
+- `--max-amount`: The max amount of uSTX that can be used in this unique transaction
+- `--auth-id`: A unique identifier to prevent re-using this authorization
+- `--json`: Output information in JSON format
+
+### `generate-vote`
+
+Generate a vote signature for a specific SIP
+
+```bash
+./stacks-signer generate-vote --config  --vote  --sip 
+
+```
+- `--config`: The path to the signer configuration file.
+- `--vote`: The vote (YES or NO)
+- `--sip`: The number of the SIP being voted on
+
+### `verify-vote`
+
+Verify the validity of a vote signature for a specific SIP.
+
+```bash
+./stacks-signer verify-vote --public-key  --signature  --vote  --sip 
+
+```
+- `--public-key`: The stacks public key to verify against in hexadecimal format
+- `--signature`: The message signature in hexadecimal format
+- `--vote`: The vote (YES or NO)
+- `--sip`: The number of the SIP being voted on
+
 ### `get-chunk`
 Retrieve a chunk from the StackerDB instance.
 ```bash
 ./stacks-signer get-chunk --host  --contract  --slot_id  --slot_version 
-```
+``` 
 - `--host`: The stacks node host to connect to.
 - `--contract`: The contract ID of the StackerDB instance.
 - `--slot-id`: The slot ID to get.
@@ -49,7 +123,6 @@ Retrieve the latest chunk from the StackerDB instance.
 ```bash
 ./stacks-signer get-latest-chunk --host  --contract  --slot-id 
 ```
-
 - `--host`: The stacks node host to connect to.
 - `--contract`: The contract ID of the StackerDB instance.
 - `--slot-id`: The slot ID to get.
@@ -71,7 +144,6 @@ Upload a chunk to the StackerDB instance.
```bash ./stacks-signer put-chunk --host --contract --private_key --slot-id --slot-version [--data ] ``` - - `--host`: The stacks node host to connect to. - `--contract`: The contract ID of the StackerDB instance. - `--private_key`: The Stacks private key to use in hexademical format. @@ -79,64 +151,10 @@ Upload a chunk to the StackerDB instance. - `--slot-version`: The slot version to get. - `--data`: The data to upload. If you wish to pipe data using STDIN, use with '-'. -### `dkg` - -Run a distributed key generation round through stacker-db. - -```bash -./stacks-signer dkg --config -``` - -- `--config`: The path to the signer configuration file. - -### `dkg-sign` - -Run a distributed key generation round and sign a given message through stacker-db. - -```bash -./stacks-signer dkg-sign --config [--data ] -``` -- `--config`: The path to the signer configuration file. -- `--data`: The data to sign. If you wish to pipe data using STDIN, use with '-'. - - -### `dkg-sign` - -Sign a given message through stacker-db. - -```bash -./stacks-signer sign --config [--data ] -``` -- `--config`: The path to the signer configuration file. -- `--data`: The data to sign. If you wish to pipe data using STDIN, use with '-'. - -### `run` - -Start the signer and handle requests to sign messages and participate in DKG rounds via stacker-db. -```bash -./stacks-signer run --config -``` -- `--config`: The path to the signer configuration file. - -### `generate-files` - -Generate the necessary files to run a collection of signers to communicate via stacker-db. - -```bash -./stacks-signer generate-files --host --contract --num-signers --num-keys --network --dir -``` -- `--host`: The stacks node host to connect to. -- `--contract`: The contract ID of the StackerDB signer contract. -- `--num-signers`: The number of signers to generate configuration files for. -- `--num-keys`: The total number of key ids to distribute among the signers. 
-- `--private-keys:` A path to a file containing a list of hexadecimal representations of Stacks private keys. Required if `--num-keys` is not set. -- `--network`: The network to use. One of "mainnet" or "testnet". -- `--dir`: The directory to write files to. Defaults to the current directory. -- `--timeout`: Optional timeout in milliseconds to use when polling for updates in the StackerDB runloop. - ## Contributing To contribute to the stacks-signer project, please read the [Contributing Guidelines](../CONTRIBUTING.md). + ## License This program is open-source software released under the terms of the GNU General Public License (GPL). You should have received a copy of the GNU General Public License along with this program. \ No newline at end of file diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 5dedfe82e3..5e957a2166 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -123,35 +123,28 @@ where #[cfg(test)] pub(crate) mod tests { + use std::collections::{BTreeMap, HashMap}; use std::io::{Read, Write}; use std::net::{SocketAddr, TcpListener}; use blockstack_lib::chainstate::stacks::boot::POX_4_NAME; use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; - use blockstack_lib::net::api::getaccount::AccountEntryResponse; use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::api::getpoxinfo::{ RPCPoxCurrentCycleInfo, RPCPoxEpoch, RPCPoxInfoData, RPCPoxNextCycleInfo, }; - use blockstack_lib::net::api::postfeerate::{RPCFeeEstimate, RPCFeeEstimateResponse}; use blockstack_lib::util_lib::boot::boot_code_id; use clarity::vm::costs::ExecutionCost; - use clarity::vm::types::TupleData; use clarity::vm::Value as ClarityValue; - use hashbrown::{HashMap, HashSet}; use libsigner::SignerEntries; use rand::distributions::Standard; use rand::{thread_rng, Rng}; - use rand_core::{OsRng, RngCore}; + use rand_core::RngCore; use stacks_common::types::chainstate::{ 
BlockHeaderHash, ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, }; use stacks_common::types::{StacksEpochId, StacksPublicKeyBuffer}; use stacks_common::util::hash::{Hash160, Sha256Sum}; - use wsts::curve::ecdsa; - use wsts::curve::point::{Compressed, Point}; - use wsts::curve::scalar::Scalar; - use wsts::state_machine::PublicKeys; use super::*; use crate::config::{GlobalConfig, SignerConfig}; @@ -225,28 +218,6 @@ pub(crate) mod tests { ConsensusHash(hash) } - /// Build a response for the get_last_round request - pub fn build_get_last_round_response(round: u64) -> String { - let value = ClarityValue::some(ClarityValue::UInt(round as u128)) - .expect("Failed to create response"); - build_read_only_response(&value) - } - - /// Build a response for the get_account_nonce request - pub fn build_account_nonce_response(nonce: u64) -> String { - let account_nonce_entry = AccountEntryResponse { - nonce, - balance: "0x00000000000000000000000000000000".to_string(), - locked: "0x00000000000000000000000000000000".to_string(), - unlock_height: thread_rng().next_u64(), - balance_proof: None, - nonce_proof: None, - }; - let account_nonce_entry_json = serde_json::to_string(&account_nonce_entry) - .expect("Failed to serialize account nonce entry"); - format!("HTTP/1.1 200 OK\n\n{account_nonce_entry_json}") - } - /// Build a response to get_pox_data_with_retry where it returns a specific reward cycle id and block height pub fn build_get_pox_data_response( reward_cycle: Option, @@ -328,41 +299,6 @@ pub(crate) mod tests { (format!("HTTP/1.1 200 Ok\n\n{pox_info_json}"), pox_info) } - /// Build a response for the get_approved_aggregate_key request - pub fn build_get_approved_aggregate_key_response(point: Option) -> String { - let clarity_value = if let Some(point) = point { - ClarityValue::some( - ClarityValue::buff_from(point.compress().as_bytes().to_vec()) - .expect("BUG: Failed to create clarity value from point"), - ) - .expect("BUG: Failed to create clarity value 
from point") - } else { - ClarityValue::none() - }; - build_read_only_response(&clarity_value) - } - - /// Build a response for the get_approved_aggregate_key request - pub fn build_get_vote_for_aggregate_key_response(point: Option) -> String { - let clarity_value = if let Some(point) = point { - ClarityValue::some(ClarityValue::Tuple( - TupleData::from_data(vec![ - ( - "aggregate-public-key".into(), - ClarityValue::buff_from(point.compress().as_bytes().to_vec()) - .expect("BUG: Failed to create clarity value from point"), - ), - ("signer-weight".into(), ClarityValue::UInt(1)), // fixed for testing purposes - ]) - .expect("BUG: Failed to create clarity value from tuple data"), - )) - .expect("BUG: Failed to create clarity value from tuple data") - } else { - ClarityValue::none() - }; - build_read_only_response(&clarity_value) - } - /// Build a response for the get_peer_info_with_retry request with a specific stacks tip height and consensus hash pub fn build_get_peer_info_response( burn_block_height: Option, @@ -416,192 +352,70 @@ pub(crate) mod tests { format!("HTTP/1.1 200 OK\n\n{{\"okay\":true,\"result\":\"{hex}\"}}") } - /// Build a response for the get_medium_estimated_fee_ustx_response request with a specific medium estimate - pub fn build_get_medium_estimated_fee_ustx_response( - medium_estimate: u64, - ) -> (String, RPCFeeEstimateResponse) { - // Generate some random info - let fee_response = RPCFeeEstimateResponse { - estimated_cost: ExecutionCost { - write_length: thread_rng().next_u64(), - write_count: thread_rng().next_u64(), - read_length: thread_rng().next_u64(), - read_count: thread_rng().next_u64(), - runtime: thread_rng().next_u64(), - }, - estimated_cost_scalar: thread_rng().next_u64(), - cost_scalar_change_by_byte: thread_rng().next_u32() as f64, - estimations: vec![ - RPCFeeEstimate { - fee_rate: thread_rng().next_u32() as f64, - fee: thread_rng().next_u64(), - }, - RPCFeeEstimate { - fee_rate: thread_rng().next_u32() as f64, - fee: 
medium_estimate, - }, - RPCFeeEstimate { - fee_rate: thread_rng().next_u32() as f64, - fee: thread_rng().next_u64(), - }, - ], - }; - let fee_response_json = serde_json::to_string(&fee_response) - .expect("Failed to serialize fee estimate response"); - ( - format!("HTTP/1.1 200 OK\n\n{fee_response_json}"), - fee_response, - ) - } - /// Generate a signer config with the given number of signers and keys where the first signer is /// obtained from the provided global config - pub fn generate_signer_config( - config: &GlobalConfig, - num_signers: u32, - num_keys: u32, - ) -> SignerConfig { + pub fn generate_signer_config(config: &GlobalConfig, num_signers: u32) -> SignerConfig { assert!( num_signers > 0, "Cannot generate 0 signers...Specify at least 1 signer." ); - assert!( - num_keys > 0, - "Cannot generate 0 keys for the provided signers...Specify at least 1 key." - ); - let mut public_keys = PublicKeys { - signers: HashMap::new(), - key_ids: HashMap::new(), - }; + + let weight_per_signer = 100 / num_signers; + let mut remaining_weight = 100 % num_signers; + let reward_cycle = thread_rng().next_u64(); - let rng = &mut OsRng; - let num_keys = num_keys / num_signers; - let remaining_keys = num_keys % num_signers; - let mut coordinator_key_ids = HashMap::new(); - let mut signer_key_ids = HashMap::new(); - let mut signer_ids = HashMap::new(); - let mut start_key_id = 1u32; - let mut end_key_id = start_key_id; - let mut signer_public_keys = HashMap::new(); - let mut signer_slot_ids = vec![]; - let ecdsa_private_key = config.ecdsa_private_key; - let ecdsa_public_key = - ecdsa::PublicKey::new(&ecdsa_private_key).expect("Failed to create ecdsa public key"); - // Key ids start from 1 hence the wrapping adds everywhere + + let mut signer_pk_to_id = HashMap::new(); + let mut signer_id_to_pk = HashMap::new(); + let mut signer_addr_to_id = HashMap::new(); + let mut signer_pks = Vec::new(); + let mut signer_slot_ids = Vec::new(); + let mut signer_id_to_addr = BTreeMap::new(); + 
let mut signer_addr_to_weight = HashMap::new(); + let mut signer_addresses = Vec::new(); + for signer_id in 0..num_signers { - end_key_id = if signer_id.wrapping_add(1) == num_signers { - end_key_id.wrapping_add(remaining_keys) + let private_key = if signer_id == 0 { + config.stacks_private_key } else { - end_key_id.wrapping_add(num_keys) + StacksPrivateKey::new() }; - if signer_id == 0 { - public_keys.signers.insert(signer_id, ecdsa_public_key); - let signer_public_key = - Point::try_from(&Compressed::from(ecdsa_public_key.to_bytes())).unwrap(); - signer_public_keys.insert(signer_id, signer_public_key); - public_keys.signers.insert(signer_id, ecdsa_public_key); - for k in start_key_id..end_key_id { - public_keys.key_ids.insert(k, ecdsa_public_key); - coordinator_key_ids - .entry(signer_id) - .or_insert(HashSet::new()) - .insert(k); - signer_key_ids - .entry(signer_id) - .or_insert(Vec::new()) - .push(k); - } - start_key_id = end_key_id; - let address = StacksAddress::p2pkh( - false, - &StacksPublicKey::from_slice(ecdsa_public_key.to_bytes().as_slice()) - .expect("Failed to create stacks public key"), - ); - signer_slot_ids.push(SignerSlotID(signer_id)); - signer_ids.insert(address, signer_id); + let public_key = StacksPublicKey::from_private(&private_key); - continue; - } - let private_key = Scalar::random(rng); - let public_key = ecdsa::PublicKey::new(&private_key).unwrap(); - let signer_public_key = - Point::try_from(&Compressed::from(public_key.to_bytes())).unwrap(); - signer_public_keys.insert(signer_id, signer_public_key); - public_keys.signers.insert(signer_id, public_key); - for k in start_key_id..end_key_id { - public_keys.key_ids.insert(k, public_key); - coordinator_key_ids - .entry(signer_id) - .or_insert(HashSet::new()) - .insert(k); - signer_key_ids - .entry(signer_id) - .or_insert(Vec::new()) - .push(k); - } - let address = StacksAddress::p2pkh( - false, - &StacksPublicKey::from_slice(public_key.to_bytes().as_slice()) - .expect("Failed to create 
stacks public key"), - ); + signer_id_to_pk.insert(signer_id, public_key); + signer_pk_to_id.insert(public_key, signer_id); + let address = StacksAddress::p2pkh(false, &public_key); + signer_addr_to_id.insert(address, signer_id); + signer_pks.push(public_key); signer_slot_ids.push(SignerSlotID(signer_id)); - signer_ids.insert(address, signer_id); - start_key_id = end_key_id; + signer_id_to_addr.insert(signer_id, address); + signer_addr_to_weight.insert(address, weight_per_signer + remaining_weight); + signer_addresses.push(address); + remaining_weight = 0; // The first signer gets the extra weight if there is any. All other signers only get the weight_per_signer } SignerConfig { reward_cycle, signer_id: 0, signer_slot_id: SignerSlotID(rand::thread_rng().gen_range(0..num_signers)), // Give a random signer slot id between 0 and num_signers - key_ids: signer_key_ids.get(&0).cloned().unwrap_or_default(), signer_entries: SignerEntries { - public_keys, - coordinator_key_ids, - signer_key_ids, - signer_ids, - signer_public_keys, + signer_addr_to_id, + signer_id_to_pk, + signer_pk_to_id, + signer_pks, + signer_id_to_addr, + signer_addr_to_weight, + signer_addresses, }, signer_slot_ids, - ecdsa_private_key: config.ecdsa_private_key, stacks_private_key: config.stacks_private_key, node_host: config.node_host.to_string(), mainnet: config.network.is_mainnet(), - dkg_end_timeout: config.dkg_end_timeout, - dkg_private_timeout: config.dkg_private_timeout, - dkg_public_timeout: config.dkg_public_timeout, - nonce_timeout: config.nonce_timeout, - sign_timeout: config.sign_timeout, - tx_fee_ustx: config.tx_fee_ustx, - max_tx_fee_ustx: config.max_tx_fee_ustx, db_path: config.db_path.clone(), first_proposal_burn_block_timing: config.first_proposal_burn_block_timing, block_proposal_timeout: config.block_proposal_timeout, } } - pub fn build_get_round_info_response(info: Option<(u64, u64)>) -> String { - let clarity_value = if let Some((vote_count, vote_weight)) = info { - 
ClarityValue::some(ClarityValue::Tuple( - TupleData::from_data(vec![ - ("votes-count".into(), ClarityValue::UInt(vote_count as u128)), - ( - "votes-weight".into(), - ClarityValue::UInt(vote_weight as u128), - ), - ]) - .expect("BUG: Failed to create clarity value from tuple data"), - )) - .expect("BUG: Failed to create clarity value from tuple data") - } else { - ClarityValue::none() - }; - build_read_only_response(&clarity_value) - } - - pub fn build_get_weight_threshold_response(threshold: u64) -> String { - let clarity_value = ClarityValue::UInt(threshold as u128); - build_read_only_response(&clarity_value) - } - pub fn build_get_tenure_tip_response(header_types: &StacksBlockHeaderTypes) -> String { let response_json = serde_json::to_string(header_types).expect("Failed to serialize tenure tip info"); diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index f2b574ef4f..b3f6528232 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -257,7 +257,7 @@ mod tests { Some(9000), ); let config = GlobalConfig::load_from_str(&signer_config[0]).unwrap(); - let signer_config = generate_signer_config(&config, 5, 20); + let signer_config = generate_signer_config(&config, 5); let mut stackerdb = StackerDB::from(&signer_config); let header = NakamotoBlockHeader::empty(); diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 7b490144fc..f415896e86 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -15,11 +15,8 @@ // along with this program. If not, see . 
use std::collections::{HashMap, VecDeque}; -use blockstack_lib::burnchains::Txid; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; -use blockstack_lib::chainstate::stacks::boot::{ - NakamotoSignerEntry, SIGNERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, -}; +use blockstack_lib::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; use blockstack_lib::chainstate::stacks::{ StacksTransaction, StacksTransactionSigner, TransactionAnchorMode, TransactionAuth, @@ -37,9 +34,7 @@ use blockstack_lib::net::api::getstackers::{GetStackersErrors, GetStackersRespon use blockstack_lib::net::api::postblock::StacksBlockAcceptedData; use blockstack_lib::net::api::postblock_proposal::NakamotoBlockProposal; use blockstack_lib::net::api::postblock_v3; -use blockstack_lib::net::api::postfeerate::{FeeRateEstimateRequestBody, RPCFeeEstimateResponse}; -use blockstack_lib::util_lib::boot::{boot_code_addr, boot_code_id}; -use clarity::util::hash::to_hex; +use blockstack_lib::util_lib::boot::boot_code_id; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; use libsigner::v0::messages::PeerInfo; @@ -54,7 +49,6 @@ use stacks_common::types::chainstate::{ }; use stacks_common::types::StacksEpochId; use stacks_common::{debug, warn}; -use wsts::curve::point::{Compressed, Point}; use super::SignerSlotID; use crate::client::{retry_with_exponential_backoff, ClientError}; @@ -273,78 +267,6 @@ impl StacksClient { .collect()) } - /// Get the vote for a given round, reward cycle, and signer address - pub fn get_vote_for_aggregate_public_key( - &self, - round: u64, - reward_cycle: u64, - signer: StacksAddress, - ) -> Result, ClientError> { - debug!("Getting vote for aggregate public key..."); - let function_name = ClarityName::from("get-vote"); - let function_args = &[ - ClarityValue::UInt(reward_cycle as u128), - 
ClarityValue::UInt(round as u128), - ClarityValue::Principal(signer.into()), - ]; - let value = self.read_only_contract_call( - &boot_code_addr(self.mainnet), - &ContractName::from(SIGNERS_VOTING_NAME), - &function_name, - function_args, - )?; - // Return value is of type: - // ```clarity - // (option { aggregate-public-key: (buff 33), signer-weight: uint }) - // ``` - let inner_data = value.expect_optional()?; - if let Some(inner_data) = inner_data { - let tuple = inner_data.expect_tuple()?; - let key_value = tuple.get_owned("aggregate-public-key")?; - self.parse_aggregate_public_key(key_value) - } else { - Ok(None) - } - } - - /// Retrieve the medium estimated transaction fee in uSTX from the stacks node for the given transaction - pub fn get_medium_estimated_fee_ustx( - &self, - tx: &StacksTransaction, - ) -> Result { - debug!("stacks_node_client: Getting estimated fee..."); - let request = FeeRateEstimateRequestBody { - estimated_len: Some(tx.tx_len()), - transaction_payload: to_hex(&tx.payload.serialize_to_vec()), - }; - let timer = - crate::monitoring::new_rpc_call_timer(&self.fees_transaction_path(), &self.http_origin); - let send_request = || { - self.stacks_node_client - .post(self.fees_transaction_path()) - .header("Content-Type", "application/json") - .json(&request) - .send() - .map_err(backoff::Error::transient) - }; - let response = retry_with_exponential_backoff(send_request)?; - if !response.status().is_success() { - return Err(ClientError::RequestFailure(response.status())); - } - timer.stop_and_record(); - let fee_estimate_response = response.json::()?; - let fee = fee_estimate_response - .estimations - .get(1) - .map(|estimate| estimate.fee) - .ok_or_else(|| { - ClientError::UnexpectedResponseFormat( - "RPCFeeEstimateResponse missing medium fee estimate".into(), - ) - })?; - Ok(fee) - } - /// Determine the stacks node current epoch pub fn get_node_epoch(&self) -> Result { let pox_info = self.get_pox_data()?; @@ -406,73 +328,6 @@ impl StacksClient 
{ Ok(()) } - /// Retrieve the approved DKG aggregate public key for the given reward cycle - pub fn get_approved_aggregate_key( - &self, - reward_cycle: u64, - ) -> Result, ClientError> { - let function_name = ClarityName::from("get-approved-aggregate-key"); - let voting_contract_id = boot_code_id(SIGNERS_VOTING_NAME, self.mainnet); - let function_args = &[ClarityValue::UInt(reward_cycle as u128)]; - let value = self.read_only_contract_call( - &voting_contract_id.issuer.into(), - &voting_contract_id.name, - &function_name, - function_args, - )?; - let inner_data = value.expect_optional()?; - inner_data.map_or_else( - || Ok(None), - |key_value| self.parse_aggregate_public_key(key_value), - ) - } - - /// Retrieve the current consumed weight for the given reward cycle and DKG round - pub fn get_round_vote_weight( - &self, - reward_cycle: u64, - round_id: u64, - ) -> Result, ClientError> { - let function_name = ClarityName::from("get-round-info"); - let pox_contract_id = boot_code_id(SIGNERS_VOTING_NAME, self.mainnet); - let function_args = &[ - ClarityValue::UInt(reward_cycle as u128), - ClarityValue::UInt(round_id as u128), - ]; - let value = self.read_only_contract_call( - &pox_contract_id.issuer.into(), - &pox_contract_id.name, - &function_name, - function_args, - )?; - let inner_data = value.expect_optional()?; - let Some(inner_data) = inner_data else { - return Ok(None); - }; - let round_info = inner_data.expect_tuple()?; - let votes_weight = round_info.get("votes-weight")?.to_owned().expect_u128()?; - Ok(Some(votes_weight)) - } - - /// Retrieve the weight threshold required to approve a DKG vote - pub fn get_vote_threshold_weight(&self, reward_cycle: u64) -> Result { - let function_name = ClarityName::from("get-threshold-weight"); - let pox_contract_id = boot_code_id(SIGNERS_VOTING_NAME, self.mainnet); - let function_args = &[ClarityValue::UInt(reward_cycle as u128)]; - let value = self.read_only_contract_call( - &pox_contract_id.issuer.into(), - 
&pox_contract_id.name, - &function_name, - function_args, - )?; - Ok(value.expect_u128()?) - } - - /// Retrieve the current account nonce for the provided address - pub fn get_account_nonce(&self, address: &StacksAddress) -> Result { - self.get_account_entry(address).map(|entry| entry.nonce) - } - /// Get information about the tenures between `chosen_parent` and `last_sortition` pub fn get_tenure_forking_info( &self, @@ -601,33 +456,6 @@ impl StacksClient { Ok(peer_info_data) } - /// Retrieve the last DKG vote round number for the current reward cycle - pub fn get_last_round(&self, reward_cycle: u64) -> Result, ClientError> { - debug!("Getting the last DKG vote round of reward cycle {reward_cycle}..."); - let contract_addr = boot_code_addr(self.mainnet); - let contract_name = ContractName::from(SIGNERS_VOTING_NAME); - let function_name = ClarityName::from("get-last-round"); - let function_args = &[ClarityValue::UInt(reward_cycle as u128)]; - let opt_value = self - .read_only_contract_call( - &contract_addr, - &contract_name, - &function_name, - function_args, - )? - .expect_optional()?; - let round = if let Some(value) = opt_value { - Some(u64::try_from(value.expect_u128()?).map_err(|e| { - ClientError::MalformedContractData(format!( - "Failed to convert vote round to u64: {e}" - )) - })?) - } else { - None - }; - Ok(round) - } - /// Get the reward set signers from the stacks node for the given reward cycle pub fn get_reward_set_signers( &self, @@ -736,61 +564,6 @@ impl StacksClient { Ok(account_entry) } - /// Helper function that attempts to deserialize a clarity hex string as the aggregate public key - fn parse_aggregate_public_key( - &self, - value: ClarityValue, - ) -> Result, ClientError> { - debug!("Parsing aggregate public key..."); - let data = value.expect_buff(33)?; - // It is possible that the point was invalid though when voted upon and this cannot be prevented by pox 4 definitions... - // Pass up this error if the conversions fail. 
- let compressed_data = Compressed::try_from(data.as_slice()).map_err(|e| { - ClientError::MalformedClarityValue(format!( - "Failed to convert aggregate public key to compressed data: {e}" - )) - })?; - let dkg_public_key = Point::try_from(&compressed_data).map_err(|e| { - ClientError::MalformedClarityValue(format!( - "Failed to convert aggregate public key to a point: {e}" - )) - })?; - Ok(Some(dkg_public_key)) - } - - /// Helper function to create a stacks transaction for a modifying contract call - pub fn build_unsigned_vote_for_aggregate_public_key( - &self, - signer_index: u32, - round: u64, - dkg_public_key: Point, - reward_cycle: u64, - nonce: u64, - ) -> Result { - debug!("Building {SIGNERS_VOTING_FUNCTION_NAME} transaction..."); - let contract_address = boot_code_addr(self.mainnet); - let contract_name = ContractName::from(SIGNERS_VOTING_NAME); - let function_name = ClarityName::from(SIGNERS_VOTING_FUNCTION_NAME); - let function_args = vec![ - ClarityValue::UInt(signer_index as u128), - ClarityValue::buff_from(dkg_public_key.compress().data.to_vec())?, - ClarityValue::UInt(round as u128), - ClarityValue::UInt(reward_cycle as u128), - ]; - - let unsigned_tx = Self::build_unsigned_contract_call_transaction( - &contract_address, - contract_name, - function_name, - &function_args, - &self.stacks_private_key, - self.tx_version, - self.chain_id, - nonce, - )?; - Ok(unsigned_tx) - } - /// Try to post a completed nakamoto block to our connected stacks-node /// Returns `true` if the block was accepted or `false` if the block /// was rejected. 
@@ -822,34 +595,6 @@ impl StacksClient { Ok(post_block_resp.accepted) } - /// Helper function to submit a transaction to the Stacks mempool - pub fn submit_transaction(&self, tx: &StacksTransaction) -> Result { - let txid = tx.txid(); - let tx = tx.serialize_to_vec(); - debug!("stacks_node_client: Submitting transaction to the stacks node..."; - "txid" => %txid, - ); - let timer = - crate::monitoring::new_rpc_call_timer(&self.transaction_path(), &self.http_origin); - let send_request = || { - self.stacks_node_client - .post(self.transaction_path()) - .header("Content-Type", "application/octet-stream") - .body(tx.clone()) - .send() - .map_err(|e| { - debug!("Failed to submit transaction to the Stacks node: {e:?}"); - backoff::Error::transient(e) - }) - }; - let response = retry_with_exponential_backoff(send_request)?; - timer.stop_and_record(); - if !response.status().is_success() { - return Err(ClientError::RequestFailure(response.status())); - } - Ok(txid) - } - /// Makes a read only contract call to a stacks contract pub fn read_only_contract_call( &self, @@ -905,10 +650,6 @@ impl StacksClient { format!("{}/v2/pox", self.http_origin) } - fn transaction_path(&self) -> String { - format!("{}/v2/transactions", self.http_origin) - } - fn read_only_path( &self, contract_addr: &StacksAddress, @@ -950,10 +691,6 @@ impl StacksClient { format!("{}/v3/stacker_set/{reward_cycle}", self.http_origin) } - fn fees_transaction_path(&self) -> String { - format!("{}/v2/fees/transaction", self.http_origin) - } - fn tenure_tip_path(&self, consensus_hash: &ConsensusHash) -> String { format!("{}/v3/tenures/tip/{}", self.http_origin, consensus_hash) } @@ -1016,7 +753,6 @@ impl StacksClient { #[cfg(test)] mod tests { use std::collections::BTreeMap; - use std::io::{BufWriter, Write}; use std::thread::spawn; use blockstack_lib::burnchains::Address; @@ -1035,17 +771,13 @@ mod tests { use rand::thread_rng; use rand_core::RngCore; use stacks_common::bitvec::BitVec; - use 
stacks_common::consts::{CHAIN_ID_TESTNET, SIGNER_SLOTS_PER_USER}; - use wsts::curve::scalar::Scalar; + use stacks_common::consts::SIGNER_SLOTS_PER_USER; use super::*; use crate::client::tests::{ - build_account_nonce_response, build_get_approved_aggregate_key_response, - build_get_last_round_response, build_get_last_set_cycle_response, - build_get_medium_estimated_fee_ustx_response, build_get_peer_info_response, - build_get_pox_data_response, build_get_round_info_response, build_get_tenure_tip_response, - build_get_vote_for_aggregate_key_response, build_get_weight_threshold_response, - build_read_only_response, write_response, MockServerClient, + build_get_last_set_cycle_response, build_get_peer_info_response, + build_get_pox_data_response, build_get_tenure_tip_response, build_read_only_response, + write_response, MockServerClient, }; #[test] @@ -1174,149 +906,6 @@ mod tests { assert!(matches!(res, Err(ClientError::ReqwestError(_)))); } - #[test] - fn get_aggregate_public_key_should_succeed() { - let orig_point = Point::from(Scalar::random(&mut rand::thread_rng())); - let response = build_get_approved_aggregate_key_response(Some(orig_point)); - let mock = MockServerClient::new(); - let h = spawn(move || mock.client.get_approved_aggregate_key(0)); - write_response(mock.server, response.as_bytes()); - let res = h.join().unwrap().unwrap(); - assert_eq!(res, Some(orig_point)); - - let response = build_get_approved_aggregate_key_response(None); - let mock = MockServerClient::new(); - let h = spawn(move || mock.client.get_approved_aggregate_key(0)); - write_response(mock.server, response.as_bytes()); - let res = h.join().unwrap().unwrap(); - assert!(res.is_none()); - } - - #[test] - fn parse_valid_aggregate_public_key_should_succeed() { - let mock = MockServerClient::new(); - let orig_point = Point::from(Scalar::random(&mut rand::thread_rng())); - let clarity_value = ClarityValue::buff_from(orig_point.compress().as_bytes().to_vec()) - .expect("BUG: Failed to create 
clarity value from point"); - let result = mock - .client - .parse_aggregate_public_key(clarity_value) - .unwrap(); - assert_eq!(result, Some(orig_point)); - } - - #[test] - fn parse_invalid_aggregate_public_key_should_fail() { - let mock = MockServerClient::new(); - let value = ClarityValue::UInt(10_u128); - let result = mock.client.parse_aggregate_public_key(value); - assert!(result.is_err()) - } - - #[test] - fn transaction_contract_call_should_send_bytes_to_node() { - let mock = MockServerClient::new(); - let private_key = StacksPrivateKey::new(); - let unsigned_tx = StacksClient::build_unsigned_contract_call_transaction( - &mock.client.stacks_address, - ContractName::from("contract-name"), - ClarityName::from("function-name"), - &[], - &private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 0, - ) - .unwrap(); - - let tx = mock.client.sign_transaction(unsigned_tx).unwrap(); - - let mut tx_bytes = [0u8; 1024]; - { - let mut tx_bytes_writer = BufWriter::new(&mut tx_bytes[..]); - tx.consensus_serialize(&mut tx_bytes_writer).unwrap(); - tx_bytes_writer.flush().unwrap(); - } - - let bytes_len = tx_bytes - .iter() - .enumerate() - .rev() - .find(|(_, &x)| x != 0) - .unwrap() - .0 - + 1; - - let tx_clone = tx.clone(); - let h = spawn(move || mock.client.submit_transaction(&tx_clone)); - - let request_bytes = write_response( - mock.server, - format!("HTTP/1.1 200 OK\n\n{}", tx.txid()).as_bytes(), - ); - let returned_txid = h.join().unwrap().unwrap(); - - assert_eq!(returned_txid, tx.txid()); - assert!( - request_bytes - .windows(bytes_len) - .any(|window| window == &tx_bytes[..bytes_len]), - "Request bytes did not contain the transaction bytes" - ); - } - - #[test] - fn build_vote_for_aggregate_public_key_should_succeed() { - let mock = MockServerClient::new(); - let point = Point::from(Scalar::random(&mut rand::thread_rng())); - let nonce = thread_rng().next_u64(); - let signer_index = thread_rng().next_u32(); - let round = thread_rng().next_u64(); - let 
reward_cycle = thread_rng().next_u64(); - - let h = spawn(move || { - mock.client.build_unsigned_vote_for_aggregate_public_key( - signer_index, - round, - point, - reward_cycle, - nonce, - ) - }); - assert!(h.join().unwrap().is_ok()); - } - - #[test] - fn broadcast_vote_for_aggregate_public_key_should_succeed() { - let mock = MockServerClient::new(); - let point = Point::from(Scalar::random(&mut rand::thread_rng())); - let nonce = thread_rng().next_u64(); - let signer_index = thread_rng().next_u32(); - let round = thread_rng().next_u64(); - let reward_cycle = thread_rng().next_u64(); - let unsigned_tx = mock - .client - .build_unsigned_vote_for_aggregate_public_key( - signer_index, - round, - point, - reward_cycle, - nonce, - ) - .unwrap(); - let tx = mock.client.sign_transaction(unsigned_tx).unwrap(); - let tx_clone = tx.clone(); - let h = spawn(move || mock.client.submit_transaction(&tx_clone)); - - write_response( - mock.server, - format!("HTTP/1.1 200 OK\n\n{}", tx.txid()).as_bytes(), - ); - let returned_txid = h.join().unwrap().unwrap(); - - assert_eq!(returned_txid, tx.txid()); - } - #[test] fn core_info_call_for_burn_block_height_should_succeed() { let mock = MockServerClient::new(); @@ -1338,29 +927,6 @@ mod tests { assert!(h.join().unwrap().is_err()); } - #[test] - fn get_account_nonce_should_succeed() { - let mock = MockServerClient::new(); - let address = mock.client.stacks_address; - let h = spawn(move || mock.client.get_account_nonce(&address)); - let nonce = thread_rng().next_u64(); - write_response(mock.server, build_account_nonce_response(nonce).as_bytes()); - let returned_nonce = h.join().unwrap().expect("Failed to deserialize response"); - assert_eq!(returned_nonce, nonce); - } - - #[test] - fn get_account_nonce_should_fail() { - let mock = MockServerClient::new(); - let address = mock.client.stacks_address; - let h = spawn(move || mock.client.get_account_nonce(&address)); - write_response( - mock.server, - b"HTTP/1.1 200 
OK\n\n{\"nonce\":\"invalid nonce\",\"balance\":\"0x00000000000000000000000000000000\",\"locked\":\"0x00000000000000000000000000000000\",\"unlock_height\":0}" - ); - assert!(h.join().unwrap().is_err()); - } - #[test] fn parse_valid_signer_slots_should_succeed() { let mock = MockServerClient::new(); @@ -1565,23 +1131,13 @@ mod tests { assert_eq!(reduced_peer_info.server_version, peer_info.server_version); } - #[test] - fn get_last_round_should_succeed() { - let mock = MockServerClient::new(); - let round = rand::thread_rng().next_u64(); - let response = build_get_last_round_response(round); - let h = spawn(move || mock.client.get_last_round(0)); - - write_response(mock.server, response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap().unwrap(), round); - } - #[test] fn get_reward_set_should_succeed() { let mock = MockServerClient::new(); - let point = Point::from(Scalar::random(&mut rand::thread_rng())).compress(); + let private_key = StacksPrivateKey::new(); + let public_key = StacksPublicKey::from_private(&private_key); let mut bytes = [0u8; 33]; - bytes.copy_from_slice(point.as_bytes()); + bytes.copy_from_slice(&public_key.to_bytes_compressed()); let stacker_set = RewardSet { rewarded_addresses: vec![PoxAddress::standard_burn_address(false)], start_cycle_state: PoxStartCycleInfo { @@ -1606,80 +1162,6 @@ mod tests { assert_eq!(h.join().unwrap().unwrap(), stacker_set.signers); } - #[test] - fn get_vote_for_aggregate_public_key_should_succeed() { - let mock = MockServerClient::new(); - let point = Point::from(Scalar::random(&mut rand::thread_rng())); - let stacks_address = mock.client.stacks_address; - let key_response = build_get_vote_for_aggregate_key_response(Some(point)); - let h = spawn(move || { - mock.client - .get_vote_for_aggregate_public_key(0, 0, stacks_address) - }); - write_response(mock.server, key_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), Some(point)); - - let mock = MockServerClient::new(); - let stacks_address = 
mock.client.stacks_address; - let key_response = build_get_vote_for_aggregate_key_response(None); - let h = spawn(move || { - mock.client - .get_vote_for_aggregate_public_key(0, 0, stacks_address) - }); - write_response(mock.server, key_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), None); - } - - #[test] - fn get_round_vote_weight_should_succeed() { - let mock = MockServerClient::new(); - let vote_count = rand::thread_rng().next_u64(); - let weight = rand::thread_rng().next_u64(); - let round_response = build_get_round_info_response(Some((vote_count, weight))); - let h = spawn(move || mock.client.get_round_vote_weight(0, 0)); - write_response(mock.server, round_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), Some(weight as u128)); - - let mock = MockServerClient::new(); - let round_response = build_get_round_info_response(None); - let h = spawn(move || mock.client.get_round_vote_weight(0, 0)); - write_response(mock.server, round_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), None); - } - - #[test] - fn get_vote_threshold_weight_should_succeed() { - let mock = MockServerClient::new(); - let weight = rand::thread_rng().next_u64(); - let round_response = build_get_weight_threshold_response(weight); - let h = spawn(move || mock.client.get_vote_threshold_weight(0)); - write_response(mock.server, round_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), weight as u128); - } - - #[test] - fn get_medium_estimated_fee_ustx_should_succeed() { - let mock = MockServerClient::new(); - let private_key = StacksPrivateKey::new(); - let unsigned_tx = StacksClient::build_unsigned_contract_call_transaction( - &mock.client.stacks_address, - ContractName::from("contract-name"), - ClarityName::from("function-name"), - &[], - &private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 0, - ) - .unwrap(); - - let estimate = thread_rng().next_u64(); - let response = build_get_medium_estimated_fee_ustx_response(estimate).0; - 
let h = spawn(move || mock.client.get_medium_estimated_fee_ustx(&unsigned_tx)); - write_response(mock.server, response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), estimate); - } - #[test] fn get_tenure_tip_should_succeed() { let mock = MockServerClient::new(); diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index 0ae5be2a22..9f72e171e5 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -29,16 +29,12 @@ use stacks_common::address::{ }; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; -use stacks_common::types::PrivateKey; use stacks_common::util::hash::Hash160; -use wsts::curve::scalar::Scalar; use crate::client::SignerSlotID; const EVENT_TIMEOUT_MS: u64 = 5000; const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 45_000; -// Default transaction fee to use in microstacks (if unspecificed in the config file) -const TX_FEE_USTX: u64 = 10_000; #[derive(thiserror::Error, Debug)] /// An error occurred parsing the provided configuration @@ -118,38 +114,20 @@ impl Network { pub struct SignerConfig { /// The reward cycle of the configuration pub reward_cycle: u64, - /// The signer ID assigned to this signer to be used in DKG and Sign rounds + /// The signer ID assigned to this signer (may be different from signer_slot_id) pub signer_id: u32, /// The signer stackerdb slot id (may be different from signer_id) pub signer_slot_id: SignerSlotID, - /// This signer's key ids - pub key_ids: Vec, /// The registered signers for this reward cycle pub signer_entries: SignerEntries, /// The signer slot ids of all signers registered for this reward cycle pub signer_slot_ids: Vec, - /// The Scalar representation of the private key for signer communication - pub ecdsa_private_key: Scalar, /// The private key for this signer pub stacks_private_key: StacksPrivateKey, /// The node host for this signer pub node_host: String, /// Whether this 
signer is running on mainnet or not pub mainnet: bool, - /// timeout to gather DkgPublicShares messages - pub dkg_public_timeout: Option, - /// timeout to gather DkgPrivateShares messages - pub dkg_private_timeout: Option, - /// timeout to gather DkgEnd messages - pub dkg_end_timeout: Option, - /// timeout to gather nonces - pub nonce_timeout: Option, - /// timeout to gather signature shares - pub sign_timeout: Option, - /// the STX tx fee to use in uSTX. - pub tx_fee_ustx: u64, - /// If set, will use the estimated fee up to this amount. - pub max_tx_fee_ustx: Option, /// The path to the signer's database file pub db_path: PathBuf, /// How much time must pass between the first block proposal in a tenure and the next bitcoin block @@ -166,8 +144,6 @@ pub struct GlobalConfig { pub node_host: String, /// endpoint to the event receiver pub endpoint: SocketAddr, - /// The Scalar representation of the private key for signer communication - pub ecdsa_private_key: Scalar, /// The signer's Stacks private key pub stacks_private_key: StacksPrivateKey, /// The signer's Stacks address @@ -176,20 +152,6 @@ pub struct GlobalConfig { pub network: Network, /// The time to wait for a response from the stacker-db instance pub event_timeout: Duration, - /// timeout to gather DkgPublicShares messages - pub dkg_public_timeout: Option, - /// timeout to gather DkgPrivateShares messages - pub dkg_private_timeout: Option, - /// timeout to gather DkgEnd messages - pub dkg_end_timeout: Option, - /// timeout to gather nonces - pub nonce_timeout: Option, - /// timeout to gather signature shares - pub sign_timeout: Option, - /// the STX tx fee to use in uSTX. 
- pub tx_fee_ustx: u64, - /// the max STX tx fee to use in uSTX when estimating fees - pub max_tx_fee_ustx: Option, /// the authorization password for the block proposal endpoint pub auth_password: String, /// The path to the signer's database file @@ -217,21 +179,6 @@ struct RawConfigFile { pub network: Network, /// The time to wait (in millisecs) for a response from the stacker-db instance pub event_timeout_ms: Option, - /// timeout in (millisecs) to gather DkgPublicShares messages - pub dkg_public_timeout_ms: Option, - /// timeout in (millisecs) to gather DkgPrivateShares messages - pub dkg_private_timeout_ms: Option, - /// timeout in (millisecs) to gather DkgEnd messages - pub dkg_end_timeout_ms: Option, - /// timeout in (millisecs) to gather nonces - pub nonce_timeout_ms: Option, - /// timeout in (millisecs) to gather signature shares - pub sign_timeout_ms: Option, - /// the STX tx fee to use in uSTX. If not set, will default to TX_FEE_USTX - pub tx_fee_ustx: Option, - /// the max STX tx fee to use in uSTX when estimating fees. - /// If not set, will use tx_fee_ustx. 
- pub max_tx_fee_ustx: Option, /// The authorization password for the block proposal endpoint pub auth_password: String, /// The path to the signer's database file or :memory: for an in-memory database @@ -290,20 +237,12 @@ impl TryFrom for GlobalConfig { let stacks_private_key = StacksPrivateKey::from_hex(&raw_data.stacks_private_key) .map_err(|e| ConfigError::BadField("stacks_private_key".to_string(), e.into()))?; - - let ecdsa_private_key = Scalar::try_from(&stacks_private_key.to_bytes()[..32]) - .map_err(|e| ConfigError::BadField("stacks_private_key".to_string(), e.to_string()))?; let stacks_public_key = StacksPublicKey::from_private(&stacks_private_key); let signer_hash = Hash160::from_data(stacks_public_key.to_bytes_compressed().as_slice()); let stacks_address = StacksAddress::p2pkh_from_hash(raw_data.network.is_mainnet(), signer_hash); let event_timeout = Duration::from_millis(raw_data.event_timeout_ms.unwrap_or(EVENT_TIMEOUT_MS)); - let dkg_end_timeout = raw_data.dkg_end_timeout_ms.map(Duration::from_millis); - let dkg_public_timeout = raw_data.dkg_public_timeout_ms.map(Duration::from_millis); - let dkg_private_timeout = raw_data.dkg_private_timeout_ms.map(Duration::from_millis); - let nonce_timeout = raw_data.nonce_timeout_ms.map(Duration::from_millis); - let sign_timeout = raw_data.sign_timeout_ms.map(Duration::from_millis); let first_proposal_burn_block_timing = Duration::from_secs(raw_data.first_proposal_burn_block_timing_secs.unwrap_or(30)); let db_path = raw_data.db_path.into(); @@ -331,17 +270,9 @@ impl TryFrom for GlobalConfig { node_host: raw_data.node_host, endpoint, stacks_private_key, - ecdsa_private_key, stacks_address, network: raw_data.network, event_timeout, - dkg_end_timeout, - dkg_public_timeout, - dkg_private_timeout, - nonce_timeout, - sign_timeout, - tx_fee_ustx: raw_data.tx_fee_ustx.unwrap_or(TX_FEE_USTX), - max_tx_fee_ustx: raw_data.max_tx_fee_ustx, auth_password: raw_data.auth_password, db_path, metrics_endpoint, @@ -373,10 +304,6 @@ 
impl GlobalConfig { /// Return a string with non-sensitive configuration /// information for logging purposes pub fn config_to_log_string(&self) -> String { - let tx_fee = match self.tx_fee_ustx { - 0 => "default".to_string(), - _ => (self.tx_fee_ustx as f64 / 1_000_000.0).to_string(), - }; let metrics_endpoint = match &self.metrics_endpoint { Some(endpoint) => endpoint.to_string(), None => "None".to_string(), @@ -389,7 +316,6 @@ Stacks address: {stacks_address} Public key: {public_key} Network: {network} Database path: {db_path} -DKG transaction fee: {tx_fee} uSTX Metrics endpoint: {metrics_endpoint} "#, node_host = self.node_host, @@ -400,7 +326,6 @@ Metrics endpoint: {metrics_endpoint} ), network = self.network, db_path = self.db_path.to_str().unwrap_or_default(), - tx_fee = tx_fee, metrics_endpoint = metrics_endpoint, ) } @@ -534,119 +459,9 @@ mod tests { RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); assert_eq!(config.auth_password, "melon"); - assert!(config.max_tx_fee_ustx.is_none()); - assert!(config.tx_fee_ustx.is_none()); assert_eq!(config.metrics_endpoint, Some("localhost:4000".to_string())); } - #[test] - fn fee_options_should_deserialize_correctly() { - let pk = StacksPrivateKey::from_hex( - "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", - ) - .unwrap(); - - let node_host = "localhost"; - let network = Network::Testnet; - let password = "melon"; - - // Test both max_tx_fee_ustx and tx_fee_ustx are unspecified - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - None, - None, - None, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert!(config.max_tx_fee_ustx.is_none()); - assert!(config.tx_fee_ustx.is_none()); - - let config = GlobalConfig::try_from(config).expect("Failed to parse config"); - assert!(config.max_tx_fee_ustx.is_none()); - 
assert_eq!(config.tx_fee_ustx, TX_FEE_USTX); - - // Test both max_tx_fee_ustx and tx_fee_ustx are specified - let max_tx_fee_ustx = Some(1000); - let tx_fee_ustx = Some(2000); - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - max_tx_fee_ustx, - tx_fee_ustx, - None, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); - assert_eq!(config.tx_fee_ustx, tx_fee_ustx); - - // Test only max_tx_fee_ustx is specified - let max_tx_fee_ustx = Some(1000); - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - max_tx_fee_ustx, - None, - None, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); - assert!(config.tx_fee_ustx.is_none()); - - let config = GlobalConfig::try_from(config).expect("Failed to parse config"); - assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); - assert_eq!(config.tx_fee_ustx, TX_FEE_USTX); - - // Test only tx_fee_ustx is specified - let tx_fee_ustx = Some(1000); - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - None, - tx_fee_ustx, - None, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert!(config.max_tx_fee_ustx.is_none()); - assert_eq!(config.tx_fee_ustx, tx_fee_ustx); - - let config = GlobalConfig::try_from(config).expect("Failed to parse config"); - assert!(config.max_tx_fee_ustx.is_none()); - assert_eq!(Some(config.tx_fee_ustx), tx_fee_ustx); - } - #[test] fn test_config_to_string() { let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); @@ -659,7 +474,6 @@ Stacks address: 
ST3FPN8KBZ3YPBP0ZJGAAHTVFMQDTJCR5QPS7VTNJ Public key: 03bc489f27da3701d9f9e577c88de5567cf4023111b7577042d55cde4d823a3505 Network: testnet Database path: :memory: -DKG transaction fee: 0.01 uSTX Metrics endpoint: 0.0.0.0:9090 "#; @@ -670,7 +484,6 @@ Stacks address: ST3FPN8KBZ3YPBP0ZJGAAHTVFMQDTJCR5QPS7VTNJ Public key: 03bc489f27da3701d9f9e577c88de5567cf4023111b7577042d55cde4d823a3505 Network: testnet Database path: :memory: -DKG transaction fee: 0.01 uSTX Metrics endpoint: 0.0.0.0:9090 "#; diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 9d8a22a320..20c2bc2ca8 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -39,10 +39,8 @@ pub mod runloop; pub mod signerdb; /// The util module for the signer pub mod utils; -/// The v0 implementation of the signer. This does not include WSTS support +/// The v0 implementation of the signer. pub mod v0; -/// The v1 implementation of the singer. This includes WSTS support -pub mod v1; #[cfg(test)] mod tests; @@ -59,7 +57,7 @@ use stacks_common::{info, warn}; use crate::client::StacksClient; use crate::config::SignerConfig; -use crate::runloop::{RunLoop, RunLoopCommand}; +use crate::runloop::RunLoop; /// A trait which provides a common `Signer` interface for `v0` and `v1` pub trait Signer: Debug + Display { @@ -76,13 +74,6 @@ pub trait Signer: Debug + Display { res: &Sender>, current_reward_cycle: u64, ); - /// Process a command - fn process_command( - &mut self, - stacks_client: &StacksClient, - current_reward_cycle: u64, - command: Option, - ); /// Check if the signer is in the middle of processing blocks fn has_unprocessed_blocks(&self) -> bool; } @@ -92,14 +83,12 @@ pub type RunningSigner = libsigner::RunningSigner, Vec /// The wrapper for the runloop signer type type RunLoopSigner = - libsigner::Signer, RunLoop, SignerEventReceiver, T>; + libsigner::Signer, RunLoop, SignerEventReceiver, T>; /// The spawned signer pub struct SpawnedSigner + Send, T: SignerEventTrait> { /// The underlying 
running signer thread handle running_signer: RunningSigner, - /// The command sender for interacting with the running signer - pub cmd_send: Sender, /// The result receiver for interacting with the running signer pub res_recv: Receiver>, /// The spawned signer's config @@ -133,7 +122,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner For more information, check the documentation at \ https://docs.stacks.co/nakamoto-upgrade/signing-and-stacking/faq#what-should-the-networking-setup-for-my-signer-look-like." ); - let (cmd_send, cmd_recv) = channel(); let (res_send, res_recv) = channel(); let ev = SignerEventReceiver::new(config.network.is_mainnet()); #[cfg(feature = "monitoring_prom")] @@ -141,12 +129,10 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SpawnedSigner crate::monitoring::start_serving_monitoring_metrics(config.clone()).ok(); } let runloop = RunLoop::new(config.clone()); - let mut signer: RunLoopSigner = - libsigner::Signer::new(runloop, ev, cmd_recv, res_send); + let mut signer: RunLoopSigner = libsigner::Signer::new(runloop, ev, res_send); let running_signer = signer.spawn(endpoint).expect("Failed to spawn signer"); SpawnedSigner { running_signer, - cmd_send, res_recv, _phantom: std::marker::PhantomData, config, diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 5b118db646..520d455258 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -1,4 +1,4 @@ -//! # stacks-signer: Stacks signer binary for executing DKG rounds, signing transactions and blocks, and more. +//! # stacks-signer: Stacks signer binary for signing block proposals, interacting with stackerdb, and more. //! //! Usage documentation can be found in the [README]("https://github.com/blockstack/stacks-blockchain/stacks-signer/README.md). //! 
diff --git a/stacks-signer/src/monitoring/mod.rs b/stacks-signer/src/monitoring/mod.rs index 0ecc99b5f8..e03b03d47a 100644 --- a/stacks-signer/src/monitoring/mod.rs +++ b/stacks-signer/src/monitoring/mod.rs @@ -71,52 +71,6 @@ pub fn increment_block_responses_sent(accepted: bool) { } } -/// Increment the signer inbound messages counter -#[allow(unused_variables)] -pub fn increment_signer_inbound_messages(amount: i64) { - #[cfg(feature = "monitoring_prom")] - prometheus::SIGNER_INBOUND_MESSAGES.inc_by(amount); -} - -/// Increment the coordinator inbound messages counter -#[allow(unused_variables)] -pub fn increment_coordinator_inbound_messages(amount: i64) { - #[cfg(feature = "monitoring_prom")] - prometheus::COORDINATOR_INBOUND_MESSAGES.inc_by(amount); -} - -/// Increment the number of inbound packets received -#[allow(unused_variables)] -pub fn increment_inbound_packets(amount: i64) { - #[cfg(feature = "monitoring_prom")] - prometheus::INBOUND_PACKETS_RECEIVED.inc_by(amount); -} - -/// Increment the number of commands processed -#[allow(unused_variables)] -pub fn increment_commands_processed(command_type: &str) { - #[cfg(feature = "monitoring_prom")] - prometheus::COMMANDS_PROCESSED - .with_label_values(&[command_type]) - .inc(); -} - -/// Increment the number of DKG votes submitted -#[allow(unused_variables)] -pub fn increment_dkg_votes_submitted() { - #[cfg(feature = "monitoring_prom")] - prometheus::DGK_VOTES_SUBMITTED.inc(); -} - -/// Increment the number of commands processed -#[allow(unused_variables)] -pub fn increment_operation_results(operation_type: &str) { - #[cfg(feature = "monitoring_prom")] - prometheus::OPERATION_RESULTS - .with_label_values(&[operation_type]) - .inc(); -} - /// Increment the number of block proposals received #[allow(unused_variables)] pub fn increment_block_proposals_received() { diff --git a/stacks-signer/src/monitoring/prometheus.rs b/stacks-signer/src/monitoring/prometheus.rs index c78db1299d..247a9f00f5 100644 --- 
a/stacks-signer/src/monitoring/prometheus.rs +++ b/stacks-signer/src/monitoring/prometheus.rs @@ -39,38 +39,6 @@ lazy_static! { &["response_type"] ) .unwrap(); - pub static ref SIGNER_INBOUND_MESSAGES: IntCounter = register_int_counter!(opts!( - "stacks_signer_inbound_messages", - "The number of inbound messages received by the signer" - )) - .unwrap(); - pub static ref COORDINATOR_INBOUND_MESSAGES: IntCounter = register_int_counter!(opts!( - "stacks_signer_coordinator_inbound_messages", - "The number of inbound messages received as a coordinator" - )) - .unwrap(); - pub static ref INBOUND_PACKETS_RECEIVED: IntCounter = register_int_counter!(opts!( - "stacks_signer_inbound_packets_received", - "The number of inbound packets received by the signer" - )) - .unwrap(); - pub static ref COMMANDS_PROCESSED: IntCounterVec = register_int_counter_vec!( - "stacks_signer_commands_processed", - "The number of commands processed by the signer", - &["command_type"] - ) - .unwrap(); - pub static ref DGK_VOTES_SUBMITTED: IntCounter = register_int_counter!(opts!( - "stacks_signer_dgk_votes_submitted", - "The number of DGK votes submitted by the signer" - )) - .unwrap(); - pub static ref OPERATION_RESULTS: IntCounterVec = register_int_counter_vec!( - "stacks_signer_operation_results_dkg", - "The number of DKG operation results", - &["operation_type"] - ) - .unwrap(); pub static ref BLOCK_PROPOSALS_RECEIVED: IntCounter = register_int_counter!(opts!( "stacks_signer_block_proposals_received", "The number of block proposals received by the signer" diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 4a22c15bb7..a0e2b739e9 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -1,5 +1,3 @@ -use std::collections::VecDeque; -use std::fmt::Debug; // Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation // Copyright (C) 2020-2024 Stacks Open Internet Foundation // @@ -15,16 +13,15 @@ use std::fmt::Debug; // // You should have 
received a copy of the GNU General Public License // along with this program. If not, see . +use std::fmt::Debug; use std::sync::mpsc::Sender; use std::time::Duration; use clarity::codec::StacksMessageCodec; use hashbrown::HashMap; -use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerRunLoop}; +use libsigner::{SignerEntries, SignerEvent, SignerRunLoop}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::{debug, error, info, warn}; -use wsts::common::MerkleRoot; -use wsts::state_machine::OperationResult; use crate::chainstate::SortitionsView; use crate::client::{retry_with_exponential_backoff, ClientError, StacksClient}; @@ -55,14 +52,6 @@ pub struct StateInfo { pub enum SignerResult { /// The signer has received a status check StatusCheck(StateInfo), - /// The signer has completed an operation - OperationResult(OperationResult), -} - -impl From for SignerResult { - fn from(result: OperationResult) -> Self { - SignerResult::OperationResult(result) - } } impl From for SignerResult { @@ -71,31 +60,6 @@ impl From for SignerResult { } } -/// Which signer operation to perform -#[derive(PartialEq, Clone, Debug)] -pub enum SignerCommand { - /// Generate a DKG aggregate public key - Dkg, - /// Sign a message - Sign { - /// The block to sign over - block_proposal: BlockProposal, - /// Whether to make a taproot signature - is_taproot: bool, - /// Taproot merkle root - merkle_root: Option, - }, -} - -/// Which operation to perform -#[derive(PartialEq, Clone, Debug)] -pub struct RunLoopCommand { - /// Which signer operation to perform - pub command: SignerCommand, - /// The reward cycle we are performing the operation for - pub reward_cycle: u64, -} - /// The runloop state #[derive(PartialEq, Eq, Debug, Clone, Copy)] pub enum State { @@ -213,8 +177,6 @@ where pub stacks_signers: HashMap>, /// The state of the runloop pub state: State, - /// The commands received thus far - pub commands: VecDeque, /// The current reward cycle info. 
Only None if the runloop is uninitialized pub current_reward_cycle_info: Option, /// Cache sortitin data from `stacks-node` @@ -230,7 +192,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo stacks_client, stacks_signers: HashMap::with_capacity(2), state: State::Uninitialized, - commands: VecDeque::new(), current_reward_cycle_info: None, sortition_state: None, } @@ -301,7 +262,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo ); return Ok(None); }; - let Some(signer_id) = signer_entries.signer_ids.get(current_addr) else { + let Some(signer_id) = signer_entries.signer_addr_to_id.get(current_addr) else { warn!( "Signer {current_addr} was found in stacker db but not the reward set for reward cycle {reward_cycle}." ); @@ -310,30 +271,16 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo info!( "Signer #{signer_id} ({current_addr}) is registered for reward cycle {reward_cycle}." ); - let key_ids = signer_entries - .signer_key_ids - .get(signer_id) - .cloned() - .unwrap_or_default(); Ok(Some(SignerConfig { reward_cycle, signer_id: *signer_id, signer_slot_id: *signer_slot_id, - key_ids, signer_entries, signer_slot_ids: signer_slot_ids.into_values().collect(), first_proposal_burn_block_timing: self.config.first_proposal_burn_block_timing, - ecdsa_private_key: self.config.ecdsa_private_key, stacks_private_key: self.config.stacks_private_key, node_host: self.config.node_host.to_string(), mainnet: self.config.network.is_mainnet(), - dkg_end_timeout: self.config.dkg_end_timeout, - dkg_private_timeout: self.config.dkg_private_timeout, - dkg_public_timeout: self.config.dkg_public_timeout, - nonce_timeout: self.config.nonce_timeout, - sign_timeout: self.config.sign_timeout, - tx_fee_ustx: self.config.tx_fee_ustx, - max_tx_fee_ustx: self.config.max_tx_fee_ustx, db_path: self.config.db_path.clone(), block_proposal_timeout: self.config.block_proposal_timeout, })) @@ -499,7 +446,7 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo } impl, 
T: StacksMessageCodec + Clone + Send + Debug> - SignerRunLoop, RunLoopCommand, T> for RunLoop + SignerRunLoop, T> for RunLoop { fn set_event_timeout(&mut self, timeout: Duration) { self.config.event_timeout = timeout; @@ -512,11 +459,10 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> fn run_one_pass( &mut self, event: Option>, - cmd: Option, res: &Sender>, ) -> Option> { debug!( - "Running one pass for the signer. state={:?}, cmd={cmd:?}, event={event:?}", + "Running one pass for the signer. state={:?}, event={event:?}", self.state ); // This is the only event that we respond to from the outer signer runloop @@ -532,9 +478,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> } } - if let Some(cmd) = cmd { - self.commands.push_back(cmd); - } if self.state == State::Uninitialized { if let Err(e) = self.initialize_runloop() { error!("Failed to initialize signer runloop: {e}."); @@ -567,12 +510,6 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> res, current_reward_cycle, ); - // After processing event, run the next command for each signer - signer.process_command( - &self.stacks_client, - current_reward_cycle, - self.commands.pop_front(), - ); } if self.state == State::NoRegisteredSigners && event.is_some() { let next_reward_cycle = current_reward_cycle.saturating_add(1); @@ -608,8 +545,11 @@ mod tests { } let parsed_entries = SignerEntries::parse(false, &signer_entries).unwrap(); - assert_eq!(parsed_entries.signer_ids.len(), nmb_signers); - let mut signer_ids = parsed_entries.signer_ids.into_values().collect::>(); + assert_eq!(parsed_entries.signer_id_to_pk.len(), nmb_signers); + let mut signer_ids = parsed_entries + .signer_id_to_pk + .into_keys() + .collect::>(); signer_ids.sort(); assert_eq!( signer_ids, diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs index 1b8a57abbb..06b9d703c3 100644 --- a/stacks-signer/src/signerdb.rs +++ b/stacks-signer/src/signerdb.rs @@ -36,7 +36,6 @@ use 
stacks_common::types::chainstate::ConsensusHash; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::{debug, define_u8_enum, error}; -use wsts::net::NonceRequest; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] /// A vote across the signer set for a block @@ -67,21 +66,6 @@ impl StacksMessageCodec for NakamotoBlockVote { } } -#[derive(Serialize, Deserialize, Debug, PartialEq, Default)] -/// Information specific to Signer V1 -pub struct BlockInfoV1 { - /// The associated packet nonce request if we have one - pub nonce_request: Option, -} - -impl From for BlockInfoV1 { - fn from(value: NonceRequest) -> Self { - Self { - nonce_request: Some(value), - } - } -} - #[derive(Serialize, Deserialize, Debug, PartialEq, Default)] /// Store extra version-specific info in `BlockInfo` pub enum ExtraBlockInfo { @@ -90,28 +74,6 @@ pub enum ExtraBlockInfo { None, /// Extra data for Signer V0 V0, - /// Extra data for Signer V1 - V1(BlockInfoV1), -} - -impl ExtraBlockInfo { - /// Take `nonce_request` if it exists - pub fn take_nonce_request(&mut self) -> Option { - match self { - ExtraBlockInfo::None | ExtraBlockInfo::V0 => None, - ExtraBlockInfo::V1(v1) => v1.nonce_request.take(), - } - } - /// Set `nonce_request` if it exists - pub fn set_nonce_request(&mut self, value: NonceRequest) -> Result<(), &str> { - match self { - ExtraBlockInfo::None | ExtraBlockInfo::V0 => Err("Field doesn't exist"), - ExtraBlockInfo::V1(v1) => { - v1.nonce_request = Some(value); - Ok(()) - } - } - } } define_u8_enum!( @@ -217,14 +179,6 @@ impl From for BlockInfo { } } impl BlockInfo { - /// Create a new BlockInfo with an associated nonce request packet - pub fn new_v1_with_request(block_proposal: BlockProposal, nonce_request: NonceRequest) -> Self { - let mut block_info = BlockInfo::from(block_proposal); - block_info.ext = ExtraBlockInfo::V1(BlockInfoV1::from(nonce_request)); - block_info.signed_over = true; - block_info - } 
- /// Mark this block as locally accepted, valid, signed over, and records either the self or group signed timestamp in the block info if it wasn't /// already set. pub fn mark_locally_accepted(&mut self, group_signed: bool) -> Result<(), String> { @@ -283,7 +237,10 @@ impl BlockInfo { ) } BlockState::LocallyRejected => { - matches!(prev_state, BlockState::Unprocessed) + matches!( + prev_state, + BlockState::Unprocessed | BlockState::LocallyRejected + ) } BlockState::GloballyAccepted => !matches!(prev_state, BlockState::GloballyRejected), BlockState::GloballyRejected => !matches!(prev_state, BlockState::GloballyAccepted), diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index fa34cc4b42..d6eaa37af8 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -12,7 +12,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::{BTreeMap, HashMap}; +use std::collections::HashMap; use std::fmt::Debug; use std::sync::mpsc::Sender; @@ -39,7 +39,7 @@ use stacks_common::{debug, error, info, warn}; use crate::chainstate::{ProposalEvalConfig, SortitionsView}; use crate::client::{SignerSlotID, StackerDB, StacksClient}; use crate::config::SignerConfig; -use crate::runloop::{RunLoopCommand, SignerResult}; +use crate::runloop::SignerResult; use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; @@ -81,7 +81,7 @@ pub struct Signer { /// The reward cycle this signer belongs to pub reward_cycle: u64, /// Reward set signer addresses and their weights - pub signer_weights: HashMap, + pub signer_weights: HashMap, /// SignerDB for state management pub signer_db: SignerDb, /// Configuration for proposal evaluation @@ -259,17 +259,6 @@ impl SignerTrait for Signer { } } - fn process_command( - &mut self, - _stacks_client: &StacksClient, - _current_reward_cycle: u64, - command: Option, - ) { - if let Some(command) = 
command { - warn!("{self}: Received a command: {command:?}. V0 Signers do not support commands. Ignoring...") - } - } - fn has_unprocessed_blocks(&self) -> bool { self.signer_db .has_unprocessed_blocks(self.reward_cycle) @@ -292,40 +281,13 @@ impl From for Signer { SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); let proposal_config = ProposalEvalConfig::from(&signer_config); - // compute signer addresses *in reward cycle order* - let signer_ids_and_addrs: BTreeMap<_, _> = signer_config - .signer_entries - .signer_ids - .iter() - .map(|(addr, id)| (*id, *addr)) - .collect(); - - let signer_addresses: Vec<_> = signer_ids_and_addrs.into_values().collect(); - - let signer_weights = signer_addresses - .iter() - .map(|addr| { - let Some(signer_id) = signer_config.signer_entries.signer_ids.get(addr) else { - panic!("Malformed config: no signer ID for {}", addr); - }; - let Some(key_ids) = signer_config.signer_entries.signer_key_ids.get(signer_id) - else { - panic!( - "Malformed config: no key IDs for signer ID {} ({})", - signer_id, addr - ); - }; - (*addr, key_ids.len()) - }) - .collect(); - Self { private_key: signer_config.stacks_private_key, stackerdb, mainnet: signer_config.mainnet, signer_id: signer_config.signer_id, - signer_addresses, - signer_weights, + signer_addresses: signer_config.signer_entries.signer_addresses.clone(), + signer_weights: signer_config.signer_entries.signer_addr_to_weight.clone(), signer_slot_ids: signer_config.signer_slot_ids.clone(), reward_cycle: signer_config.reward_cycle, signer_db, @@ -679,22 +641,17 @@ impl Signer { &self, addrs: impl Iterator, ) -> u32 { - let signing_weight = addrs.fold(0usize, |signing_weight, stacker_address| { + addrs.fold(0u32, |signing_weight, stacker_address| { let stacker_weight = self.signer_weights.get(stacker_address).unwrap_or(&0); signing_weight.saturating_add(*stacker_weight) - }); - u32::try_from(signing_weight) - .unwrap_or_else(|_| panic!("FATAL: signing weight exceeds 
u32::MAX")) + }) } /// Compute the total signing weight fn compute_signature_total_weight(&self) -> u32 { - let total_weight = self - .signer_weights + self.signer_weights .values() - .fold(0usize, |acc, val| acc.saturating_add(*val)); - u32::try_from(total_weight) - .unwrap_or_else(|_| panic!("FATAL: total weight exceeds u32::MAX")) + .fold(0u32, |acc, val| acc.saturating_add(*val)) } /// Handle an observed rejection from another signer diff --git a/stacks-signer/src/v1/coordinator.rs b/stacks-signer/src/v1/coordinator.rs deleted file mode 100644 index 7fc2d238c4..0000000000 --- a/stacks-signer/src/v1/coordinator.rs +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::time::Instant; - -use blockstack_lib::chainstate::burn::ConsensusHashExtensions; -use slog::slog_debug; -use stacks_common::debug; -use stacks_common::types::chainstate::ConsensusHash; -use stacks_common::util::hash::Sha256Sum; -use wsts::curve::ecdsa; -use wsts::state_machine::PublicKeys; - -/// TODO: test this value and adjust as necessary. Maybe make configurable? -pub const COORDINATOR_OPERATION_TIMEOUT_SECS: u64 = 300; - -/// TODO: test this value and adjust as necessary. Maybe make configurable? 
-pub const COORDINATOR_TENURE_TIMEOUT_SECS: u64 = 600; - -/// The coordinator selector -#[derive(Clone, Debug)] -pub struct CoordinatorSelector { - /// The ordered list of potential coordinators for a specific consensus hash - coordinator_ids: Vec, - /// The current coordinator id - coordinator_id: u32, - /// The current coordinator index into the coordinator ids list - coordinator_index: usize, - /// The last message received time for the current coordinator - pub last_message_time: Option, - /// The time the coordinator started its tenure - tenure_start: Instant, - /// The public keys of the coordinators - public_keys: PublicKeys, -} - -impl From for CoordinatorSelector { - /// Create a new Coordinator selector from the given list of public keys - fn from(public_keys: PublicKeys) -> Self { - let coordinator_ids = - Self::calculate_coordinator_ids(&public_keys, &ConsensusHash::empty()); - let coordinator_id = *coordinator_ids - .first() - .expect("FATAL: No registered signers"); - let coordinator_index = 0; - let last_message_time = None; - let tenure_start = Instant::now(); - Self { - coordinator_ids, - coordinator_id, - coordinator_index, - last_message_time, - tenure_start, - public_keys, - } - } -} - -/// Whether or not to rotate to new coordinators in `update_coordinator` -const ROTATE_COORDINATORS: bool = false; - -impl CoordinatorSelector { - /// Update the coordinator id - fn update_coordinator(&mut self, new_coordinator_ids: Vec) { - self.last_message_time = None; - self.coordinator_index = if new_coordinator_ids != self.coordinator_ids { - // We have advanced our block height and should select from the new list - let mut new_index: usize = 0; - self.coordinator_ids = new_coordinator_ids; - let new_coordinator_id = *self - .coordinator_ids - .first() - .expect("FATAL: No registered signers"); - if ROTATE_COORDINATORS && new_coordinator_id == self.coordinator_id { - // If the newly selected coordinator is the same as the current and we have more than one 
available, advance immediately to the next - if self.coordinator_ids.len() > 1 { - new_index = new_index.saturating_add(1); - } - } - new_index - } else if ROTATE_COORDINATORS { - self.coordinator_index.saturating_add(1) % self.coordinator_ids.len() - } else { - self.coordinator_index - }; - self.coordinator_id = *self - .coordinator_ids - .get(self.coordinator_index) - .expect("FATAL: Invalid number of registered signers"); - self.tenure_start = Instant::now(); - self.last_message_time = None; - } - - /// Check the coordinator timeouts and update the selected coordinator accordingly - /// Returns the resulting coordinator ID. (Note: it may be unchanged) - pub fn refresh_coordinator(&mut self, pox_consensus_hash: &ConsensusHash) -> u32 { - let new_coordinator_ids = - Self::calculate_coordinator_ids(&self.public_keys, pox_consensus_hash); - if let Some(time) = self.last_message_time { - if time.elapsed().as_secs() > COORDINATOR_OPERATION_TIMEOUT_SECS { - // We have not received a message in a while from this coordinator. - // We should consider the operation finished and use a new coordinator id. 
- self.update_coordinator(new_coordinator_ids); - } - } else if self.tenure_start.elapsed().as_secs() > COORDINATOR_TENURE_TIMEOUT_SECS - || new_coordinator_ids != self.coordinator_ids - { - // Our tenure has been exceeded or we have advanced our block height and should select from the new list - self.update_coordinator(new_coordinator_ids); - } - self.coordinator_id - } - - /// Get the current coordinator id and public key - pub fn get_coordinator(&self) -> (u32, ecdsa::PublicKey) { - ( - self.coordinator_id, - *self - .public_keys - .signers - .get(&self.coordinator_id) - .expect("FATAL: missing public key for selected coordinator id"), - ) - } - - /// Calculate the ordered list of coordinator ids by comparing the provided public keys - pub fn calculate_coordinator_ids( - public_keys: &PublicKeys, - pox_consensus_hash: &ConsensusHash, - ) -> Vec { - debug!("Using pox_consensus_hash {pox_consensus_hash:?} for selecting coordinator"); - // Create combined hash of each signer's public key with pox_consensus_hash - let mut selection_ids = public_keys - .signers - .iter() - .map(|(&id, pk)| { - let pk_bytes = pk.to_bytes(); - let mut buffer = - Vec::with_capacity(pk_bytes.len() + pox_consensus_hash.as_bytes().len()); - buffer.extend_from_slice(&pk_bytes[..]); - buffer.extend_from_slice(pox_consensus_hash.as_bytes()); - let digest = Sha256Sum::from_data(&buffer).as_bytes().to_vec(); - (id, digest) - }) - .collect::>(); - - // Sort the selection IDs based on the hash - selection_ids.sort_by_key(|(_, hash)| hash.clone()); - // Return only the ids - selection_ids.iter().map(|(id, _)| *id).collect() - } -} -#[cfg(test)] -mod tests { - use super::*; - use crate::client::tests::{generate_random_consensus_hash, generate_signer_config}; - use crate::config::GlobalConfig; - - #[test] - fn calculate_coordinator_different_consensus_hashes_produces_unique_results() { - let number_of_tests = 5; - let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); 
- let public_keys = generate_signer_config(&config, 10, 4000) - .signer_entries - .public_keys; - let mut results = Vec::new(); - - for _ in 0..number_of_tests { - let result = CoordinatorSelector::calculate_coordinator_ids( - &public_keys, - &generate_random_consensus_hash(), - ); - results.push(result); - } - - // Check that not all coordinator IDs are the same - let all_ids_same = results.iter().all(|ids| ids == &results[0]); - assert!(!all_ids_same, "Not all coordinator IDs should be the same"); - } - - fn generate_calculate_coordinator_test_results( - random_consensus: bool, - count: usize, - ) -> Vec> { - let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); - let public_keys = generate_signer_config(&config, 10, 4000) - .signer_entries - .public_keys; - let mut results = Vec::new(); - let same_hash = generate_random_consensus_hash(); - for _ in 0..count { - let hash = if random_consensus { - generate_random_consensus_hash() - } else { - same_hash - }; - let result = CoordinatorSelector::calculate_coordinator_ids(&public_keys, &hash); - results.push(result); - } - results - } - - #[test] - fn calculate_coordinator_results_should_vary_or_match_based_on_hash() { - let results_with_random_hash = generate_calculate_coordinator_test_results(true, 5); - let all_ids_same = results_with_random_hash - .iter() - .all(|ids| ids == &results_with_random_hash[0]); - assert!(!all_ids_same, "Not all coordinator IDs should be the same"); - - let results_with_static_hash = generate_calculate_coordinator_test_results(false, 5); - let all_ids_same = results_with_static_hash - .iter() - .all(|ids| ids == &results_with_static_hash[0]); - assert!(all_ids_same, "All coordinator IDs should be the same"); - } -} diff --git a/stacks-signer/src/v1/mod.rs b/stacks-signer/src/v1/mod.rs deleted file mode 100644 index ed1d980016..0000000000 --- a/stacks-signer/src/v1/mod.rs +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public 
benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use libsigner::v1::messages::SignerMessage; - -use crate::v1::signer::Signer; - -/// The coordinator selector for the signer -pub mod coordinator; -/// The signer module for processing events -pub mod signer; -/// The stackerdb module for sending messages between signers and miners -pub mod stackerdb_manager; - -/// A v1 spawned signer -pub type SpawnedSigner = crate::SpawnedSigner; diff --git a/stacks-signer/src/v1/signer.rs b/stacks-signer/src/v1/signer.rs deleted file mode 100644 index aa8fcfb0d2..0000000000 --- a/stacks-signer/src/v1/signer.rs +++ /dev/null @@ -1,1764 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
-// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . -use std::collections::VecDeque; -use std::fmt::Debug; -use std::path::PathBuf; -use std::sync::mpsc::Sender; -use std::time::Instant; - -use blockstack_lib::chainstate::burn::ConsensusHashExtensions; -use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; -use blockstack_lib::chainstate::nakamoto::NakamotoBlock; -use blockstack_lib::chainstate::stacks::boot::SIGNERS_VOTING_FUNCTION_NAME; -use blockstack_lib::chainstate::stacks::StacksTransaction; -use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; -use blockstack_lib::util_lib::db::Error as DBError; -use hashbrown::HashSet; -use libsigner::v1::messages::{ - BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, -}; -use libsigner::{BlockProposal, SignerEvent}; -use rand_core::OsRng; -use slog::{slog_debug, slog_error, slog_info, slog_warn}; -use stacks_common::codec::{read_next, StacksMessageCodec}; -use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::Sha512Trunc256Sum; -use stacks_common::{debug, error, info, warn}; -use wsts::common::Signature; -use wsts::curve::keys::PublicKey; -use wsts::curve::point::Point; -use wsts::curve::scalar::Scalar; -use wsts::net::{Message, NonceRequest, Packet, SignatureShareRequest}; -use wsts::state_machine::coordinator::fire::Coordinator as FireCoordinator; -use wsts::state_machine::coordinator::{ - Config as CoordinatorConfig, Coordinator, State as CoordinatorState, -}; -use wsts::state_machine::signer::Signer as SignerStateMachine; -use wsts::state_machine::{OperationResult, SignError}; -use wsts::traits::Signer as _; -use wsts::v2; - -use super::stackerdb_manager::StackerDBManager; -use crate::chainstate::SortitionsView; -use crate::client::{ClientError, SignerSlotID, StacksClient}; -use crate::config::SignerConfig; 
-use crate::runloop::{RunLoopCommand, SignerCommand, SignerResult}; -use crate::signerdb::{BlockInfo, NakamotoBlockVote, SignerDb}; -use crate::v1::coordinator::CoordinatorSelector; -use crate::Signer as SignerTrait; - -/// The specific operations that a signer can perform -#[derive(PartialEq, Eq, Debug, Clone)] -pub enum Operation { - /// A DKG operation - Dkg, - /// A Sign operation - Sign, -} - -/// The Signer state -#[derive(PartialEq, Eq, Debug, Clone)] -pub enum State { - /// The signer is uninitialized and should read stackerdb to restore state - Uninitialized, - /// The signer is idle, waiting for messages and commands - Idle, - /// The signer is executing a DKG or Sign round - OperationInProgress(Operation), -} - -/// The stacks signer registered for the reward cycle -#[derive(Debug)] -pub struct Signer { - /// The coordinator for inbound messages for a specific reward cycle - pub coordinator: FireCoordinator, - /// The signing round used to sign messages for a specific reward cycle - pub state_machine: SignerStateMachine, - /// the state of the signer - pub state: State, - /// Received Commands that need to be processed - pub commands: VecDeque, - /// The stackerdb client session manager - pub stackerdb_manager: StackerDBManager, - /// Whether the signer is a mainnet signer or not - pub mainnet: bool, - /// The signer id - pub signer_id: u32, - /// The signer slot ids for the signers in the reward cycle - pub signer_slot_ids: Vec, - /// The addresses of other signers - pub signer_addresses: Vec, - /// The signer slot ids for the signers in the NEXT reward cycle - pub next_signer_slot_ids: Vec, - /// The addresses of the signers for the NEXT reward cycle - pub next_signer_addresses: Vec, - /// The reward cycle this signer belongs to - pub reward_cycle: u64, - /// The default tx fee in uSTX to use when the epoch is pre Nakamoto (Epoch 3.0). 
- pub tx_fee_ustx: u64, - /// If estimating the tx fee, the max tx fee in uSTX to use when the epoch is pre Nakamoto (Epoch 3.0) - /// If None, will not cap the fee. - pub max_tx_fee_ustx: Option, - /// The coordinator info for the signer - pub coordinator_selector: CoordinatorSelector, - /// The approved key registered to the contract - pub approved_aggregate_public_key: Option, - /// The current active miner's key (if we know it!) - pub miner_key: Option, - /// Signer DB path - pub db_path: PathBuf, - /// SignerDB for state management - pub signer_db: SignerDb, -} - -impl std::fmt::Display for Signer { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "Cycle #{} Signer #{}(C:{})", - self.reward_cycle, - self.signer_id, - self.coordinator_selector.get_coordinator().0, - ) - } -} - -impl SignerTrait for Signer { - /// Create a new signer from the given configuration - fn new(config: SignerConfig) -> Self { - Self::from(config) - } - - /// Return the reward cycle of the signer - fn reward_cycle(&self) -> u64 { - self.reward_cycle - } - - /// Process the event - fn process_event( - &mut self, - stacks_client: &StacksClient, - _sortition_state: &mut Option, - event: Option<&SignerEvent>, - res: &Sender>, - current_reward_cycle: u64, - ) { - let event_parity = match event { - Some(SignerEvent::BlockValidationResponse(_)) => Some(current_reward_cycle % 2), - // Block proposal events do have reward cycles, but each proposal has its own cycle, - // and the vec could be heterogeneous, so, don't differentiate. - Some(SignerEvent::MinerMessages(..)) - | Some(SignerEvent::NewBurnBlock { .. 
}) - | Some(SignerEvent::StatusCheck) - | None => None, - Some(SignerEvent::SignerMessages(msg_parity, ..)) => Some(u64::from(*msg_parity) % 2), - }; - let other_signer_parity = (self.reward_cycle + 1) % 2; - if event_parity == Some(other_signer_parity) { - return; - } - if self.approved_aggregate_public_key.is_none() { - if let Err(e) = self.refresh_dkg(stacks_client, res, current_reward_cycle) { - error!("{self}: failed to refresh DKG: {e}"); - } - } - self.refresh_coordinator(); - if self.approved_aggregate_public_key.is_none() { - if let Err(e) = self.refresh_dkg(stacks_client, res, current_reward_cycle) { - error!("{self}: failed to refresh DKG: {e}"); - } - } - self.refresh_coordinator(); - debug!("{self}: Processing event: {event:?}"); - let Some(event) = event else { - // No event. Do nothing. - debug!("{self}: No event received"); - return; - }; - match event { - SignerEvent::BlockValidationResponse(block_validate_response) => { - info!("{self}: Received a block proposal result from the stacks node..."); - self.handle_block_validate_response( - stacks_client, - block_validate_response, - res, - current_reward_cycle, - ) - } - SignerEvent::SignerMessages(signer_set, messages) => { - if *signer_set != self.stackerdb_manager.get_signer_set() { - debug!("{self}: Received a signer message for a reward cycle that does not belong to this signer. 
Ignoring..."); - return; - } - debug!( - "{self}: Received {} messages from the other signers...", - messages.len() - ); - self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); - } - SignerEvent::MinerMessages(messages, miner_key) => { - let miner_key = PublicKey::try_from(miner_key.to_bytes_compressed().as_slice()) - .expect("FATAL: could not convert from StacksPublicKey to PublicKey"); - self.miner_key = Some(miner_key); - if current_reward_cycle != self.reward_cycle { - // There is not point in processing blocks if we are not the current reward cycle (we can never actually contribute to signing these blocks) - debug!("{self}: Received a proposed block, but this signer's reward cycle is not the current one ({current_reward_cycle}). Ignoring..."); - return; - } - debug!( - "{self}: Received {} messages from the miner", - messages.len(); - "miner_key" => ?miner_key, - ); - self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); - } - SignerEvent::StatusCheck => { - debug!("{self}: Received a status check event.") - } - SignerEvent::NewBurnBlock { - burn_height, - burn_header_hash, - received_time, - } => { - info!("{self}: Received a new burn block event for block height {burn_height}"); - if let Err(e) = - self.signer_db - .insert_burn_block(burn_header_hash, *burn_height, received_time) - { - error!( - "Failed to write burn block event to signerdb"; - "err" => ?e, - "burn_header_hash" => %burn_header_hash, - "burn_height" => burn_height - ); - panic!("Failed to write burn block event to signerdb"); - } - } - } - } - - fn process_command( - &mut self, - stacks_client: &StacksClient, - current_reward_cycle: u64, - command: Option, - ) { - if let Some(command) = command { - let reward_cycle = command.reward_cycle; - if self.reward_cycle != reward_cycle { - warn!( - "{self}: not registered for reward cycle {reward_cycle}. 
Ignoring command: {command:?}" - ); - } else { - info!( - "{self}: Queuing an external runloop command ({:?}): {command:?}", - self.state_machine.public_keys.signers.get(&self.signer_id) - ); - self.commands.push_back(command.command); - } - } - self.process_next_command(stacks_client, current_reward_cycle); - } - - fn has_unprocessed_blocks(&self) -> bool { - self.signer_db - .has_unprocessed_blocks(self.reward_cycle) - .unwrap_or_else(|e| { - error!("{self}: Failed to check if there are pending blocks: {e:?}"); - // Assume there are pending blocks to prevent premature cleanup - true - }) - } -} - -impl Signer { - /// Attempt to process the next command in the queue, and update state accordingly - fn process_next_command(&mut self, stacks_client: &StacksClient, current_reward_cycle: u64) { - match &self.state { - State::Uninitialized => { - // We cannot process any commands until we have restored our state - warn!("{self}: Cannot process commands until state is restored. Waiting..."); - } - State::Idle => { - let Some(command) = self.commands.front() else { - debug!("{self}: Nothing to process. Waiting for command..."); - return; - }; - let coordinator_id = if matches!(command, SignerCommand::Dkg) { - // We cannot execute a DKG command if we are not the coordinator - Some(self.get_coordinator_dkg().0) - } else { - self.get_coordinator_sign(current_reward_cycle).0 - }; - if coordinator_id != Some(self.signer_id) { - debug!( - "{self}: Coordinator is {coordinator_id:?}. Will not process any commands...", - ); - return; - } - let command = self - .commands - .pop_front() - .expect("BUG: Already asserted that the command queue was not empty"); - self.execute_command(stacks_client, &command); - } - State::OperationInProgress(op) => { - // We cannot execute the next command until the current one is finished... - debug!( - "{self}: Waiting for {op:?} operation to finish. 
Coordinator state = {:?}", - self.coordinator.state - ); - } - } - } - /// Return the current coordinator. - /// If the current reward cycle is the active reward cycle, this is the miner, - /// so the first element of the tuple will be None (because the miner does not have a signer index). - /// Otherwise, the coordinator is the signer with the index returned by the coordinator selector. - fn get_coordinator_sign(&self, current_reward_cycle: u64) -> (Option, PublicKey) { - if self.reward_cycle == current_reward_cycle { - let Some(ref cur_miner) = self.miner_key else { - error!( - "Signer #{}: Could not lookup current miner while in active reward cycle", - self.signer_id - ); - let selected = self.coordinator_selector.get_coordinator(); - return (Some(selected.0), selected.1); - }; - // coordinator is the current miner. - (None, *cur_miner) - } else { - let selected = self.coordinator_selector.get_coordinator(); - (Some(selected.0), selected.1) - } - } - - /// Refresh the next signer data from the given configuration data - #[allow(dead_code)] - fn update_signer(&mut self, new_signer_config: &SignerConfig) { - self.next_signer_addresses = new_signer_config - .signer_entries - .signer_ids - .keys() - .copied() - .collect(); - self.next_signer_slot_ids = new_signer_config.signer_slot_ids.clone(); - } - - /// Get the current coordinator for executing DKG - /// This will always use the coordinator selector to determine the coordinator - fn get_coordinator_dkg(&self) -> (u32, PublicKey) { - self.coordinator_selector.get_coordinator() - } - - /// Read stackerdb messages in case the signer was started late or restarted and missed incoming DKG messages - pub fn read_dkg_stackerdb_messages( - &mut self, - stacks_client: &StacksClient, - res: &Sender>, - current_reward_cycle: u64, - ) -> Result<(), ClientError> { - if self.state != State::Uninitialized { - // We should only read stackerdb if we are uninitialized - return Ok(()); - } - let ordered_packets = self - 
.stackerdb_manager - .get_dkg_packets(&self.signer_slot_ids)? - .iter() - .filter_map(|packet| { - let coordinator_pubkey = if Self::is_dkg_message(&packet.msg) { - self.get_coordinator_dkg().1 - } else { - debug!( - "{self}: Received a non-DKG message in the DKG message queue. Ignoring it." - ); - return None; - }; - self.verify_packet(stacks_client, packet.clone(), &coordinator_pubkey) - }) - .collect::>(); - // We successfully read stackerdb so we are no longer uninitialized - self.state = State::Idle; - debug!( - "{self}: Processing {} DKG messages from stackerdb: {ordered_packets:?}", - ordered_packets.len() - ); - self.handle_packets(stacks_client, res, &ordered_packets, current_reward_cycle); - Ok(()) - } -} - -impl From for Signer { - fn from(signer_config: SignerConfig) -> Self { - let mut stackerdb_manager = StackerDBManager::from(&signer_config); - - let num_signers = signer_config - .signer_entries - .count_signers() - .expect("FATAL: Too many registered signers to fit in a u32"); - let num_keys = signer_config - .signer_entries - .count_keys() - .expect("FATAL: Too many key ids to fit in a u32"); - let threshold = signer_config - .signer_entries - .get_signing_threshold() - .expect("FATAL: Too many key ids to fit in a u32"); - let dkg_threshold = signer_config - .signer_entries - .get_dkg_threshold() - .expect("FATAL: Too many key ids to fit in a u32"); - - let coordinator_config = CoordinatorConfig { - threshold, - dkg_threshold, - num_signers, - num_keys, - message_private_key: signer_config.ecdsa_private_key, - dkg_public_timeout: signer_config.dkg_public_timeout, - dkg_private_timeout: signer_config.dkg_private_timeout, - dkg_end_timeout: signer_config.dkg_end_timeout, - nonce_timeout: signer_config.nonce_timeout, - sign_timeout: signer_config.sign_timeout, - signer_key_ids: signer_config.signer_entries.coordinator_key_ids, - signer_public_keys: signer_config.signer_entries.signer_public_keys, - }; - - let coordinator = 
FireCoordinator::new(coordinator_config); - let coordinator_selector = - CoordinatorSelector::from(signer_config.signer_entries.public_keys.clone()); - - debug!( - "Reward cycle #{} Signer #{}: initial coordinator is signer {}", - signer_config.reward_cycle, - signer_config.signer_id, - coordinator_selector.get_coordinator().0 - ); - let signer_db = - SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); - - let mut state_machine = SignerStateMachine::new( - threshold, - num_signers, - num_keys, - signer_config.signer_id, - signer_config.key_ids, - signer_config.ecdsa_private_key, - signer_config.signer_entries.public_keys, - ); - - if let Some(state) = load_encrypted_signer_state( - &mut stackerdb_manager, - signer_config.signer_slot_id, - &state_machine.network_private_key, - ).or_else(|err| { - warn!("Failed to load encrypted signer state from StackerDB, falling back to SignerDB: {err}"); - load_encrypted_signer_state( - &signer_db, - signer_config.reward_cycle, - &state_machine.network_private_key) - }).expect("Failed to load encrypted signer state from both StackerDB and SignerDB") { - state_machine.signer = state; - }; - - Self { - coordinator, - state_machine, - state: State::Uninitialized, - commands: VecDeque::new(), - stackerdb_manager, - mainnet: signer_config.mainnet, - signer_id: signer_config.signer_id, - signer_addresses: signer_config - .signer_entries - .signer_ids - .into_keys() - .collect(), - signer_slot_ids: signer_config.signer_slot_ids.clone(), - next_signer_slot_ids: vec![], - next_signer_addresses: vec![], - reward_cycle: signer_config.reward_cycle, - tx_fee_ustx: signer_config.tx_fee_ustx, - max_tx_fee_ustx: signer_config.max_tx_fee_ustx, - coordinator_selector, - approved_aggregate_public_key: None, - miner_key: None, - db_path: signer_config.db_path, - signer_db, - } - } -} - -impl Signer { - /// Refresh the coordinator selector - pub fn refresh_coordinator(&mut self) { - // TODO: do not use an empty consensus 
hash - let pox_consensus_hash = ConsensusHash::empty(); - let old_coordinator_id = self.coordinator_selector.get_coordinator().0; - let updated_coordinator_id = self - .coordinator_selector - .refresh_coordinator(&pox_consensus_hash); - if old_coordinator_id != updated_coordinator_id { - debug!( - "{self}: Coordinator updated. Resetting state to Idle."; - "old_coordinator_id" => {old_coordinator_id}, - "updated_coordinator_id" => {updated_coordinator_id}, - "pox_consensus_hash" => %pox_consensus_hash - ); - self.coordinator.state = CoordinatorState::Idle; - self.state = State::Idle; - } - } - - /// Finish an operation and update the coordinator selector accordingly - fn finish_operation(&mut self) { - self.state = State::Idle; - self.coordinator_selector.last_message_time = None; - } - - /// Update operation - fn update_operation(&mut self, operation: Operation) { - self.state = State::OperationInProgress(operation); - self.coordinator_selector.last_message_time = Some(Instant::now()); - } - - /// Execute the given command and update state accordingly - fn execute_command(&mut self, stacks_client: &StacksClient, command: &SignerCommand) { - match command { - SignerCommand::Dkg => { - crate::monitoring::increment_commands_processed("dkg"); - if self.approved_aggregate_public_key.is_some() { - debug!("Reward cycle #{} Signer #{}: Already have an aggregate key. Ignoring DKG command.", self.reward_cycle, self.signer_id); - return; - } - let vote_round = match stacks_client.get_last_round(self.reward_cycle) { - Ok(last_round) => last_round, - Err(e) => { - error!("{self}: Unable to perform DKG. 
Failed to get last round from stacks node: {e:?}"); - return; - } - }; - // The dkg id will increment internally following "start_dkg_round" so do not increment it here - self.coordinator.current_dkg_id = vote_round.unwrap_or(0); - info!( - "{self}: Starting DKG vote"; - "round" => self.coordinator.current_dkg_id.wrapping_add(1), - "cycle" => self.reward_cycle, - ); - match self.coordinator.start_dkg_round() { - Ok(msg) => { - let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); - debug!("{self}: ACK: {ack:?}",); - self.update_operation(Operation::Dkg); - } - Err(e) => { - error!("{self}: Failed to start DKG: {e:?}",); - return; - } - } - self.update_operation(Operation::Dkg); - } - SignerCommand::Sign { - block_proposal, - is_taproot, - merkle_root, - } => { - crate::monitoring::increment_commands_processed("sign"); - if self.approved_aggregate_public_key.is_none() { - debug!("{self}: Cannot sign a block without an approved aggregate public key. Ignore it."); - return; - } - let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); - let mut block_info = self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - .unwrap_or_else(|_| Some(BlockInfo::from(block_proposal.clone()))) - .unwrap_or_else(|| BlockInfo::from(block_proposal.clone())); - if block_info.signed_over { - debug!("{self}: Received a sign command for a block we are already signing over. 
Ignore it."); - return; - } - info!("{self}: Signing block"; - "block_consensus_hash" => %block_proposal.block.header.consensus_hash, - "block_height" => block_proposal.block.header.chain_length, - "pre_sign_block_id" => %block_proposal.block.block_id(), - ); - match self.coordinator.start_signing_round( - &block_proposal.serialize_to_vec(), - *is_taproot, - *merkle_root, - ) { - Ok(msg) => { - let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); - debug!("{self}: ACK: {ack:?}",); - block_info.signed_over = true; - self.signer_db - .insert_block(&block_info) - .unwrap_or_else(|e| { - error!("{self}: Failed to insert block in DB: {e:?}"); - }); - self.update_operation(Operation::Sign); - } - Err(e) => { - error!("{self}: Failed to start signing block: {e:?}",); - return; - } - } - self.update_operation(Operation::Sign); - } - } - } - - /// Handle the block validate response returned from our prior calls to submit a block for validation - fn handle_block_validate_response( - &mut self, - stacks_client: &StacksClient, - block_validate_response: &BlockValidateResponse, - res: &Sender>, - current_reward_cycle: u64, - ) { - let mut block_info = match block_validate_response { - BlockValidateResponse::Ok(block_validate_ok) => { - crate::monitoring::increment_block_validation_responses(true); - let signer_signature_hash = block_validate_ok.signer_signature_hash; - // For mutability reasons, we need to take the block_info out of the map and add it back after processing - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { - Ok(Some(block_info)) => block_info, - Ok(None) => { - // We have not seen this block before. Why are we getting a response for it? - debug!("{self}: Received a block validate response for a block we have not seen before. 
Ignoring..."); - return; - } - Err(e) => { - error!("{self}: Failed to lookup block in signer db: {e:?}",); - return; - } - }; - let is_valid = self.verify_block_transactions(stacks_client, &block_info.block); - block_info.valid = Some(is_valid); - self.signer_db - .insert_block(&block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); - info!( - "{self}: Treating block validation for block {} as valid: {:?}", - &block_info.block.block_id(), - block_info.valid - ); - block_info - } - BlockValidateResponse::Reject(block_validate_reject) => { - crate::monitoring::increment_block_validation_responses(false); - let signer_signature_hash = block_validate_reject.signer_signature_hash; - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { - Ok(Some(block_info)) => block_info, - Ok(None) => { - // We have not seen this block before. Why are we getting a response for it? - debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); - return; - } - Err(e) => { - error!("{self}: Failed to lookup block in signer db: {e:?}"); - return; - } - }; - block_info.valid = Some(false); - // Submit a rejection response to the .signers contract for miners - // to observe so they know to send another block and to prove signers are doing work); - warn!("{self}: Broadcasting a block rejection due to stacks node validation failure..."); - if let Err(e) = self - .stackerdb_manager - .send_message_with_retry(block_validate_reject.clone().into()) - { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); - } - block_info - } - }; - if let Some(mut nonce_request) = block_info.ext.take_nonce_request() { - debug!("{self}: Received a block validate response from the stacks node for a block we already received a nonce request for. Responding to the nonce request..."); - // We have received validation from the stacks node. 
Determine our vote and update the request message - self.determine_vote(&mut block_info, &mut nonce_request); - // Send the nonce request through with our vote - let packet = Packet { - msg: Message::NonceRequest(nonce_request), - sig: vec![], - }; - self.handle_packets(stacks_client, res, &[packet], current_reward_cycle); - } - info!( - "{self}: Received a block validate response"; - "block_hash" => block_info.block.header.block_hash(), - "valid" => block_info.valid, - "signed_over" => block_info.signed_over, - ); - self.signer_db - .insert_block(&block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); - } - - /// Handle signer messages submitted to signers stackerdb - fn handle_signer_messages( - &mut self, - stacks_client: &StacksClient, - res: &Sender>, - messages: &[SignerMessage], - current_reward_cycle: u64, - ) { - let packets: Vec = messages - .iter() - .filter_map(|msg| match msg { - SignerMessage::DkgResults { .. } - | SignerMessage::BlockResponse(_) - | SignerMessage::EncryptedSignerState(_) - | SignerMessage::Transactions(_) => None, - // TODO: if a signer tries to trigger DKG and we already have one set in the contract, ignore the request. 
- SignerMessage::Packet(packet) => { - let coordinator_pubkey = if Self::is_dkg_message(&packet.msg) { - self.get_coordinator_dkg().1 - } else { - self.get_coordinator_sign(current_reward_cycle).1 - }; - self.verify_packet(stacks_client, packet.clone(), &coordinator_pubkey) - } - }) - .collect(); - self.handle_packets(stacks_client, res, &packets, current_reward_cycle); - } - - /// Helper function for determining if the provided message is a DKG specific message - fn is_dkg_message(msg: &Message) -> bool { - matches!( - msg, - Message::DkgBegin(_) - | Message::DkgEnd(_) - | Message::DkgEndBegin(_) - | Message::DkgPrivateBegin(_) - | Message::DkgPrivateShares(_) - | Message::DkgPublicShares(_) - ) - } - - /// Process inbound packets as both a signer and a coordinator - /// Will send outbound packets and operation results as appropriate - fn handle_packets( - &mut self, - stacks_client: &StacksClient, - res: &Sender>, - packets: &[Packet], - current_reward_cycle: u64, - ) { - if let Ok(packets_len) = packets.len().try_into() { - crate::monitoring::increment_inbound_packets(packets_len); - } - let signer_outbound_messages = self - .state_machine - .process_inbound_messages(packets) - .unwrap_or_else(|e| { - error!("{self}: Failed to process inbound messages as a signer: {e:?}",); - vec![] - }); - - // Next process the message as the coordinator - let (coordinator_outbound_messages, operation_results) = if self.reward_cycle - != current_reward_cycle - { - self.coordinator - .process_inbound_messages(packets) - .unwrap_or_else(|e| { - error!("{self}: Failed to process inbound messages as a coordinator: {e:?}"); - (vec![], vec![]) - }) - } else { - (vec![], vec![]) - }; - - if !operation_results.is_empty() { - // We have finished a signing or DKG round, either successfully or due to error. - // Regardless of the why, update our state to Idle as we should not expect the operation to continue. 
- self.process_operation_results(stacks_client, &operation_results); - self.send_operation_results(res, operation_results); - self.finish_operation(); - } else if !packets.is_empty() { - // We have received a message. Update our state accordingly - // Let us be extra explicit in case a new state type gets added to wsts' state machine - match &self.coordinator.state { - CoordinatorState::Idle => {} - CoordinatorState::DkgPublicDistribute - | CoordinatorState::DkgPublicGather - | CoordinatorState::DkgPrivateDistribute - | CoordinatorState::DkgPrivateGather - | CoordinatorState::DkgEndDistribute - | CoordinatorState::DkgEndGather => { - self.update_operation(Operation::Dkg); - } - CoordinatorState::NonceRequest(_, _) - | CoordinatorState::NonceGather(_, _) - | CoordinatorState::SigShareRequest(_, _) - | CoordinatorState::SigShareGather(_, _) => { - self.update_operation(Operation::Sign); - } - } - } - - if packets - .iter() - .any(|packet| matches!(packet.msg, Message::DkgEnd(_))) - { - debug!("{self}: Saving signer state"); - self.save_signer_state() - .unwrap_or_else(|_| panic!("{self}: Failed to save signer state")); - } - self.send_outbound_messages(signer_outbound_messages); - self.send_outbound_messages(coordinator_outbound_messages); - } - - /// Validate a signature share request, updating its message where appropriate. - /// If the request is for a block it has already agreed to sign, it will overwrite the message with the agreed upon value - /// Returns whether the request is valid or not. - fn validate_signature_share_request(&self, request: &mut SignatureShareRequest) -> bool { - let Some(block_vote): Option = read_next(&mut &request.message[..]).ok() - else { - // We currently reject anything that is not a block vote - debug!( - "{self}: Received a signature share request for an unknown message stream. 
Reject it.", - ); - return false; - }; - - match self - .signer_db - .block_lookup(self.reward_cycle, &block_vote.signer_signature_hash) - .unwrap_or_else(|_| panic!("{self}: Failed to connect to DB")) - .map(|b| b.vote) - { - Some(Some(vote)) => { - // Overwrite with our agreed upon value in case another message won majority or the coordinator is trying to cheat... - debug!( - "{self}: Set vote (rejected = {}) to {vote:?}", block_vote.rejected; - "requested_sighash" => %block_vote.signer_signature_hash, - ); - request.message = vote.serialize_to_vec(); - true - } - Some(None) => { - // We never agreed to sign this block. Reject it. - // This can happen if the coordinator received enough votes to sign yes - // or no on a block before we received validation from the stacks node. - debug!( - "{self}: Received a signature share request for a block we never agreed to sign. Ignore it."; - "requested_sighash" => %block_vote.signer_signature_hash, - ); - false - } - None => { - // We will only sign across block hashes or block hashes + b'n' byte for - // blocks we have seen a Nonce Request for (and subsequent validation) - // We are missing the context here necessary to make a decision. Reject the block - debug!( - "{self}: Received a signature share request from an unknown block. Reject it."; - "requested_sighash" => %block_vote.signer_signature_hash, - ); - false - } - } - } - - /// Validate a nonce request, updating its message appropriately. 
- /// If the request is for a block, we will update the request message - /// as either a hash indicating a vote no or the signature hash indicating a vote yes - /// Returns whether the request is valid or not - fn validate_nonce_request( - &mut self, - stacks_client: &StacksClient, - nonce_request: &mut NonceRequest, - ) -> Option { - let Some(block_proposal) = - BlockProposal::consensus_deserialize(&mut nonce_request.message.as_slice()).ok() - else { - // We currently reject anything that is not a valid block proposal - warn!("{self}: Received a nonce request for an unknown message stream. Reject it.",); - return None; - }; - if block_proposal.reward_cycle != self.reward_cycle { - // We are not signing for this reward cycle. Reject the block - warn!( - "{self}: Received a nonce request for a different reward cycle. Reject it."; - "requested_reward_cycle" => block_proposal.reward_cycle, - ); - return None; - } - // TODO: could add a check to ignore an old burn block height if we know its oudated. Would require us to store the burn block height we last saw on the side. - let signer_signature_hash = block_proposal.block.header.signer_signature_hash(); - let Some(mut block_info) = self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - .expect("Failed to connect to signer DB") - else { - debug!( - "{self}: received a nonce request for a new block. Submit block for validation. "; - "signer_sighash" => %signer_signature_hash, - ); - let block_info = BlockInfo::new_v1_with_request(block_proposal, nonce_request.clone()); - stacks_client - .submit_block_for_validation(block_info.block.clone()) - .unwrap_or_else(|e| { - warn!("{self}: Failed to submit block for validation: {e:?}",); - }); - return Some(block_info); - }; - - if block_info.valid.is_none() { - // We have not yet received validation from the stacks node. Cache the request and wait for validation - debug!("{self}: We have yet to receive validation from the stacks node for a nonce request. 
Cache the nonce request and wait for block validation..."); - block_info - .ext - .set_nonce_request(nonce_request.clone()) - .unwrap_or_else(|e| { - warn!("{self}: Failed to set nonce_request: {e:?}",); - }); - return Some(block_info); - } - - self.determine_vote(&mut block_info, nonce_request); - Some(block_info) - } - - /// Verify the transactions in a block are as expected - fn verify_block_transactions( - &mut self, - stacks_client: &StacksClient, - block: &NakamotoBlock, - ) -> bool { - let next_reward_cycle = self.reward_cycle.wrapping_add(1); - let approved_aggregate_public_key = stacks_client - .get_approved_aggregate_key(next_reward_cycle) - .unwrap_or(None); - if approved_aggregate_public_key.is_some() { - // We do not enforce a block contain any transactions except the aggregate votes when it is NOT already set for the upcoming signers' reward cycle - // Otherwise it is a waste of block space and time to enforce as the desired outcome has been reached. - debug!("{self}: Already have an aggregate key for the next signer set's reward cycle ({}). Skipping transaction verification...", next_reward_cycle); - return true; - } - if let Ok(expected_transactions) = self.get_expected_transactions(stacks_client) { - //It might be worth building a hashset of the blocks' txids and checking that against the expected transaction's txid. 
- let block_tx_hashset = block.txs.iter().map(|tx| tx.txid()).collect::>(); - // Ensure the block contains the transactions we expect - let missing_transactions = expected_transactions - .into_iter() - .filter_map(|tx| { - if !block_tx_hashset.contains(&tx.txid()) { - debug!("{self}: expected txid {} is in the block", &tx.txid()); - Some(tx) - } else { - debug!("{self}: missing expected txid {}", &tx.txid()); - None - } - }) - .collect::>(); - let is_valid = missing_transactions.is_empty(); - if !is_valid { - debug!("{self}: Broadcasting a block rejection due to missing expected transactions..."); - let block_rejection = BlockRejection::new( - block.header.signer_signature_hash(), - RejectCode::MissingTransactions(missing_transactions), - ); - // Submit signature result to miners to observe - if let Err(e) = self - .stackerdb_manager - .send_message_with_retry(block_rejection.into()) - { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); - } - } - is_valid - } else { - // Failed to connect to the stacks node to get transactions. Cannot validate the block. Reject it. - debug!("{self}: Broadcasting a block rejection due to signer connectivity issues...",); - let block_rejection = BlockRejection::new( - block.header.signer_signature_hash(), - RejectCode::ConnectivityIssues, - ); - // Submit signature result to miners to observe - if let Err(e) = self - .stackerdb_manager - .send_message_with_retry(block_rejection.into()) - { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); - } - false - } - } - - /// Get transactions from stackerdb for the given addresses and account nonces, filtering out any malformed transactions - fn get_signer_transactions( - &mut self, - nonces: &std::collections::HashMap, - ) -> Result, ClientError> { - let transactions: Vec<_> = self - .stackerdb_manager - .get_current_transactions()? 
- .into_iter() - .filter_map(|tx| { - if !NakamotoSigners::valid_vote_transaction(nonces, &tx, self.mainnet) { - return None; - } - Some(tx) - }) - .collect(); - Ok(transactions) - } - - /// Get the transactions that should be included in the block, filtering out any invalid transactions - fn get_expected_transactions( - &mut self, - stacks_client: &StacksClient, - ) -> Result, ClientError> { - if self.next_signer_slot_ids.is_empty() { - debug!("{self}: No next signers. Skipping transaction retrieval.",); - return Ok(vec![]); - } - // Get all the account nonces for the next signers - let account_nonces = self.get_account_nonces(stacks_client, &self.next_signer_addresses); - let transactions: Vec<_> = self - .stackerdb_manager - .get_next_transactions(&self.next_signer_slot_ids)?; - let mut filtered_transactions = std::collections::HashMap::new(); - NakamotoSigners::update_filtered_transactions( - &mut filtered_transactions, - &account_nonces, - self.mainnet, - transactions, - ); - // We only allow enforcement of one special cased transaction per signer address per block - Ok(filtered_transactions.into_values().collect()) - } - - /// Determine the vote for a block and update the block info and nonce request accordingly - fn determine_vote(&self, block_info: &mut BlockInfo, nonce_request: &mut NonceRequest) { - let rejected = !block_info.valid.unwrap_or(false); - if rejected { - debug!("{self}: Rejecting block {}", block_info.block.block_id()); - } else { - debug!("{self}: Accepting block {}", block_info.block.block_id()); - } - let block_vote = NakamotoBlockVote { - signer_signature_hash: block_info.block.header.signer_signature_hash(), - rejected: !block_info.valid.unwrap_or(false), - }; - let block_vote_bytes = block_vote.serialize_to_vec(); - // Cache our vote - block_info.vote = Some(block_vote); - nonce_request.message = block_vote_bytes; - } - - /// Verify a chunk is a valid wsts packet. Returns the packet if it is valid, else None. 
- /// NOTE: The packet will be updated if the signer wishes to respond to NonceRequest - /// and SignatureShareRequests with a different message than what the coordinator originally sent. - /// This is done to prevent a malicious coordinator from sending a different message than what was - /// agreed upon and to support the case where the signer wishes to reject a block by voting no - fn verify_packet( - &mut self, - stacks_client: &StacksClient, - mut packet: Packet, - coordinator_public_key: &PublicKey, - ) -> Option { - // We only care about verified wsts packets. Ignore anything else. - if packet.verify(&self.state_machine.public_keys, coordinator_public_key) { - match &mut packet.msg { - Message::SignatureShareRequest(request) => { - if !self.validate_signature_share_request(request) { - return None; - } - } - Message::NonceRequest(request) => { - let Some(updated_block_info) = - self.validate_nonce_request(stacks_client, request) - else { - warn!("Failed to validate and parse nonce request"); - return None; - }; - self.signer_db - .insert_block(&updated_block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); - let process_request = updated_block_info.vote.is_some(); - if !process_request { - debug!("Failed to validate nonce request"); - return None; - } - } - _ => { - // Nothing to do for other message types - } - } - Some(packet) - } else { - debug!( - "{self}: Failed to verify wsts packet with {}: {packet:?}", - coordinator_public_key - ); - None - } - } - - /// Processes the operation results, broadcasting block acceptance or rejection messages - /// and DKG vote results accordingly - fn process_operation_results( - &mut self, - stacks_client: &StacksClient, - operation_results: &[OperationResult], - ) { - for operation_result in operation_results { - // Signers only every trigger non-taproot signing rounds over blocks. 
Ignore SignTaproot results - match operation_result { - OperationResult::Sign(signature) => { - crate::monitoring::increment_operation_results("sign"); - info!("{self}: Received signature result"); - self.process_signature(signature); - } - OperationResult::SignTaproot(_) => { - crate::monitoring::increment_operation_results("sign_taproot"); - debug!("{self}: Received a signature result for a taproot signature. Nothing to broadcast as we currently sign blocks with a FROST signature."); - } - OperationResult::Dkg(aggregate_key) => { - crate::monitoring::increment_operation_results("dkg"); - self.process_dkg(stacks_client, aggregate_key); - } - OperationResult::SignError(e) => { - crate::monitoring::increment_operation_results("sign_error"); - warn!("{self}: Received a Sign error: {e:?}"); - self.process_sign_error(e); - } - OperationResult::DkgError(e) => { - crate::monitoring::increment_operation_results("dkg_error"); - warn!("{self}: Received a DKG error: {e:?}"); - // TODO: process these errors and track malicious signers to report - } - } - } - } - - /// Process a dkg result by broadcasting a vote to the stacks node - fn process_dkg(&mut self, stacks_client: &StacksClient, dkg_public_key: &Point) { - let mut dkg_results_bytes = vec![]; - debug!( - "{self}: Received DKG result. 
Broadcasting vote to the stacks node..."; - "dkg_public_key" => %dkg_public_key - ); - if let Err(e) = SignerMessage::serialize_dkg_result( - &mut dkg_results_bytes, - dkg_public_key, - self.coordinator.party_polynomials.iter(), - ) { - error!("{}: Failed to serialize DKGResults message for StackerDB, will continue operating.", self.signer_id; - "error" => %e); - } else if let Err(e) = self - .stackerdb_manager - .send_message_bytes_with_retry(&MessageSlotID::DkgResults, dkg_results_bytes) - { - error!("{}: Failed to send DKGResults message to StackerDB, will continue operating.", self.signer_id; - "error" => %e); - } - - // Get our current nonce from the stacks node and compare it against what we have sitting in the stackerdb instance - let signer_address = stacks_client.get_signer_address(); - // Retreieve ALL account nonces as we may have transactions from other signers in our stackerdb slot that we care about - let account_nonces = self.get_account_nonces(stacks_client, &self.signer_addresses); - let account_nonce = account_nonces.get(signer_address).unwrap_or(&0); - let signer_transactions = self - .get_signer_transactions(&account_nonces) - .map_err(|e| { - error!("{self}: Unable to get signer transactions: {e:?}."); - }) - .unwrap_or_default(); - // If we have a transaction in the stackerdb slot, we need to increment the nonce hence the +1, else should use the account nonce - let next_nonce = signer_transactions - .first() - .map(|tx| tx.get_origin_nonce().wrapping_add(1)) - .unwrap_or(*account_nonce); - let epoch = stacks_client - .get_node_epoch() - .unwrap_or(StacksEpochId::Epoch24); - match self.build_dkg_vote(stacks_client, &epoch, next_nonce, *dkg_public_key) { - Ok(new_transaction) => { - if let Err(e) = self.broadcast_dkg_vote( - stacks_client, - epoch, - signer_transactions, - new_transaction, - ) { - warn!( - "{self}: Failed to broadcast DKG public key vote ({dkg_public_key:?}): {e:?}" - ); - } - } - Err(e) => { - warn!( - "{self}: Failed to build 
DKG public key vote ({dkg_public_key:?}) transaction: {e:?}." - ); - } - } - } - - /// Build a signed DKG vote transaction - fn build_dkg_vote( - &mut self, - stacks_client: &StacksClient, - epoch: &StacksEpochId, - nonce: u64, - dkg_public_key: Point, - ) -> Result { - let mut unsigned_tx = stacks_client.build_unsigned_vote_for_aggregate_public_key( - self.stackerdb_manager.get_signer_slot_id().0, - self.coordinator.current_dkg_id, - dkg_public_key, - self.reward_cycle, - nonce, - )?; - let tx_fee = if epoch < &StacksEpochId::Epoch30 { - info!("{self}: in pre Epoch 3.0 cycles, must set a transaction fee for the DKG vote."); - let fee = if let Some(max_fee) = self.max_tx_fee_ustx { - let estimated_fee = stacks_client - .get_medium_estimated_fee_ustx(&unsigned_tx) - .map_err(|e| { - warn!("{self}: unable to estimate fee for DKG vote transaction: {e:?}."); - e - }) - .unwrap_or(self.tx_fee_ustx); - std::cmp::min(estimated_fee, max_fee) - } else { - self.tx_fee_ustx - }; - debug!("{self}: Using a fee of {fee} uSTX for DKG vote transaction."); - fee - } else { - 0 - }; - unsigned_tx.set_tx_fee(tx_fee); - stacks_client.sign_transaction(unsigned_tx) - } - - // Get the account nonces for the provided list of signer addresses - fn get_account_nonces( - &self, - stacks_client: &StacksClient, - signer_addresses: &[StacksAddress], - ) -> std::collections::HashMap { - let mut account_nonces = std::collections::HashMap::with_capacity(signer_addresses.len()); - for address in signer_addresses { - let Ok(account_nonce) = stacks_client.get_account_nonce(address) else { - warn!("{self}: Unable to get account nonce for address: {address}."); - continue; - }; - account_nonces.insert(*address, account_nonce); - } - account_nonces - } - - /// broadcast the dkg vote transaction according to the current epoch - fn broadcast_dkg_vote( - &mut self, - stacks_client: &StacksClient, - epoch: StacksEpochId, - mut signer_transactions: Vec, - new_transaction: StacksTransaction, - ) -> Result<(), 
ClientError> { - let txid = new_transaction.txid(); - if self.approved_aggregate_public_key.is_some() { - // We do not enforce a block contain any transactions except the aggregate votes when it is NOT already set - info!( - "{self}: Already has an approved aggregate key. Do not broadcast the transaction ({txid:?})." - ); - return Ok(()); - } - if epoch >= StacksEpochId::Epoch30 { - debug!("{self}: Received a DKG result while in epoch 3.0. Broadcast the transaction only to stackerDB."); - } else if epoch == StacksEpochId::Epoch25 { - debug!("{self}: Received a DKG result while in epoch 2.5. Broadcast the transaction to the mempool."); - stacks_client.submit_transaction(&new_transaction)?; - info!("{self}: Submitted DKG vote transaction ({txid:?}) to the mempool"); - } else { - debug!("{self}: Received a DKG result, but are in an unsupported epoch. Do not broadcast the transaction ({}).", new_transaction.txid()); - return Ok(()); - } - // For all Pox-4 epochs onwards, broadcast the results also to stackerDB for other signers/miners to observe - signer_transactions.push(new_transaction); - let signer_message = SignerMessage::Transactions(signer_transactions); - self.stackerdb_manager - .send_message_with_retry(signer_message)?; - crate::monitoring::increment_dkg_votes_submitted(); - info!("{self}: Broadcasted DKG vote transaction ({txid}) to stacker DB"); - Ok(()) - } - - /// Process a signature from a signing round by deserializing the signature and - /// broadcasting an appropriate Reject or Approval message to stackerdb - fn process_signature(&mut self, signature: &Signature) { - // Deserialize the signature result and broadcast an appropriate Reject or Approval message to stackerdb - let message = self.coordinator.get_message(); - let Some(block_vote): Option = read_next(&mut &message[..]).ok() else { - debug!("{self}: Received a signature result for a non-block. 
Nothing to broadcast."); - return; - }; - - let block_submission = if block_vote.rejected { - crate::monitoring::increment_block_responses_sent(false); - // We signed a rejection message. Return a rejection message - BlockResponse::rejected(block_vote.signer_signature_hash, signature.clone()) - } else { - crate::monitoring::increment_block_responses_sent(true); - // we agreed to sign the block hash. Return an approval message - BlockResponse::accepted(block_vote.signer_signature_hash, signature.clone()) - }; - - // Submit signature result to miners to observe - info!("{self}: Submit block response: {block_submission}"); - if let Err(e) = self - .stackerdb_manager - .send_message_with_retry(block_submission.into()) - { - warn!("{self}: Failed to send block submission to stacker-db: {e:?}"); - } - } - - /// Process a sign error from a signing round, broadcasting a rejection message to stackerdb accordingly - fn process_sign_error(&mut self, e: &SignError) { - let message = self.coordinator.get_message(); - // We do not sign across blocks, but across their hashes. however, the first sign request is always across the block - // so we must handle this case first - - let block: NakamotoBlock = read_next(&mut &message[..]).ok().unwrap_or({ - // This is not a block so maybe its across its hash - let Some(block_vote): Option = read_next(&mut &message[..]).ok() - else { - // This is not a block vote either. We cannot process this error - debug!( - "{self}: Received a signature error for a non-block. Nothing to broadcast." - ); - return; - }; - let Some(block_info) = self - .signer_db - .block_lookup(self.reward_cycle, &block_vote.signer_signature_hash) - .unwrap_or_else(|_| panic!("{self}: Failed to connect to signer DB")) - else { - debug!( - "{self}: Received a signature result for a block we have not seen before. Ignoring..." 
- ); - return; - }; - block_info.block - }); - let block_rejection = - BlockRejection::new(block.header.signer_signature_hash(), RejectCode::from(e)); - debug!("{self}: Broadcasting block rejection: {block_rejection:?}"); - // Submit signature result to miners to observe - if let Err(e) = self - .stackerdb_manager - .send_message_with_retry(block_rejection.into()) - { - warn!("{self}: Failed to send block rejection submission to stacker-db: {e:?}"); - } - } - - /// Persist signer state in both SignerDB and StackerDB - fn save_signer_state(&mut self) -> Result<(), PersistenceError> { - let rng = &mut OsRng; - - let state = self.state_machine.signer.save(); - let serialized_state = serde_json::to_vec(&state)?; - - let encrypted_state = encrypt( - &self.state_machine.network_private_key, - &serialized_state, - rng, - )?; - - let signerdb_result = self.save_signer_state_in_signerdb(&encrypted_state); - let stackerdb_result = self.save_signer_state_in_stackerdb(encrypted_state); - - if let Err(err) = &signerdb_result { - warn!("{self}: Failed to persist state in SignerDB: {err}"); - } - - if let Err(err) = &stackerdb_result { - warn!("{self}: Failed to persist state in StackerDB: {err}"); - - stackerdb_result - } else { - signerdb_result - } - } - - /// Persist signer state in SignerDB - fn save_signer_state_in_signerdb( - &self, - encrypted_state: &[u8], - ) -> Result<(), PersistenceError> { - self.signer_db - .insert_encrypted_signer_state(self.reward_cycle, encrypted_state)?; - Ok(()) - } - - /// Persist signer state in StackerDB - /// TODO: this is a no-op until the number of signer slots can be expanded - fn save_signer_state_in_stackerdb( - &mut self, - _encrypted_state: Vec, - ) -> Result<(), PersistenceError> { - /* - * This is a no-op until the number of signer slots can be expanded to 14 - * - let message = SignerMessage::EncryptedSignerState(encrypted_state); - self.stackerdb_manager.send_message_with_retry(message)?; - */ - Ok(()) - } - - /// Send any 
operation results across the provided channel - fn send_operation_results( - &mut self, - res: &Sender>, - operation_results: Vec, - ) { - let nmb_results = operation_results.len(); - match res.send(operation_results.into_iter().map(|r| r.into()).collect()) { - Ok(_) => { - debug!("{self}: Successfully sent {nmb_results} operation result(s)") - } - Err(e) => { - warn!("{self}: Failed to send {nmb_results} operation results: {e:?}"); - } - } - } - - /// Sending all provided packets through stackerdb with a retry - fn send_outbound_messages(&mut self, outbound_messages: Vec) { - debug!( - "{self}: Sending {} messages to other stacker-db instances.", - outbound_messages.len() - ); - for msg in outbound_messages { - let ack = self.stackerdb_manager.send_message_with_retry(msg.into()); - if let Ok(ack) = ack { - debug!("{self}: send outbound ACK: {ack:?}"); - } else { - warn!("{self}: Failed to send message to stacker-db instance: {ack:?}"); - } - } - } - - /// Refresh DKG and queue it if required - pub fn refresh_dkg( - &mut self, - stacks_client: &StacksClient, - res: &Sender>, - current_reward_cycle: u64, - ) -> Result<(), ClientError> { - // First attempt to retrieve the aggregate key from the contract. - self.update_approved_aggregate_key(stacks_client)?; - if self.approved_aggregate_public_key.is_some() { - return Ok(()); - } - // Check stackerdb for any missed DKG messages to catch up our state. - self.read_dkg_stackerdb_messages(stacks_client, res, current_reward_cycle)?; - // Check if we should still queue DKG - if !self.should_queue_dkg(stacks_client)? 
{ - return Ok(()); - } - // Because there could be a slight delay in reading pending transactions and a key being approved by the contract, - // check one last time if the approved key was set since we finished the should queue dkg call - self.update_approved_aggregate_key(stacks_client)?; - if self.approved_aggregate_public_key.is_some() { - return Ok(()); - } - if self.commands.front() != Some(&SignerCommand::Dkg) { - info!("{self} is the current coordinator and must trigger DKG. Queuing DKG command..."); - self.commands.push_front(SignerCommand::Dkg); - } else { - debug!("{self}: DKG command already queued..."); - } - Ok(()) - } - - /// Overwrites the approved aggregate key to the value in the contract, updating state accordingly - pub fn update_approved_aggregate_key( - &mut self, - stacks_client: &StacksClient, - ) -> Result<(), ClientError> { - let old_dkg = self.approved_aggregate_public_key; - self.approved_aggregate_public_key = - stacks_client.get_approved_aggregate_key(self.reward_cycle)?; - if self.approved_aggregate_public_key.is_some() { - // TODO: this will never work as is. We need to have stored our party shares on the side etc for this particular aggregate key. - // Need to update state to store the necessary info, check against it to see if we have participated in the winning round and - // then overwrite our value accordingly. Otherwise, we will be locked out of the round and should not participate. - let internal_dkg = self.coordinator.aggregate_public_key; - if internal_dkg != self.approved_aggregate_public_key { - warn!("{self}: we do not support changing the internal DKG key yet. 
Expected {internal_dkg:?} got {:?}", self.approved_aggregate_public_key); - } - self.coordinator - .set_aggregate_public_key(self.approved_aggregate_public_key); - if old_dkg != self.approved_aggregate_public_key { - warn!( - "{self}: updated DKG value from {old_dkg:?} to {:?}.", - self.approved_aggregate_public_key - ); - } - match self.state { - State::OperationInProgress(Operation::Dkg) => { - debug!( - "{self}: DKG has already been set. Aborting DKG operation {}.", - self.coordinator.current_dkg_id - ); - self.finish_operation(); - } - State::Uninitialized => { - // If we successfully load the DKG value, we are fully initialized - self.state = State::Idle; - } - _ => { - // do nothing - } - } - } - Ok(()) - } - - /// Should DKG be queued to the current signer's command queue - /// This assumes that no key has been approved by the contract yet - pub fn should_queue_dkg(&mut self, stacks_client: &StacksClient) -> Result { - if self.state != State::Idle - || self.signer_id != self.get_coordinator_dkg().0 - || self.commands.front() == Some(&SignerCommand::Dkg) - { - // We are not the coordinator, we are in the middle of an operation, or we have already queued DKG. Do not attempt to queue DKG - return Ok(false); - } - let signer_address = stacks_client.get_signer_address(); - let account_nonces = self.get_account_nonces(stacks_client, &[*signer_address]); - let old_transactions = self.get_signer_transactions(&account_nonces).map_err(|e| { - warn!("{self}: Failed to get old signer transactions: {e:?}. 
May trigger DKG unnecessarily"); - }).unwrap_or_default(); - // Check if we have an existing vote transaction for the same round and reward cycle - for transaction in old_transactions.iter() { - let params = - NakamotoSigners::parse_vote_for_aggregate_public_key(transaction).unwrap_or_else(|| panic!("BUG: {self}: Received an invalid {SIGNERS_VOTING_FUNCTION_NAME} transaction in an already filtered list: {transaction:?}")); - if Some(params.aggregate_key) == self.coordinator.aggregate_public_key - && params.voting_round == self.coordinator.current_dkg_id - { - debug!("{self}: Not triggering a DKG round. Already have a pending vote transaction."; - "txid" => %transaction.txid(), - "aggregate_key" => %params.aggregate_key, - "voting_round" => params.voting_round - ); - return Ok(false); - } - } - if let Some(aggregate_key) = stacks_client.get_vote_for_aggregate_public_key( - self.coordinator.current_dkg_id, - self.reward_cycle, - *signer_address, - )? { - let Some(round_weight) = stacks_client - .get_round_vote_weight(self.reward_cycle, self.coordinator.current_dkg_id)? - else { - // This only will happen if somehow we registered as a signer and were granted no weight which should not really ever happen. - error!("{self}: already voted for DKG, but no round vote weight found. We either have no voting power or the contract is corrupted."; - "voting_round" => self.coordinator.current_dkg_id, - "aggregate_key" => %aggregate_key - ); - return Ok(false); - }; - let threshold_weight = stacks_client.get_vote_threshold_weight(self.reward_cycle)?; - if round_weight < threshold_weight { - // The threshold weight has not been met yet. We should wait for more votes to arrive. - // TODO: this should be on a timeout of some kind. We should not wait forever for the threshold to be met. - // See https://github.com/stacks-network/stacks-core/issues/4568 - debug!("{self}: Not triggering a DKG round. Weight threshold has not been met yet. 
Waiting for more votes to arrive."; - "voting_round" => self.coordinator.current_dkg_id, - "aggregate_key" => %aggregate_key, - "round_weight" => round_weight, - "threshold_weight" => threshold_weight - ); - return Ok(false); - } - } else { - // Have I already voted, but the vote is still pending in StackerDB? Check stackerdb for the same round number and reward cycle vote transaction - // Only get the account nonce of THIS signer as we only care about our own votes, not other signer votes - let account_nonce = stacks_client.get_account_nonce(signer_address).unwrap_or(0); - let old_transactions = self.stackerdb_manager.get_current_transactions()?; - // Check if we have an existing vote transaction for the same round and reward cycle - for transaction in old_transactions.iter() { - // We should not consider other signer transactions and should ignore invalid transaction versions - if transaction.origin_address() != *signer_address - || transaction.is_mainnet() != self.mainnet - { - continue; - } - let Some(params) = - NakamotoSigners::parse_vote_for_aggregate_public_key(transaction) - else { - continue; - }; - let Some(dkg_public_key) = self.coordinator.aggregate_public_key else { - break; - }; - if params.aggregate_key == dkg_public_key - && params.voting_round == self.coordinator.current_dkg_id - && params.reward_cycle == self.reward_cycle - { - let origin_nonce = transaction.get_origin_nonce(); - if origin_nonce < account_nonce { - // We have already voted, but our vote nonce is outdated. Resubmit vote with updated transaction - warn!("{self}: DKG vote submitted with invalid nonce ({origin_nonce} < {account_nonce}). Resubmitting vote."); - self.process_dkg(stacks_client, &dkg_public_key); - } else { - debug!("{self}: Already have a pending DKG vote in StackerDB. 
Waiting for it to be confirmed."; - "txid" => %transaction.txid(), - "aggregate_key" => %params.aggregate_key, - "voting_round" => params.voting_round, - "reward_cycle" => params.reward_cycle, - "nonce" => origin_nonce - ); - } - return Ok(false); - } - } - } - Ok(true) - } -} - -fn load_encrypted_signer_state( - storage: S, - id: S::IdType, - private_key: &Scalar, -) -> Result, PersistenceError> { - if let Some(encrypted_state) = storage.get_encrypted_signer_state(id)? { - let serialized_state = decrypt(private_key, &encrypted_state)?; - let state = serde_json::from_slice(&serialized_state) - .expect("Failed to deserialize decryoted state"); - Ok(Some(v2::Signer::load(&state))) - } else { - Ok(None) - } -} - -trait SignerStateStorage { - type IdType; - - fn get_encrypted_signer_state( - self, - signer_config: Self::IdType, - ) -> Result>, PersistenceError>; -} - -impl SignerStateStorage for &mut StackerDBManager { - type IdType = SignerSlotID; - - fn get_encrypted_signer_state( - self, - id: Self::IdType, - ) -> Result>, PersistenceError> { - Ok(self.get_encrypted_signer_state(id)?) - } -} - -impl SignerStateStorage for &SignerDb { - type IdType = u64; - fn get_encrypted_signer_state( - self, - id: Self::IdType, - ) -> Result>, PersistenceError> { - Ok(self.get_encrypted_signer_state(id)?) 
- } -} - -fn encrypt( - private_key: &Scalar, - msg: &[u8], - rng: &mut impl rand_core::CryptoRngCore, -) -> Result, EncryptionError> { - wsts::util::encrypt(derive_encryption_key(private_key).as_bytes(), msg, rng) - .map_err(|_| EncryptionError::Encrypt) -} - -fn decrypt(private_key: &Scalar, encrypted_msg: &[u8]) -> Result, EncryptionError> { - wsts::util::decrypt(derive_encryption_key(private_key).as_bytes(), encrypted_msg) - .map_err(|_| EncryptionError::Decrypt) -} - -fn derive_encryption_key(private_key: &Scalar) -> Sha512Trunc256Sum { - let mut prefixed_key = "SIGNER_STATE_ENCRYPTION_KEY/".as_bytes().to_vec(); - prefixed_key.extend_from_slice(&private_key.to_bytes()); - - Sha512Trunc256Sum::from_data(&prefixed_key) -} - -/// Error stemming from a persistence operation -#[derive(Debug, thiserror::Error)] -pub enum PersistenceError { - /// Encryption error - #[error("{0}")] - Encryption(#[from] EncryptionError), - /// Database error - #[error("Database operation failed: {0}")] - DBError(#[from] DBError), - /// Serialization error - #[error("JSON serialization failed: {0}")] - JsonSerializationError(#[from] serde_json::Error), - /// StackerDB client error - #[error("StackerDB client error: {0}")] - StackerDBClientError(#[from] ClientError), -} - -/// Error stemming from a persistence operation -#[derive(Debug, thiserror::Error)] -pub enum EncryptionError { - /// Encryption failed - #[error("Encryption operation failed")] - Encrypt, - /// Decryption failed - #[error("Encryption operation failed")] - Decrypt, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn encrypted_messages_should_be_possible_to_decrypt() { - let msg = "Nobody's gonna know".as_bytes(); - let key = Scalar::random(&mut OsRng); - - let encrypted = encrypt(&key, msg, &mut OsRng).unwrap(); - - assert_ne!(encrypted, msg); - - let decrypted = decrypt(&key, &encrypted).unwrap(); - - assert_eq!(decrypted, msg); - } -} diff --git a/stacks-signer/src/v1/stackerdb_manager.rs 
b/stacks-signer/src/v1/stackerdb_manager.rs deleted file mode 100644 index cf5e484022..0000000000 --- a/stacks-signer/src/v1/stackerdb_manager.rs +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . -// -use blockstack_lib::chainstate::stacks::StacksTransaction; -use clarity::types::chainstate::StacksPrivateKey; -use libsigner::v1::messages::{MessageSlotID, SignerMessage}; -use libsigner::{SignerSession, StackerDBSession}; -use libstackerdb::StackerDBChunkAckData; -use slog::{slog_debug, slog_error, slog_warn}; -use stacks_common::codec::read_next; -use stacks_common::{debug, error, warn}; -use wsts::net::Packet; - -use crate::client::stackerdb::StackerDB; -use crate::client::{retry_with_exponential_backoff, ClientError, SignerSlotID}; -use crate::config::SignerConfig; - -/// The session manager for communicating with the .signers contracts for the current and next reward cycle -#[derive(Debug)] -pub struct StackerDBManager { - /// The stacker-db transaction msg session for the NEXT reward cycle - next_transaction_session: StackerDBSession, - /// The stacker-db sessions for each signer set and message type. 
- stackerdb: StackerDB, -} - -impl From<&SignerConfig> for StackerDBManager { - fn from(config: &SignerConfig) -> Self { - let stackerdb = StackerDB::from(config); - let next_transaction_session = StackerDBSession::new( - &config.node_host, - MessageSlotID::Transactions - .stacker_db_contract(config.mainnet, config.reward_cycle.wrapping_add(1)), - ); - Self { - next_transaction_session, - stackerdb, - } - } -} -impl StackerDBManager { - /// Create a new StackerDB Manager - pub fn new( - host: &str, - stacks_private_key: StacksPrivateKey, - is_mainnet: bool, - reward_cycle: u64, - signer_slot_id: SignerSlotID, - ) -> Self { - let stackerdb = StackerDB::new( - host, - stacks_private_key, - is_mainnet, - reward_cycle, - signer_slot_id, - ); - let next_transaction_session = StackerDBSession::new( - host, - MessageSlotID::Transactions - .stacker_db_contract(is_mainnet, reward_cycle.wrapping_add(1)), - ); - Self { - next_transaction_session, - stackerdb, - } - } - - /// Send a message to the stackerdb with retry - pub fn send_message_with_retry( - &mut self, - message: SignerMessage, - ) -> Result { - self.stackerdb.send_message_with_retry(message) - } - - /// Sends message (as a raw msg ID and bytes) to the .signers stacker-db with an - /// exponential backoff retry - pub fn send_message_bytes_with_retry( - &mut self, - msg_id: &MessageSlotID, - message_bytes: Vec, - ) -> Result { - self.stackerdb - .send_message_bytes_with_retry(msg_id, message_bytes) - } - - /// Get the ordered DKG packets from stackerdb for the signer slot IDs. 
- pub fn get_dkg_packets( - &mut self, - signer_ids: &[SignerSlotID], - ) -> Result, ClientError> { - let packet_slots = &[ - MessageSlotID::DkgBegin, - MessageSlotID::DkgPublicShares, - MessageSlotID::DkgPrivateBegin, - MessageSlotID::DkgPrivateShares, - MessageSlotID::DkgEndBegin, - MessageSlotID::DkgEnd, - ]; - let slot_ids = signer_ids.iter().map(|id| id.0).collect::>(); - let mut packets = vec![]; - for packet_slot in packet_slots { - let session = self - .stackerdb - .get_session_mut(packet_slot) - .ok_or(ClientError::NotConnected)?; - let messages = StackerDB::get_messages(session, &slot_ids)?; - for message in messages { - let SignerMessage::Packet(packet) = message else { - warn!("Found an unexpected type in a packet slot {packet_slot}"); - continue; - }; - packets.push(packet); - } - } - Ok(packets) - } - - /// Get the transactions from stackerdb for the signers - fn get_transactions( - transactions_session: &mut StackerDBSession, - signer_ids: &[SignerSlotID], - ) -> Result, ClientError> { - let slot_ids = signer_ids.iter().map(|id| id.0).collect::>(); - let messages = StackerDB::get_messages(transactions_session, &slot_ids)?; - let mut transactions = vec![]; - for message in messages { - let SignerMessage::Transactions(chunk_transactions) = message else { - warn!("Signer wrote an unexpected type to the transactions slot"); - continue; - }; - transactions.extend(chunk_transactions); - } - Ok(transactions) - } - - /// Get this signer's latest transactions from stackerdb - pub fn get_current_transactions(&mut self) -> Result, ClientError> { - let signer_slot_id = self.get_signer_slot_id(); - let Some(transactions_session) = - self.stackerdb.get_session_mut(&MessageSlotID::Transactions) - else { - return Err(ClientError::NotConnected); - }; - Self::get_transactions(transactions_session, &[signer_slot_id]) - } - - /// Get the latest signer transactions from signer ids for the next reward cycle - pub fn get_next_transactions( - &mut self, - signer_ids: 
&[SignerSlotID], - ) -> Result, ClientError> { - debug!("Getting latest chunks from stackerdb for the following signers: {signer_ids:?}",); - Self::get_transactions(&mut self.next_transaction_session, signer_ids) - } - - /// Get the encrypted state for the given signer - pub fn get_encrypted_signer_state( - &mut self, - signer_id: SignerSlotID, - ) -> Result>, ClientError> { - debug!("Getting the persisted encrypted state for signer {signer_id}"); - let Some(state_session) = self - .stackerdb - .get_session_mut(&MessageSlotID::EncryptedSignerState) - else { - return Err(ClientError::NotConnected); - }; - - let send_request = || { - state_session - .get_latest_chunks(&[signer_id.0]) - .map_err(backoff::Error::transient) - }; - - let Some(chunk) = retry_with_exponential_backoff(send_request)?.pop().ok_or( - ClientError::UnexpectedResponseFormat(format!( - "Missing response for state session request for signer {}", - signer_id - )), - )? - else { - debug!("No persisted state for signer {signer_id}"); - return Ok(None); - }; - - if chunk.is_empty() { - debug!("Empty persisted state for signer {signer_id}"); - return Ok(None); - } - - let SignerMessage::EncryptedSignerState(state) = - read_next::(&mut chunk.as_slice())? 
- else { - error!("Wrong message type stored in signer state slot for signer {signer_id}"); - return Ok(None); - }; - - Ok(Some(state)) - } - - /// Retrieve the signer set this stackerdb client is attached to - pub fn get_signer_set(&self) -> u32 { - self.stackerdb.get_signer_set() - } - - /// Retrieve the signer slot ID - pub fn get_signer_slot_id(&self) -> SignerSlotID { - self.stackerdb.get_signer_slot_id() - } -} - -#[cfg(test)] -mod tests { - use std::thread::spawn; - use std::time::Duration; - - use blockstack_lib::chainstate::stacks::{ - TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, - TransactionSmartContract, TransactionVersion, - }; - use blockstack_lib::util_lib::strings::StacksString; - use clarity::codec::StacksMessageCodec; - use clarity::types::chainstate::StacksPrivateKey; - use libstackerdb::StackerDBChunkAckData; - - use super::*; - use crate::client::tests::{generate_signer_config, mock_server_from_config, write_response}; - use crate::config::GlobalConfig; - - #[test] - fn get_signer_transactions_should_succeed() { - let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); - let signer_config = generate_signer_config(&config, 5, 20); - let mut manager = StackerDBManager::from(&signer_config); - let sk = StacksPrivateKey::new(); - let tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0, - auth: TransactionAuth::from_p2pkh(&sk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::SmartContract( - TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), - }; - - let signer_message = SignerMessage::Transactions(vec![tx.clone()]); - let message = signer_message.serialize_to_vec(); - - let signer_slot_ids = vec![SignerSlotID(0), SignerSlotID(1)]; - let h = 
spawn(move || manager.get_next_transactions(&signer_slot_ids)); - let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); - response_bytes.extend(message); - let mock_server = mock_server_from_config(&config); - write_response(mock_server, response_bytes.as_slice()); - - let signer_message = SignerMessage::Transactions(vec![]); - let message = signer_message.serialize_to_vec(); - let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); - response_bytes.extend(message); - let mock_server = mock_server_from_config(&config); - write_response(mock_server, response_bytes.as_slice()); - - let transactions = h.join().unwrap().unwrap(); - assert_eq!(transactions, vec![tx]); - } - - #[test] - fn send_signer_message_should_succeed() { - let config = GlobalConfig::load_from_file("./src/tests/conf/signer-1.toml").unwrap(); - let signer_config = generate_signer_config(&config, 5, 20); - let mut stackerdb = StackerDBManager::from(&signer_config); - - let sk = StacksPrivateKey::new(); - let tx = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: 0, - auth: TransactionAuth::from_p2pkh(&sk).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::SmartContract( - TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), - }; - - let signer_message = SignerMessage::Transactions(vec![tx]); - let ack = StackerDBChunkAckData { - accepted: true, - reason: None, - metadata: None, - code: None, - }; - let mock_server = mock_server_from_config(&config); - let h = spawn(move || stackerdb.send_message_with_retry(signer_message)); - let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); - let payload = serde_json::to_string(&ack).expect("Failed to serialize ack"); - response_bytes.extend(payload.as_bytes()); - std::thread::sleep(Duration::from_millis(500)); - 
write_response(mock_server, response_bytes.as_slice()); - assert_eq!(ack, h.join().unwrap().unwrap()); - } -} diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index d04fc3b1af..edd58c6161 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -56,7 +56,6 @@ stacks-common = { path = "../stacks-common" } pox-locking = { path = "../pox-locking" } libstackerdb = { path = "../libstackerdb" } siphasher = "0.3.7" -wsts = { workspace = true } hashbrown = { workspace = true } rusqlite = { workspace = true } diff --git a/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs b/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs index 3933eacaa6..648859abc6 100644 --- a/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs +++ b/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs @@ -21,7 +21,6 @@ use stacks_common::deps_common::bitcoin::blockdata::script::Builder; use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksAddress}; use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::secp256k1::Secp256k1PublicKey; -use wsts::curve::point::{Compressed, Point}; use crate::burnchains::bitcoin::bits::parse_script; use crate::burnchains::bitcoin::{BitcoinTxInput, BitcoinTxInputStructured}; @@ -175,26 +174,18 @@ impl VoteForAggregateKeyOp { /// Check the payload of a vote-for-aggregate-key burn op. /// Both `signer_key` and `aggregate_key` are checked for validity against - /// `Secp256k1PublicKey` from `stacks_common` as well as `Point` from wsts. 
+ /// `Secp256k1PublicKey` from `stacks_common` pub fn check(&self) -> Result<(), op_error> { // Check to see if the aggregate key is valid let aggregate_key_bytes = self.aggregate_key.as_bytes(); Secp256k1PublicKey::from_slice(aggregate_key_bytes) .map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - let compressed = Compressed::try_from(aggregate_key_bytes.clone()) - .map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - Point::try_from(&compressed).map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - // Check to see if the signer key is valid let signer_key_bytes = self.signer_key.as_bytes(); Secp256k1PublicKey::from_slice(signer_key_bytes) .map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - let compressed = Compressed::try_from(signer_key_bytes.clone()) - .map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - Point::try_from(&compressed).map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - Ok(()) } } diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index cf016adb7d..0a59c1a67b 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -34,7 +34,6 @@ use stacks_common::types::{Address, StacksEpoch, StacksEpochId, StacksPublicKeyB use stacks_common::util::hash::Hash160; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::vrf::VRFProof; -use wsts::curve::point::Point; use crate::burnchains::{PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 1d267b047f..4d99b53821 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -487,7 +487,6 @@ impl NakamotoBlockBuilder { tenure_info: NakamotoTenureInfo, settings: BlockBuilderSettings, event_observer: Option<&dyn 
MemPoolEventDispatcher>, - signer_transactions: Vec, signer_bitvec_len: u16, ) -> Result<(NakamotoBlock, ExecutionCost, u64, Vec), Error> { let (tip_consensus_hash, tip_block_hash, tip_height) = ( @@ -522,14 +521,13 @@ impl NakamotoBlockBuilder { .block_limit() .expect("Failed to obtain block limit from miner's block connection"); - let mut initial_txs: Vec<_> = [ + let initial_txs: Vec<_> = [ tenure_info.tenure_change_tx.clone(), tenure_info.coinbase_tx.clone(), ] .into_iter() .filter_map(|x| x) .collect(); - initial_txs.extend(signer_transactions); // TODO: update this mempool check to prioritize signer vote transactions over other transactions let (blocked, tx_events) = match StacksBlockBuilder::select_and_apply_transactions( diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index e97fefafff..6a850e6d35 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -53,7 +53,6 @@ use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::{VRFProof, VRFPublicKey, VRF}; use stacks_common::util::{get_epoch_time_secs, sleep_ms}; -use wsts::curve::point::Point; use self::signer_set::SignerCalculation; use super::burn::db::sortdb::{ @@ -74,7 +73,7 @@ use super::stacks::db::{ use super::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use super::stacks::{ Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksMicroblock, StacksTransaction, - TenureChangeError, TenureChangePayload, ThresholdSignature, TransactionPayload, + TenureChangeError, TenureChangePayload, TransactionPayload, }; use crate::burnchains::{Burnchain, PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::SortitionDB; @@ -4499,8 +4498,8 @@ impl NakamotoChainState { /// Boot code instantiation for the aggregate public key. 
/// TODO: This should be removed once it's possible for stackers to vote on the aggregate /// public key - pub fn aggregate_public_key_bootcode(clarity_tx: &mut ClarityTx, apk: &Point) { - let agg_pub_key = to_hex(&apk.compress().data); + pub fn aggregate_public_key_bootcode(clarity_tx: &mut ClarityTx, apk: Vec) { + let agg_pub_key = to_hex(&apk); let contract_content = format!( "(define-read-only ({}) 0x{})", BOOT_TEST_POX_4_AGG_KEY_FNAME, agg_pub_key diff --git a/stackslib/src/chainstate/nakamoto/signer_set.rs b/stackslib/src/chainstate/nakamoto/signer_set.rs index d7eaad51b5..38e76f7e51 100644 --- a/stackslib/src/chainstate/nakamoto/signer_set.rs +++ b/stackslib/src/chainstate/nakamoto/signer_set.rs @@ -47,7 +47,6 @@ use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::{VRFProof, VRFPublicKey, VRF}; -use wsts::curve::point::{Compressed, Point}; use crate::burnchains::{Burnchain, PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::{ @@ -73,8 +72,8 @@ use crate::chainstate::stacks::db::{ use crate::chainstate::stacks::events::{StacksTransactionReceipt, TransactionOrigin}; use crate::chainstate::stacks::{ Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksMicroblock, StacksTransaction, - TenureChangeCause, TenureChangeError, TenureChangePayload, ThresholdSignature, - TransactionPayload, MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH, + TenureChangeCause, TenureChangeError, TenureChangePayload, TransactionPayload, + MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH, }; use crate::clarity::vm::clarity::{ClarityConnection, TransactionConnection}; use crate::clarity_vm::clarity::{ @@ -101,7 +100,7 @@ pub struct SignerCalculation { pub struct AggregateKeyVoteParams { pub signer_index: u64, - pub aggregate_key: Point, + pub aggregate_key: Vec, pub voting_round: u64, pub reward_cycle: 
u64, } @@ -547,10 +546,8 @@ impl NakamotoSigners { } let signer_index_value = payload.function_args.first()?; let signer_index = u64::try_from(signer_index_value.clone().expect_u128().ok()?).ok()?; - let point_value = payload.function_args.get(1)?; - let point_bytes = point_value.clone().expect_buff(33).ok()?; - let compressed_data = Compressed::try_from(point_bytes.as_slice()).ok()?; - let aggregate_key = Point::try_from(&compressed_data).ok()?; + let aggregate_key_value = payload.function_args.get(1)?; + let aggregate_key = aggregate_key_value.clone().expect_buff(33).ok()?; let round_value = payload.function_args.get(2)?; let voting_round = u64::try_from(round_value.clone().expect_u128().ok()?).ok()?; let reward_cycle = diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index 059da96b7a..4b7734653c 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -87,7 +87,6 @@ use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::{VRFProof, VRFPublicKey, VRF}; -use wsts::curve::point::Point; use crate::burnchains::{PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::{ @@ -108,8 +107,8 @@ use crate::chainstate::stacks::db::{ use crate::chainstate::stacks::events::StacksTransactionReceipt; use crate::chainstate::stacks::{ Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksMicroblock, StacksTransaction, - TenureChangeCause, TenureChangeError, TenureChangePayload, ThresholdSignature, - TransactionPayload, MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH, + TenureChangeCause, TenureChangeError, TenureChangePayload, TransactionPayload, + MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH, }; use crate::clarity_vm::clarity::{ClarityInstance, PreCommitClarityBlock}; use 
crate::clarity_vm::database::SortitionDBRef; diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 4ab7613751..6fd559da69 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -25,8 +25,9 @@ use clarity::vm::clarity::ClarityConnection; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::types::*; use hashbrown::HashMap; +use rand::distributions::Standard; use rand::seq::SliceRandom; -use rand::{CryptoRng, RngCore, SeedableRng}; +use rand::{CryptoRng, Rng, RngCore, SeedableRng}; use rand_chacha::ChaCha20Rng; use stacks_common::address::*; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; @@ -36,8 +37,6 @@ use stacks_common::types::chainstate::{ use stacks_common::util::hash::Hash160; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; -use wsts::curve::point::Point; -use wsts::traits::Aggregator; use self::boot::RewardSet; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; @@ -68,75 +67,32 @@ use crate::util_lib::db::Error as db_error; #[derive(Debug, Clone, PartialEq)] pub struct TestSigners { - /// The parties that will sign the blocks - pub signer_parties: Vec, - /// The commitments to the polynomials for the aggregate public key - pub poly_commitments: HashMap, - /// The aggregate public key - pub aggregate_public_key: Point, - /// The total number of key ids distributed among signer_parties - pub num_keys: u32, - /// The number of vote shares required to sign a block + /// The number of signatures required to validate a block pub threshold: u32, - /// The key ids distributed among signer_parties - pub party_key_ids: Vec>, - /// The cycle for which the signers are valid - pub cycle: u64, /// The signer's private keys pub signer_keys: Vec, + /// The aggregate public key + pub aggregate_public_key: Vec, + /// The cycle for 
which the aggregate public key was generated + pub cycle: u64, } impl Default for TestSigners { fn default() -> Self { - let mut rng = rand_core::OsRng::default(); - let num_keys = 10; - let threshold = 7; - let party_key_ids: Vec> = - vec![vec![1, 2, 3], vec![4, 5], vec![6, 7, 8], vec![9, 10]]; - let num_parties = party_key_ids.len().try_into().unwrap(); - - // Create the parties - let mut signer_parties: Vec = party_key_ids - .iter() - .enumerate() - .map(|(pid, pkids)| { - wsts::v2::Party::new( - pid.try_into().unwrap(), - pkids, - num_parties, - num_keys, - threshold, - &mut rng, - ) - }) - .collect(); + let aggregate_public_key: Vec = + rand::thread_rng().sample_iter(Standard).take(33).collect(); + let num_signers = 5; + let threshold = 5 * 7 / 10; let mut signer_keys = Vec::::new(); - for _ in 0..num_keys { + for _ in 0..num_signers { signer_keys.push(Secp256k1PrivateKey::default()); } - - // Generate an aggregate public key - let poly_commitments = match wsts::v2::test_helpers::dkg(&mut signer_parties, &mut rng) { - Ok(poly_commitments) => poly_commitments, - Err(secret_errors) => { - panic!("Got secret errors from DKG: {:?}", secret_errors); - } - }; - let mut sig_aggregator = wsts::v2::Aggregator::new(num_keys, threshold); - sig_aggregator - .init(&poly_commitments) - .expect("aggregator init failed"); - let aggregate_public_key = sig_aggregator.poly[0]; Self { - signer_parties, - aggregate_public_key, - poly_commitments, - num_keys, threshold, - party_key_ids, - cycle: 0, signer_keys, + aggregate_public_key, + cycle: 0, } } } @@ -149,50 +105,15 @@ impl TestSigners { /// Internal function to generate aggregate key information fn default_with_signers(signer_keys: Vec) -> Self { - let mut rng = rand_core::OsRng::default(); - let num_keys = 10; - let threshold = 7; - let party_key_ids: Vec> = - vec![vec![1, 2, 3], vec![4, 5], vec![6, 7, 8], vec![9, 10]]; - let num_parties = party_key_ids.len().try_into().unwrap(); - - // Create the parties - let mut 
signer_parties: Vec = party_key_ids - .iter() - .enumerate() - .map(|(pid, pkids)| { - wsts::v2::Party::new( - pid.try_into().unwrap(), - pkids, - num_parties, - num_keys, - threshold, - &mut rng, - ) - }) - .collect(); - - // Generate an aggregate public key - let poly_commitments = match wsts::v2::test_helpers::dkg(&mut signer_parties, &mut rng) { - Ok(poly_commitments) => poly_commitments, - Err(secret_errors) => { - panic!("Got secret errors from DKG: {:?}", secret_errors); - } - }; - let mut sig_aggregator = wsts::v2::Aggregator::new(num_keys, threshold); - sig_aggregator - .init(&poly_commitments) - .expect("aggregator init failed"); - let aggregate_public_key = sig_aggregator.poly[0]; + let aggregate_public_key: Vec = + rand::thread_rng().sample_iter(Standard).take(33).collect(); + let num_signers = signer_keys.len(); + let threshold = u32::try_from(num_signers * 7 / 10).unwrap(); Self { - signer_parties, - aggregate_public_key, - poly_commitments, - num_keys, threshold, - party_key_ids, - cycle: 0, signer_keys, + aggregate_public_key, + cycle: 0, } } @@ -278,25 +199,6 @@ impl TestSigners { keys.iter().map(|key| key.sign(&msg).unwrap()).collect() } - /// Sign a Nakamoto block using the aggregate key. - /// NB: this function is current unused. - #[allow(dead_code)] - fn sign_block_with_aggregate_key(&mut self, block: &NakamotoBlock) -> ThresholdSignature { - let mut rng = rand_core::OsRng::default(); - let msg = block.header.signer_signature_hash().0; - let (nonces, sig_shares, key_ids) = - wsts::v2::test_helpers::sign(msg.as_slice(), &mut self.signer_parties, &mut rng); - - let mut sig_aggregator = wsts::v2::Aggregator::new(self.num_keys, self.threshold); - sig_aggregator - .init(&self.poly_commitments) - .expect("aggregator init failed"); - let signature = sig_aggregator - .sign(msg.as_slice(), &nonces, &sig_shares, &key_ids) - .expect("aggregator sig failed"); - ThresholdSignature(signature) - } - /// Generate an list of signatures for a block. 
Only /// signers in the reward set will be included. pub fn generate_ordered_signatures( @@ -353,45 +255,16 @@ impl TestSigners { } // Generate and assign a new aggregate public key - pub fn generate_aggregate_key(&mut self, cycle: u64) -> Point { + pub fn generate_aggregate_key(&mut self, cycle: u64) -> Vec { // If the key is already generated for this cycle, return it if cycle == self.cycle { debug!("Returning cached aggregate key for cycle {}", cycle); return self.aggregate_public_key.clone(); } - debug!("Generating aggregate key for cycle {}", cycle); - let mut rng = ChaCha20Rng::seed_from_u64(cycle); - let num_parties = self.party_key_ids.len().try_into().unwrap(); - // Create the parties - self.signer_parties = self - .party_key_ids - .iter() - .enumerate() - .map(|(pid, pkids)| { - wsts::v2::Party::new( - pid.try_into().unwrap(), - pkids, - num_parties, - self.num_keys, - self.threshold, - &mut rng, - ) - }) - .collect(); - self.poly_commitments = - match wsts::v2::test_helpers::dkg(&mut self.signer_parties, &mut rng) { - Ok(poly_commitments) => poly_commitments, - Err(secret_errors) => { - panic!("Got secret errors from DKG: {:?}", secret_errors); - } - }; - let mut sig_aggregator = wsts::v2::Aggregator::new(self.num_keys, self.threshold); - sig_aggregator - .init(&self.poly_commitments) - .expect("aggregator init failed"); - self.aggregate_public_key = sig_aggregator.poly[0]; - self.cycle = cycle; - self.aggregate_public_key.clone() + let aggregate_public_key: Vec = + rand::thread_rng().sample_iter(Standard).take(33).collect(); + self.aggregate_public_key = aggregate_public_key.clone(); + aggregate_public_key } } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 722cfa541a..ea163730ec 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -25,7 +25,8 @@ use clarity::vm::costs::ExecutionCost; use 
clarity::vm::types::StacksAddressExtensions; use clarity::vm::Value; use libstackerdb::StackerDBChunkData; -use rand::{thread_rng, RngCore}; +use rand::distributions::Standard; +use rand::{thread_rng, Rng, RngCore}; use rusqlite::types::ToSql; use rusqlite::{params, Connection}; use stacks_common::address::AddressHashMode; @@ -45,8 +46,6 @@ use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof, VRFPublicKey, VRF}; use stdext::prelude::Integer; use stx_genesis::GenesisData; -use wsts::curve::point::Point; -use wsts::curve::scalar::Scalar; use crate::burnchains::{BurnchainSigner, PoxConstants, Txid}; use crate::chainstate::burn::db::sortdb::tests::make_fork_run; @@ -83,9 +82,9 @@ use crate::chainstate::stacks::db::{ }; use crate::chainstate::stacks::{ CoinbasePayload, Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksTransaction, - StacksTransactionSigner, TenureChangeCause, TenureChangePayload, ThresholdSignature, - TokenTransferMemo, TransactionAnchorMode, TransactionAuth, TransactionContractCall, - TransactionPayload, TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, + StacksTransactionSigner, TenureChangeCause, TenureChangePayload, TokenTransferMemo, + TransactionAnchorMode, TransactionAuth, TransactionContractCall, TransactionPayload, + TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, }; use crate::core; use crate::core::{StacksEpochExtension, STACKS_EPOCH_3_0_MARKER}; @@ -2170,9 +2169,8 @@ fn parse_vote_for_aggregate_public_key_valid() { let signer_index = thread_rng().next_u64(); let signer_index_arg = Value::UInt(signer_index as u128); - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = 
Value::buff_from(aggregate_key.clone()).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2181,7 +2179,7 @@ fn parse_vote_for_aggregate_public_key_valid() { let valid_function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; @@ -2201,7 +2199,7 @@ fn parse_vote_for_aggregate_public_key_valid() { }; let params = NakamotoSigners::parse_vote_for_aggregate_public_key(&valid_tx).unwrap(); assert_eq!(params.signer_index, signer_index); - assert_eq!(params.aggregate_key, point); + assert_eq!(params.aggregate_key, aggregate_key); assert_eq!(params.voting_round, round); assert_eq!(params.reward_cycle, reward_cycle); } @@ -2217,10 +2215,8 @@ fn parse_vote_for_aggregate_public_key_invalid() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2229,7 +2225,7 @@ fn parse_vote_for_aggregate_public_key_invalid() { let valid_function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; @@ -2297,8 +2293,8 @@ fn parse_vote_for_aggregate_public_key_invalid() { contract_name: contract_name.clone(), function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ - point_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ], @@ -2340,8 +2336,8 @@ fn 
parse_vote_for_aggregate_public_key_invalid() { function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ signer_index_arg.clone(), - point_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), + aggregate_key_arg.clone(), reward_cycle_arg.clone(), ], }), @@ -2361,9 +2357,9 @@ fn parse_vote_for_aggregate_public_key_invalid() { function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), ], }), }; @@ -2403,9 +2399,8 @@ fn valid_vote_transaction() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2414,7 +2409,7 @@ fn valid_vote_transaction() { let valid_function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; @@ -2454,9 +2449,8 @@ fn valid_vote_transaction_malformed_transactions() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2465,7 +2459,7 @@ fn 
valid_vote_transaction_malformed_transactions() { let valid_function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; @@ -2566,8 +2560,8 @@ fn valid_vote_transaction_malformed_transactions() { contract_name: contract_name.clone(), function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ - point_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ], @@ -2609,8 +2603,8 @@ fn valid_vote_transaction_malformed_transactions() { function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ signer_index_arg.clone(), - point_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), + aggregate_key_arg.clone(), reward_cycle_arg.clone(), ], }), @@ -2630,9 +2624,9 @@ fn valid_vote_transaction_malformed_transactions() { function_name: SIGNERS_VOTING_FUNCTION_NAME.into(), function_args: vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), ], }), }; @@ -2689,9 +2683,8 @@ fn filter_one_transaction_per_signer_multiple_addresses() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2700,7 +2693,7 @@ fn filter_one_transaction_per_signer_multiple_addresses() { let function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; @@ 
-2818,9 +2811,8 @@ fn filter_one_transaction_per_signer_duplicate_nonces() { let signer_index = thread_rng().next_u32(); let signer_index_arg = Value::UInt(signer_index as u128); - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); + let aggregate_key: Vec = rand::thread_rng().sample_iter(Standard).take(33).collect(); + let aggregate_key_arg = Value::buff_from(aggregate_key).expect("Failed to create buff"); let round = thread_rng().next_u64(); let round_arg = Value::UInt(round as u128); @@ -2829,7 +2821,7 @@ fn filter_one_transaction_per_signer_duplicate_nonces() { let function_args = vec![ signer_index_arg.clone(), - point_arg.clone(), + aggregate_key_arg.clone(), round_arg.clone(), reward_cycle_arg.clone(), ]; diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index d23d608ec7..e7d6fef03f 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -35,8 +35,6 @@ use stacks_common::util::hash::Hash160; use stacks_common::util::secp256k1::Secp256k1PrivateKey; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; -use wsts::curve::point::Point; -use wsts::traits::Aggregator; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; use crate::burnchains::tests::*; diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 88ecc8887e..8562449dd3 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -44,8 +44,6 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, StacksAddress, StacksBlockId, StacksPublicKey, }; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160}; -use wsts::curve::point::{Compressed, Point}; -use wsts::curve::scalar::Scalar; use 
crate::burnchains::bitcoin::address::BitcoinAddress; use crate::burnchains::{Address, Burnchain, PoxConstants}; @@ -1350,7 +1348,7 @@ impl StacksChainState { sortdb: &SortitionDB, block_id: &StacksBlockId, reward_cycle: u64, - ) -> Result, Error> { + ) -> Result>, Error> { let aggregate_public_key_opt = self .eval_boot_code_read_only( sortdb, @@ -1367,11 +1365,7 @@ impl StacksChainState { let aggregate_public_key = match aggregate_public_key_opt { Some(value) => { // A point should have 33 bytes exactly. - let data = value.expect_buff(33)?; - let msg = - "Pox-4 signers-voting get-approved-aggregate-key returned a corrupted value."; - let compressed_data = Compressed::try_from(data.as_slice()).expect(msg); - Some(Point::try_from(&compressed_data).expect(msg)) + Some(value.expect_buff(33)?) } None => None, }; @@ -2038,13 +2032,12 @@ pub mod test { key: &StacksPrivateKey, nonce: u64, signer_index: u128, - aggregate_public_key: &Point, + aggregate_public_key: Vec, round: u128, cycle: u128, ) -> StacksTransaction { - let aggregate_public_key_val = - Value::buff_from(aggregate_public_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); + let aggregate_public_key_val = Value::buff_from(aggregate_public_key) + .expect("Failed to serialize aggregate public key"); make_signers_vote_for_aggregate_public_key_value( key, nonce, @@ -2085,7 +2078,7 @@ pub mod test { peer: &mut TestPeer<'_>, latest_block_id: StacksBlockId, reward_cycle: u128, - ) -> Option { + ) -> Option> { let key_opt = readonly_call( peer, &latest_block_id, @@ -2095,11 +2088,7 @@ pub mod test { ) .expect_optional() .unwrap(); - key_opt.map(|key_value| { - let data = key_value.expect_buff(33).unwrap(); - let compressed_data = Compressed::try_from(data.as_slice()).unwrap(); - Point::try_from(&compressed_data).unwrap() - }) + key_opt.map(|key_value| key_value.expect_buff(33).unwrap()) } pub fn make_pox_2_increase( diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs 
b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 8fee5bd5b3..d1cceae7cf 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -44,7 +44,6 @@ use stacks_common::types::{Address, PrivateKey}; use stacks_common::util::hash::{hex_bytes, to_hex, Sha256Sum, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stdext::num::integer::Integer; -use wsts::curve::point::{Compressed, Point}; use super::test::*; use super::RawRewardSetEntry; @@ -7154,7 +7153,7 @@ fn test_scenario_one(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -7164,7 +7163,7 @@ fn test_scenario_one(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -7185,7 +7184,7 @@ fn test_scenario_one(use_nakamoto: bool) { &tester_key, 1, // only tx is a stack-stx tester_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.unwrap(), 1, next_reward_cycle, ); @@ -7542,7 +7541,7 @@ fn test_scenario_two(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -7552,7 +7551,7 @@ fn test_scenario_two(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -7562,7 +7561,7 @@ fn test_scenario_two(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 3, next_reward_cycle, ); @@ -7572,7 +7571,7 @@ fn 
test_scenario_two(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.unwrap(), 1, next_reward_cycle, ); @@ -8289,7 +8288,7 @@ fn test_scenario_four(use_nakamoto: bool) { &alice.private_key, alice.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -8299,7 +8298,7 @@ fn test_scenario_four(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -8309,7 +8308,7 @@ fn test_scenario_four(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -8334,7 +8333,7 @@ fn test_scenario_four(use_nakamoto: bool) { &tester_key, 1, // only tx is a stack-stx tester_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -8388,7 +8387,10 @@ fn test_scenario_four(use_nakamoto: bool) { let approved_key = get_approved_aggregate_key(&mut peer, latest_block, next_reward_cycle) .expect("No approved key found"); - assert_eq!(approved_key, peer_config.aggregate_public_key.unwrap()); + assert_eq!( + approved_key, + peer_config.aggregate_public_key.clone().unwrap() + ); // Alice stack-extend err tx let alice_extend_err = make_pox_4_extend( @@ -8422,7 +8424,7 @@ fn test_scenario_four(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, 7, ); @@ -9714,7 +9716,7 @@ fn test_scenario_five(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, 
next_reward_cycle, ); @@ -9722,7 +9724,7 @@ fn test_scenario_five(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -9730,7 +9732,7 @@ fn test_scenario_five(use_nakamoto: bool) { &carl.private_key, carl.nonce, carl_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -9922,7 +9924,7 @@ fn test_scenario_five(use_nakamoto: bool) { &alice.private_key, alice.nonce, alice_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -9930,7 +9932,7 @@ fn test_scenario_five(use_nakamoto: bool) { &bob.private_key, bob.nonce, bob_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); @@ -9938,7 +9940,7 @@ fn test_scenario_five(use_nakamoto: bool) { &carl.private_key, carl.nonce, carl_index, - &peer_config.aggregate_public_key.unwrap(), + peer_config.aggregate_public_key.clone().unwrap(), 1, next_reward_cycle, ); diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 35c82f9b94..127751abbb 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -734,49 +734,6 @@ pub enum TenureChangeError { NotNakamoto, } -/// Schnorr threshold signature using types from `wsts` -#[derive(Debug, Clone, PartialEq)] -pub struct ThresholdSignature(pub wsts::common::Signature); -impl FromSql for ThresholdSignature { - fn column_result(value: ValueRef) -> FromSqlResult { - let hex_str = value.as_str()?; - let bytes = hex_bytes(&hex_str).map_err(|_| FromSqlError::InvalidType)?; - let ts = ThresholdSignature::consensus_deserialize(&mut &bytes[..]) - .map_err(|_| FromSqlError::InvalidType)?; - Ok(ts) - } -} - -impl fmt::Display for ThresholdSignature { 
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - to_hex(&self.serialize_to_vec()).fmt(f) - } -} - -impl ToSql for ThresholdSignature { - fn to_sql(&self) -> rusqlite::Result { - let bytes = self.serialize_to_vec(); - let hex_str = to_hex(&bytes); - Ok(hex_str.into()) - } -} - -impl serde::Serialize for ThresholdSignature { - fn serialize(&self, s: S) -> Result { - let bytes = self.serialize_to_vec(); - s.serialize_str(&to_hex(&bytes)) - } -} - -impl<'de> serde::Deserialize<'de> for ThresholdSignature { - fn deserialize>(d: D) -> Result { - let hex_str = String::deserialize(d)?; - let bytes = hex_bytes(&hex_str).map_err(serde::de::Error::custom)?; - ThresholdSignature::consensus_deserialize(&mut bytes.as_slice()) - .map_err(serde::de::Error::custom) - } -} - /// A transaction from Stackers to signal new mining tenure #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct TenureChangePayload { diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index 2204f57a25..c45b212b68 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -28,9 +28,6 @@ use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{to_hex, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; -use wsts::common::Signature as Secp256k1Signature; -use wsts::curve::point::{Compressed as Secp256k1Compressed, Point as Secp256k1Point}; -use wsts::curve::scalar::Scalar as Secp256k1Scalar; use crate::burnchains::Txid; use crate::chainstate::stacks::{TransactionPayloadID, *}; @@ -154,46 +151,6 @@ impl StacksMessageCodec for TenureChangeCause { } } -impl StacksMessageCodec for ThresholdSignature { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - let compressed = self.0.R.compress(); - let bytes = compressed.as_bytes(); - 
fd.write_all(bytes).map_err(CodecError::WriteError)?; - write_next(fd, &self.0.z.to_bytes())?; - Ok(()) - } - - fn consensus_deserialize(fd: &mut R) -> Result { - // Read curve point - let mut buf = [0u8; 33]; - fd.read_exact(&mut buf).map_err(CodecError::ReadError)?; - let R = Secp256k1Point::try_from(&Secp256k1Compressed::from(buf)) - .map_err(|_| CodecError::DeserializeError("Failed to read curve point".into()))?; - - // Read scalar - let mut buf = [0u8; 32]; - fd.read_exact(&mut buf).map_err(CodecError::ReadError)?; - let z = Secp256k1Scalar::from(buf); - - Ok(Self(Secp256k1Signature { R, z })) - } -} - -impl ThresholdSignature { - pub fn verify(&self, public_key: &Secp256k1Point, msg: &[u8]) -> bool { - self.0.verify(public_key, msg) - } - - /// Create an empty/null signature. This is not valid data, but it is used - /// as a placeholder in the header during mining. - pub fn empty() -> Self { - Self(Secp256k1Signature { - R: Secp256k1Point::G(), - z: Secp256k1Scalar::new(), - }) - } -} - impl StacksMessageCodec for TenureChangePayload { fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &self.tenure_consensus_hash)?; diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index a2f4fe5dc5..132a03f34d 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -31,7 +31,6 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs index 
5f03c3811a..eb43d8aecd 100644 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ b/stackslib/src/net/download/nakamoto/mod.rs @@ -127,7 +127,6 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index 80065dc0c6..98f102969a 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -31,7 +31,6 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; use crate::chainstate::burn::db::sortdb::{ diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 54661a2f09..628243d53e 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -1749,7 +1749,6 @@ pub mod test { use stacks_common::util::secp256k1::*; use stacks_common::util::uint::*; use stacks_common::util::vrf::*; - use wsts::curve::point::Point; use {mio, rand}; use self::nakamoto::test_signers::TestSigners; @@ -2099,7 +2098,7 @@ pub mod test { pub services: u16, /// aggregate public key to use /// (NOTE: will be used post-Nakamoto) - pub aggregate_public_key: Option, + pub aggregate_public_key: Option>, pub test_stackers: Option>, pub test_signers: Option, } @@ -2457,11 +2456,8 @@ pub mod test { let mut receipts = vec![]; if let Some(agg_pub_key) = agg_pub_key_opt { - debug!( - "Setting aggregate public key to {}", - 
&to_hex(&agg_pub_key.compress().data) - ); - NakamotoChainState::aggregate_public_key_bootcode(clarity_tx, &agg_pub_key); + debug!("Setting aggregate public key to {}", &to_hex(&agg_pub_key)); + NakamotoChainState::aggregate_public_key_bootcode(clarity_tx, agg_pub_key); } else { debug!("Not setting aggregate public key"); } diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index a6307b324b..cc90d90011 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -36,9 +36,8 @@ use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use crate::chainstate::stacks::{ - CoinbasePayload, StacksTransaction, TenureChangeCause, TenureChangePayload, ThresholdSignature, - TokenTransferMemo, TransactionAnchorMode, TransactionAuth, TransactionPayload, - TransactionVersion, + CoinbasePayload, StacksTransaction, TenureChangeCause, TenureChangePayload, TokenTransferMemo, + TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionVersion, }; use crate::clarity::vm::types::StacksAddressExtensions; use crate::net::api::gettenureinfo::RPCGetTenureInfo; diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index a74cb0fd2c..07227c930e 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -35,7 +35,6 @@ use stacks_common::types::chainstate::{ }; use stacks_common::types::{Address, StacksEpochId}; use stacks_common::util::vrf::VRFProof; -use wsts::curve::point::Point; use crate::burnchains::PoxConstants; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 19165db0a8..0b9b59a0e7 100644 --- a/testnet/stacks-node/Cargo.toml +++ 
b/testnet/stacks-node/Cargo.toml @@ -24,7 +24,6 @@ stacks-common = { path = "../../stacks-common" } chrono = "0.4.19" regex = "1" libsigner = { path = "../../libsigner" } -wsts = { workspace = true } url = "2.1.0" rand = { workspace = true } rand_core = { workspace = true } @@ -48,7 +47,6 @@ stacks = { package = "stackslib", path = "../../stackslib", features = ["default stacks-signer = { path = "../../stacks-signer", features = ["testing"] } tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } -wsts = {workspace = true} mutants = "0.0.3" tiny_http = "0.12.0" http-types = "2.12" diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index ecc30a9c19..4d9d4f968e 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -13,17 +13,13 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::collections::HashMap; use std::thread; use std::thread::JoinHandle; use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; -use clarity::vm::clarity::ClarityConnection; -use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; -use hashbrown::HashSet; -use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; -use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; +use clarity::vm::types::PrincipalData; +use libsigner::v0::messages::{MinerSlotID, SignerMessage}; use libsigner::StackerDBSession; use rand::{thread_rng, Rng}; use stacks::burnchains::Burnchain; @@ -32,7 +28,6 @@ use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::coordinator::OnChainRewardSetProvider; use stacks::chainstate::nakamoto::coordinator::load_nakamoto_reward_set; use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; -use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::nakamoto::staging_blocks::NakamotoBlockObtainMethod; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use stacks::chainstate::stacks::boot::{RewardSet, MINERS_NAME}; @@ -46,11 +41,9 @@ use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; use stacks::util::secp256k1::MessageSignature; -use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; use stacks_common::types::{PrivateKey, StacksEpochId}; use stacks_common::util::vrf::VRFProof; -use wsts::curve::scalar::Scalar; use super::relayer::RelayerThread; use super::sign_coordinator::SignCoordinator; @@ -290,7 +283,6 @@ impl BlockMinerThread { let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true) .map_err(|e| NakamotoNodeError::MiningFailure(ChainstateError::NetError(e)))?; - let mut attempts = 0; // now, actually 
run this tenure loop { #[cfg(test)] @@ -333,7 +325,7 @@ impl BlockMinerThread { } } - match self.mine_block(&stackerdbs) { + match self.mine_block() { Ok(x) => { if !self.validate_timestamp(&x)? { info!("Block mined too quickly. Will try again."; @@ -371,11 +363,9 @@ impl BlockMinerThread { if let Some(mut new_block) = new_block { Self::fault_injection_block_broadcast_stall(&new_block); - let (reward_set, signer_signature) = match self.gather_signatures( - &mut new_block, - &mut stackerdbs, - &mut attempts, - ) { + let (reward_set, signer_signature) = match self + .gather_signatures(&mut new_block, &mut stackerdbs) + { Ok(x) => x, Err(e) => match e { NakamotoNodeError::StacksTipChanged => { @@ -523,7 +513,6 @@ impl BlockMinerThread { &mut self, new_block: &mut NakamotoBlock, stackerdbs: &mut StackerDBs, - attempts: &mut u64, ) -> Result<(RewardSet, Vec), NakamotoNodeError> { let Some(miner_privkey) = self.config.miner.mining_key else { return Err(NakamotoNodeError::MinerConfigurationFailed( @@ -557,7 +546,6 @@ impl BlockMinerThread { }) })?; - let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone()); let reward_set = self.load_signer_set()?; if self.config.get_node_config(false).mock_mining { @@ -566,7 +554,7 @@ impl BlockMinerThread { let mut coordinator = SignCoordinator::new( &reward_set, - miner_privkey_as_scalar, + miner_privkey, &self.config, self.globals.should_keep_running.clone(), ) @@ -583,10 +571,8 @@ impl BlockMinerThread { )) })?; - *attempts += 1; let signature = coordinator.run_sign_v0( new_block, - *attempts, &tip, &self.burnchain, &sort_db, @@ -599,125 +585,6 @@ impl BlockMinerThread { return Ok((reward_set, signature)); } - fn get_stackerdb_contract_and_slots( - &self, - stackerdbs: &StackerDBs, - msg_id: &MessageSlotID, - reward_cycle: u64, - ) -> Result<(QualifiedContractIdentifier, HashMap), NakamotoNodeError> { - let stackerdb_contracts = stackerdbs - .get_stackerdb_contract_ids() - .expect("FATAL: could not get the stacker 
DB contract ids"); - - let signers_contract_id = - msg_id.stacker_db_contract(self.config.is_mainnet(), reward_cycle); - if !stackerdb_contracts.contains(&signers_contract_id) { - return Err(NakamotoNodeError::SignerSignatureError( - "No signers contract found, cannot wait for signers".into(), - )); - }; - // Get the slots for every signer - let signers = stackerdbs - .get_signers(&signers_contract_id) - .expect("FATAL: could not get signers from stacker DB"); - let mut slot_ids_addresses = HashMap::with_capacity(signers.len()); - for (slot_id, address) in stackerdbs - .get_signers(&signers_contract_id) - .expect("FATAL: could not get signers from stacker DB") - .into_iter() - .enumerate() - { - slot_ids_addresses.insert( - u32::try_from(slot_id).expect("FATAL: too many signers to fit into u32 range"), - address, - ); - } - Ok((signers_contract_id, slot_ids_addresses)) - } - - fn get_signer_transactions( - &self, - chainstate: &mut StacksChainState, - sortdb: &SortitionDB, - stackerdbs: &StackerDBs, - ) -> Result, NakamotoNodeError> { - let next_reward_cycle = self - .burnchain - .block_height_to_reward_cycle(self.burn_block.block_height) - .expect("FATAL: no reward cycle for burn block") - .wrapping_add(1); - let (signers_contract_id, slot_ids_addresses) = self.get_stackerdb_contract_and_slots( - stackerdbs, - &MessageSlotID::Transactions, - next_reward_cycle, - )?; - let slot_ids = slot_ids_addresses.keys().cloned().collect::>(); - let addresses = slot_ids_addresses.values().cloned().collect::>(); - // Get the transactions from the signers for the next block - let signer_chunks = stackerdbs - .get_latest_chunks(&signers_contract_id, &slot_ids) - .expect("FATAL: could not get latest chunks from stacker DB"); - let signer_messages: Vec<(u32, SignerMessageV1)> = slot_ids - .iter() - .zip(signer_chunks.into_iter()) - .filter_map(|(slot_id, chunk)| { - chunk.and_then(|chunk| { - read_next::(&mut &chunk[..]) - .ok() - .map(|msg| (*slot_id, msg)) - }) - }) - .collect(); 
- - if signer_messages.is_empty() { - return Ok(vec![]); - } - - let (consensus_hash, block_bhh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); - let stacks_block_id = StacksBlockId::new(&consensus_hash, &block_bhh); - - // Get all nonces for the signers from clarity DB to use to validate transactions - let account_nonces = chainstate - .with_read_only_clarity_tx( - &sortdb - .index_handle_at_block(chainstate, &stacks_block_id) - .map_err(|_| NakamotoNodeError::UnexpectedChainState)?, - &stacks_block_id, - |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - addresses - .iter() - .map(|address| { - ( - address.clone(), - clarity_db - .get_account_nonce(&address.clone().into()) - .unwrap_or(0), - ) - }) - .collect::>() - }) - }, - ) - .unwrap_or_default(); - let mut filtered_transactions: HashMap = HashMap::new(); - for (_slot, signer_message) in signer_messages { - match signer_message { - SignerMessageV1::Transactions(transactions) => { - NakamotoSigners::update_filtered_transactions( - &mut filtered_transactions, - &account_nonces, - self.config.is_mainnet(), - transactions, - ) - } - _ => {} // Any other message is ignored - } - } - Ok(filtered_transactions.into_values().collect()) - } - /// Fault injection -- possibly fail to broadcast /// Return true to drop the block fn fault_injection_broadcast_fail(&self) -> bool { @@ -842,7 +709,7 @@ impl BlockMinerThread { &sort_db, &self.burn_block, &stackerdbs, - SignerMessageV0::BlockPushed(block), + SignerMessage::BlockPushed(block), MinerSlotID::BlockPushed, chain_state.mainnet, &mut miners_session, @@ -1125,7 +992,7 @@ impl BlockMinerThread { #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a /// burnchain block-commit transaction. If we succeed, then return the assembled block. 
- fn mine_block(&mut self, stackerdbs: &StackerDBs) -> Result { + fn mine_block(&mut self) -> Result { debug!("block miner thread ID is {:?}", thread::current().id()); let burn_db_path = self.config.get_burn_db_file_path(); @@ -1173,9 +1040,6 @@ impl BlockMinerThread { parent_block_info.stacks_parent_header.microblock_tail = None; - let signer_transactions = - self.get_signer_transactions(&mut chain_state, &burn_db, &stackerdbs)?; - let signer_bitvec_len = reward_set.rewarded_addresses.len().try_into().ok(); // build the block itself @@ -1194,7 +1058,6 @@ impl BlockMinerThread { // we'll invoke the event dispatcher ourselves so that it calculates the // correct signer_sighash for `process_mined_nakamoto_block_event` Some(&self.event_dispatcher), - signer_transactions, signer_bitvec_len.unwrap_or(0), ) .map_err(|e| { diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index 35d578c0f1..f570009be5 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -21,7 +21,6 @@ use std::time::Duration; use hashbrown::{HashMap, HashSet}; use libsigner::v0::messages::{BlockResponse, MinerSlotID, SignerMessage as SignerMessageV0}; -use libsigner::v1::messages::{MessageSlotID, SignerMessage as SignerMessageV1}; use libsigner::{BlockProposal, SignerEntries, SignerEvent, SignerSession, StackerDBSession}; use stacks::burnchains::Burnchain; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -31,8 +30,6 @@ use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, MINERS_NA use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::Error as ChainstateError; -#[cfg(any(test, feature = "testing"))] -use stacks::chainstate::stacks::ThresholdSignature; use stacks::libstackerdb::StackerDBChunkData; use 
stacks::net::stackerdb::StackerDBs; use stacks::types::PublicKey; @@ -42,15 +39,6 @@ use stacks::util_lib::boot::boot_code_id; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; -use wsts::common::PolyCommitment; -#[cfg(any(test, feature = "testing"))] -use wsts::curve::ecdsa; -use wsts::curve::point::Point; -use wsts::curve::scalar::Scalar; -use wsts::state_machine::coordinator::fire::Coordinator as FireCoordinator; -use wsts::state_machine::coordinator::{Config as CoordinatorConfig, Coordinator}; -use wsts::state_machine::PublicKeys; -use wsts::v2::Aggregator; use super::Error as NakamotoNodeError; use crate::event_dispatcher::STACKER_DB_CHANNEL; @@ -66,17 +54,11 @@ pub static TEST_IGNORE_SIGNERS: std::sync::Mutex> = std::sync::Mute /// waking up to check timeouts? static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); -/// The `SignCoordinator` struct represents a WSTS FIRE coordinator whose -/// sole function is to serve as the coordinator for Nakamoto block signing. -/// This coordinator does not operate as a DKG coordinator. Rather, this struct -/// is used by Nakamoto miners to act as the coordinator for the blocks they -/// produce. +/// The `SignCoordinator` struct sole function is to serve as the coordinator for Nakamoto block signing. +/// This struct is used by Nakamoto miners to act as the coordinator for the blocks they produce. 
pub struct SignCoordinator { - coordinator: FireCoordinator, receiver: Option>, - message_key: Scalar, - #[cfg(any(test, feature = "testing"))] - wsts_public_keys: PublicKeys, + message_key: StacksPrivateKey, is_mainnet: bool, miners_session: StackerDBSession, signer_entries: HashMap, @@ -86,20 +68,6 @@ pub struct SignCoordinator { pub next_signer_bitvec: BitVec<4000>, } -pub struct NakamotoSigningParams { - /// total number of signers - pub num_signers: u32, - /// total number of keys - pub num_keys: u32, - /// threshold of keys needed to form a valid signature - pub threshold: u32, - /// map of signer_id to controlled key_ids - pub signer_key_ids: HashMap>, - /// ECDSA public keys as Point objects indexed by signer_id - pub signer_public_keys: HashMap, - pub wsts_public_keys: PublicKeys, -} - impl Drop for SignCoordinator { fn drop(&mut self) { STACKER_DB_CHANNEL.replace_receiver(self.receiver.take().expect( @@ -108,112 +76,13 @@ impl Drop for SignCoordinator { } } -impl NakamotoSigningParams { - pub fn parse( - is_mainnet: bool, - reward_set: &[NakamotoSignerEntry], - ) -> Result { - let parsed = SignerEntries::parse(is_mainnet, reward_set).map_err(|e| { - ChainstateError::InvalidStacksBlock(format!( - "Invalid Reward Set: Could not parse into WSTS structs: {e:?}" - )) - })?; - - let num_keys = parsed - .count_keys() - .expect("FATAL: more than u32::max() signers in the reward set"); - let num_signers = parsed - .count_signers() - .expect("FATAL: more than u32::max() signers in the reward set"); - let threshold = parsed - .get_signing_threshold() - .expect("FATAL: more than u32::max() signers in the reward set"); - - Ok(NakamotoSigningParams { - num_signers, - threshold, - num_keys, - signer_key_ids: parsed.coordinator_key_ids, - signer_public_keys: parsed.signer_public_keys, - wsts_public_keys: parsed.public_keys, - }) - } -} - -#[allow(dead_code)] -fn get_signer_commitments( - is_mainnet: bool, - reward_set: &[NakamotoSignerEntry], - stackerdbs: &StackerDBs, - 
reward_cycle: u64, - expected_aggregate_key: &Point, -) -> Result, ChainstateError> { - let commitment_contract = - MessageSlotID::DkgResults.stacker_db_contract(is_mainnet, reward_cycle); - let signer_set_len = u32::try_from(reward_set.len()) - .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set length exceeds u32".into()))?; - for signer_id in 0..signer_set_len { - let Some(signer_data) = stackerdbs.get_latest_chunk(&commitment_contract, signer_id)? - else { - warn!( - "Failed to fetch DKG result, will look for results from other signers."; - "signer_id" => signer_id - ); - continue; - }; - let Ok(SignerMessageV1::DkgResults { - aggregate_key, - party_polynomials, - }) = SignerMessageV1::consensus_deserialize(&mut signer_data.as_slice()) - else { - warn!( - "Failed to parse DKG result, will look for results from other signers."; - "signer_id" => signer_id, - ); - continue; - }; - - if &aggregate_key != expected_aggregate_key { - warn!( - "Aggregate key in DKG results does not match expected, will look for results from other signers."; - "expected" => %expected_aggregate_key, - "reported" => %aggregate_key, - ); - continue; - } - let computed_key = party_polynomials - .iter() - .fold(Point::default(), |s, (_, comm)| s + comm.poly[0]); - - if expected_aggregate_key != &computed_key { - warn!( - "Aggregate key computed from DKG results does not match expected, will look for results from other signers."; - "expected" => %expected_aggregate_key, - "computed" => %computed_key, - ); - continue; - } - - return Ok(party_polynomials); - } - error!( - "No valid DKG results found for the active signing set, cannot coordinate a group signature"; - "reward_cycle" => reward_cycle, - ); - Err(ChainstateError::InvalidStacksBlock( - "Failed to fetch DKG results for the active signer set".into(), - )) -} - impl SignCoordinator { /// * `reward_set` - the active reward set data, used to construct the signer /// set parameters. 
- /// * `message_key` - the signing key that the coordinator will use to sign messages - /// broadcasted to the signer set. this should be the miner's registered key. /// * `aggregate_public_key` - the active aggregate key for this cycle pub fn new( reward_set: &RewardSet, - message_key: Scalar, + message_key: StacksPrivateKey, config: &Config, keep_running: Arc, ) -> Result { @@ -224,6 +93,11 @@ impl SignCoordinator { return Err(ChainstateError::NoRegisteredSigners(0)); }; + let signer_entries = SignerEntries::parse(is_mainnet, reward_set_signers).map_err(|e| { + ChainstateError::InvalidStacksBlock(format!( + "Failed to parse NakamotoSignerEntries: {e:?}" + )) + })?; let rpc_socket = config .node .get_rpc_loopback() @@ -240,33 +114,11 @@ impl SignCoordinator { ) .expect("FATAL: unable to construct initial bitvec for signer set"); - let NakamotoSigningParams { - num_signers, - num_keys, - threshold, - signer_key_ids, - signer_public_keys, - wsts_public_keys, - } = NakamotoSigningParams::parse(is_mainnet, reward_set_signers.as_slice())?; debug!( "Initializing miner/coordinator"; - "num_signers" => num_signers, - "num_keys" => num_keys, - "threshold" => threshold, - "signer_key_ids" => ?signer_key_ids, - "signer_public_keys" => ?signer_public_keys, - "wsts_public_keys" => ?wsts_public_keys, + "num_signers" => signer_entries.signer_pks.len(), + "signer_public_keys" => ?signer_entries.signer_pks, ); - let coord_config = CoordinatorConfig { - num_signers, - num_keys, - threshold, - signer_key_ids, - signer_public_keys, - dkg_threshold: threshold, - message_private_key: message_key.clone(), - ..Default::default() - }; let total_weight = reward_set.total_signing_weight().map_err(|e| { warn!("Failed to calculate total weight for the reward set: {e:?}"); @@ -288,8 +140,6 @@ impl SignCoordinator { Ok((slot_id, signer)) }) .collect::, ChainstateError>>()?; - - let coordinator: FireCoordinator = FireCoordinator::new(coord_config); #[cfg(test)] { // In test mode, short-circuit 
spinning up the SignCoordinator if the TEST_SIGNING @@ -303,10 +153,8 @@ impl SignCoordinator { warn!("Replaced the miner/coordinator receiver of a prior thread. Prior thread may have crashed."); } let sign_coordinator = Self { - coordinator, message_key, receiver: Some(receiver), - wsts_public_keys, is_mainnet, miners_session, next_signer_bitvec, @@ -325,11 +173,8 @@ impl SignCoordinator { } Ok(Self { - coordinator, - message_key, receiver: Some(receiver), - #[cfg(any(test, feature = "testing"))] - wsts_public_keys, + message_key, is_mainnet, miners_session, next_signer_bitvec, @@ -340,40 +185,6 @@ impl SignCoordinator { }) } - fn get_sign_id(burn_block_height: u64, burnchain: &Burnchain) -> u64 { - burnchain - .pox_constants - .reward_cycle_index(burnchain.first_block_height, burn_block_height) - .expect("FATAL: tried to initialize WSTS coordinator before first burn block height") - } - - /// Send a message over the miners contract using a `Scalar` private key - fn send_miners_message_scalar( - message_key: &Scalar, - sortdb: &SortitionDB, - tip: &BlockSnapshot, - stackerdbs: &StackerDBs, - message: M, - miner_slot_id: MinerSlotID, - is_mainnet: bool, - miners_session: &mut StackerDBSession, - election_sortition: &ConsensusHash, - ) -> Result<(), String> { - let mut miner_sk = StacksPrivateKey::from_slice(&message_key.to_bytes()).unwrap(); - miner_sk.set_compress_public(true); - Self::send_miners_message( - &miner_sk, - sortdb, - tip, - stackerdbs, - message, - miner_slot_id, - is_mainnet, - miners_session, - election_sortition, - ) - } - /// Send a message over the miners contract using a `StacksPrivateKey` pub fn send_miners_message( miner_sk: &StacksPrivateKey, @@ -425,221 +236,6 @@ impl SignCoordinator { } } - #[cfg_attr(test, mutants::skip)] - #[cfg(any(test, feature = "testing"))] - pub fn begin_sign_v1( - &mut self, - block: &NakamotoBlock, - burn_block_height: u64, - block_attempt: u64, - burn_tip: &BlockSnapshot, - burnchain: &Burnchain, - sortdb: 
&SortitionDB, - stackerdbs: &StackerDBs, - counters: &Counters, - election_sortiton: &ConsensusHash, - ) -> Result { - let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); - let sign_iter_id = block_attempt; - let reward_cycle_id = burnchain - .block_height_to_reward_cycle(burn_tip.block_height) - .expect("FATAL: tried to initialize coordinator before first burn block height"); - self.coordinator.current_sign_id = sign_id; - self.coordinator.current_sign_iter_id = sign_iter_id; - - let proposal_msg = BlockProposal { - block: block.clone(), - burn_height: burn_block_height, - reward_cycle: reward_cycle_id, - }; - - let block_bytes = proposal_msg.serialize_to_vec(); - let nonce_req_msg = self - .coordinator - .start_signing_round(&block_bytes, false, None) - .map_err(|e| { - NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failed to start signing round in FIRE coordinator: {e:?}" - )) - })?; - Self::send_miners_message_scalar::( - &self.message_key, - sortdb, - burn_tip, - &stackerdbs, - nonce_req_msg.into(), - MinerSlotID::BlockProposal, - self.is_mainnet, - &mut self.miners_session, - election_sortiton, - ) - .map_err(NakamotoNodeError::SigningCoordinatorFailure)?; - counters.bump_naka_proposed_blocks(); - #[cfg(test)] - { - // In test mode, short-circuit waiting for the signers if the TEST_SIGNING - // channel has been created. This allows integration tests for the stacks-node - // independent of the stacks-signer. 
- if let Some(_signatures) = - crate::tests::nakamoto_integrations::TestSigningChannel::get_signature() - { - debug!("Short-circuiting waiting for signers, using test signature"); - return Ok(ThresholdSignature::empty()); - } - } - - let Some(ref mut receiver) = self.receiver else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Failed to obtain the StackerDB event receiver".into(), - )); - }; - - loop { - let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) { - Ok(event) => event, - Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { - continue; - } - Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "StackerDB event receiver disconnected".into(), - )) - } - }; - - let is_signer_event = - event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot(); - if !is_signer_event { - debug!("Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id); - continue; - } - let modified_slots = &event.modified_slots; - - // Update `next_signers_bitvec` with the slots that were modified in the event - modified_slots.iter().for_each(|chunk| { - if let Ok(slot_id) = chunk.slot_id.try_into() { - match &self.next_signer_bitvec.set(slot_id, true) { - Err(e) => { - warn!("Failed to set bitvec for next signer: {e:?}"); - } - _ => (), - }; - } else { - error!("FATAL: slot_id greater than u16, which should never happen."); - } - }); - - let Ok(signer_event) = SignerEvent::try_from(event).map_err(|e| { - warn!("Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e); - }) else { - continue; - }; - let SignerEvent::SignerMessages(signer_set, messages) = signer_event else { - debug!("Received signer event other than a signer message. Ignoring."); - continue; - }; - if signer_set != u32::try_from(reward_cycle_id % 2).unwrap() { - debug!("Received signer event for other reward cycle. 
Ignoring."); - continue; - }; - debug!("Miner/Coordinator: Received messages from signers"; "count" => messages.len()); - let coordinator_pk = ecdsa::PublicKey::new(&self.message_key).map_err(|_e| { - NakamotoNodeError::MinerSignatureError("Bad signing key for the FIRE coordinator") - })?; - let packets: Vec<_> = messages - .into_iter() - .filter_map(|msg| match msg { - SignerMessageV1::DkgResults { .. } - | SignerMessageV1::BlockResponse(_) - | SignerMessageV1::EncryptedSignerState(_) - | SignerMessageV1::Transactions(_) => None, - SignerMessageV1::Packet(packet) => { - debug!("Received signers packet: {packet:?}"); - if !packet.verify(&self.wsts_public_keys, &coordinator_pk) { - warn!("Failed to verify StackerDB packet: {packet:?}"); - None - } else { - Some(packet) - } - } - }) - .collect(); - let (outbound_msgs, op_results) = self - .coordinator - .process_inbound_messages(&packets) - .unwrap_or_else(|e| { - error!( - "Miner/Coordinator: Failed to process inbound message packets"; - "err" => ?e - ); - (vec![], vec![]) - }); - for operation_result in op_results.into_iter() { - match operation_result { - wsts::state_machine::OperationResult::Dkg { .. } - | wsts::state_machine::OperationResult::SignTaproot(_) - | wsts::state_machine::OperationResult::DkgError(_) => { - debug!("Ignoring unrelated operation result"); - } - wsts::state_machine::OperationResult::Sign(signature) => { - // check if the signature actually corresponds to our block? - let block_sighash = block.header.signer_signature_hash(); - let verified = signature.verify( - self.coordinator.aggregate_public_key.as_ref().unwrap(), - &block_sighash.0, - ); - let signature = ThresholdSignature(signature); - if !verified { - warn!( - "Processed signature but didn't validate over the expected block. 
Returning error."; - "signature" => %signature, - "block_signer_signature_hash" => %block_sighash - ); - return Err(NakamotoNodeError::SignerSignatureError( - "Signature failed to validate over the expected block".into(), - )); - } else { - info!( - "SignCoordinator: Generated a valid signature for the block"; - "next_signer_bitvec" => self.next_signer_bitvec.binary_str(), - ); - return Ok(signature); - } - } - wsts::state_machine::OperationResult::SignError(e) => { - return Err(NakamotoNodeError::SignerSignatureError(format!( - "Signing failed: {e:?}" - ))) - } - } - } - for msg in outbound_msgs { - match Self::send_miners_message_scalar::( - &self.message_key, - sortdb, - burn_tip, - stackerdbs, - msg.into(), - // TODO: note, in v1, we'll want to add a new slot, but for now, it just shares - // with the block proposal - MinerSlotID::BlockProposal, - self.is_mainnet, - &mut self.miners_session, - election_sortiton, - ) { - Ok(()) => { - debug!("Miner/Coordinator: sent outbound message."); - } - Err(e) => { - warn!( - "Miner/Coordinator: Failed to send message to StackerDB instance: {e:?}." - ); - } - }; - } - } - } - /// Do we ignore signer signatures? 
#[cfg(test)] fn fault_injection_ignore_signatures() -> bool { @@ -682,7 +278,6 @@ impl SignCoordinator { pub fn run_sign_v0( &mut self, block: &NakamotoBlock, - block_attempt: u64, burn_tip: &BlockSnapshot, burnchain: &Burnchain, sortdb: &SortitionDB, @@ -691,13 +286,9 @@ impl SignCoordinator { counters: &Counters, election_sortition: &ConsensusHash, ) -> Result, NakamotoNodeError> { - let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain); - let sign_iter_id = block_attempt; let reward_cycle_id = burnchain .block_height_to_reward_cycle(burn_tip.block_height) .expect("FATAL: tried to initialize coordinator before first burn block height"); - self.coordinator.current_sign_id = sign_id; - self.coordinator.current_sign_iter_id = sign_iter_id; let block_proposal = BlockProposal { block: block.clone(), @@ -709,7 +300,7 @@ impl SignCoordinator { debug!("Sending block proposal message to signers"; "signer_signature_hash" => %block.header.signer_signature_hash(), ); - Self::send_miners_message_scalar::( + Self::send_miners_message::( &self.message_key, sortdb, burn_tip, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 17b829557f..3c238153ac 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -28,8 +28,7 @@ use clarity::vm::{ClarityName, ClarityVersion, Value}; use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; use libsigner::v0::messages::SignerMessage as SignerMessageV0; -use libsigner::v1::messages::SignerMessage as SignerMessageV1; -use libsigner::{BlockProposal, SignerSession, StackerDBSession}; +use libsigner::{SignerSession, StackerDBSession}; use rand::RngCore; use stacks::burnchains::{MagicBytes, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; @@ -91,7 +90,6 @@ use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp use 
stacks_common::util::{get_epoch_time_secs, sleep_ms}; use stacks_signer::chainstate::{ProposalEvalConfig, SortitionsView}; use stacks_signer::signerdb::{BlockInfo, BlockState, ExtraBlockInfo, SignerDb}; -use wsts::net::Message; use super::bitcoin_regtest::BitcoinCoreController; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; @@ -441,27 +439,6 @@ pub fn get_latest_block_proposal( Ok((proposed_block, pubkey)) } -#[allow(dead_code)] -fn get_block_proposal_msg_v1( - miners_stackerdb: &mut StackerDBSession, - slot_id: u32, -) -> NakamotoBlock { - let message: SignerMessageV1 = miners_stackerdb - .get_latest(slot_id) - .expect("Failed to get latest chunk from the miner slot ID") - .expect("No chunk found"); - let SignerMessageV1::Packet(packet) = message else { - panic!("Expected a signer message packet. Got {message:?}"); - }; - let Message::NonceRequest(nonce_request) = packet.msg else { - panic!("Expected a nonce request. Got {:?}", packet.msg); - }; - let block_proposal = - BlockProposal::consensus_deserialize(&mut nonce_request.message.as_slice()) - .expect("Failed to deserialize block proposal"); - block_proposal.block -} - pub fn read_and_sign_block_proposal( configs: &[&Config], signers: &TestSigners, @@ -894,9 +871,8 @@ pub fn boot_to_epoch_3( if let Some(signers) = self_signing { // Get the aggregate key let aggregate_key = signers.clone().generate_aggregate_key(reward_cycle + 1); - let aggregate_public_key = - clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); + let aggregate_public_key = clarity::vm::Value::buff_from(aggregate_key) + .expect("Failed to serialize aggregate public key"); let signer_sks_unique: HashMap<_, _> = signer_sks.iter().map(|x| (x.to_hex(), x)).collect(); let signer_set = get_stacker_set(&http_origin, reward_cycle + 1); // Vote on the aggregate public key @@ -1049,9 +1025,8 @@ pub fn boot_to_pre_epoch_3_boundary( if let Some(signers) = 
self_signing { // Get the aggregate key let aggregate_key = signers.clone().generate_aggregate_key(reward_cycle + 1); - let aggregate_public_key = - clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); + let aggregate_public_key = clarity::vm::Value::buff_from(aggregate_key) + .expect("Failed to serialize aggregate public key"); let signer_sks_unique: HashMap<_, _> = signer_sks.iter().map(|x| (x.to_hex(), x)).collect(); let signer_set = get_stacker_set(&http_origin, reward_cycle + 1); // Vote on the aggregate public key @@ -1206,9 +1181,8 @@ fn signer_vote_if_needed( // Get the aggregate key let aggregate_key = signers.clone().generate_aggregate_key(reward_cycle + 1); - let aggregate_public_key = - clarity::vm::Value::buff_from(aggregate_key.compress().data.to_vec()) - .expect("Failed to serialize aggregate public key"); + let aggregate_public_key = clarity::vm::Value::buff_from(aggregate_key) + .expect("Failed to serialize aggregate public key"); for (i, signer_sk) in signer_sks.iter().enumerate() { let signer_nonce = get_account(&http_origin, &to_addr(signer_sk)).nonce; diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index a25a010465..4248e72145 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -13,7 +13,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
mod v0; -mod v1; use std::collections::HashSet; // Copyright (C) 2020-2024 Stacks Open Internet Foundation @@ -42,7 +41,7 @@ use libsigner::{SignerEntries, SignerEventTrait}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; -use stacks::chainstate::stacks::{StacksPrivateKey, ThresholdSignature}; +use stacks::chainstate::stacks::StacksPrivateKey; use stacks::core::StacksEpoch; use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, @@ -54,12 +53,11 @@ use stacks::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::{hex_bytes, Sha512Trunc256Sum}; +use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_signer::client::{ClientError, SignerSlotID, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; use stacks_signer::runloop::{SignerResult, State, StateInfo}; use stacks_signer::{Signer, SpawnedSigner}; -use wsts::state_machine::PublicKeys; use super::nakamoto_integrations::wait_for; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; @@ -76,7 +74,7 @@ use crate::tests::neon_integrations::{ wait_for_runloop, }; use crate::tests::to_addr; -use crate::{BitcoinRegtestController, BurnchainController}; +use crate::BitcoinRegtestController; // Helper struct for holding the btc and stx neon nodes #[allow(dead_code)] @@ -110,8 +108,6 @@ pub struct SignerTest { pub signer_stacks_private_keys: Vec, // link to the stacks node pub stacks_client: StacksClient, - // Unique number used to isolate files created during the test - pub run_stamp: u16, /// The number of cycles to stack for pub num_stacking_cycles: 
u64, } @@ -224,7 +220,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest { - panic!("Recieved an operation result."); - } - SignerResult::StatusCheck(state_info) => { - output.push(Some(state_info)); - } - } + output.push(Some(state_info)); } output } - fn nmb_blocks_to_reward_set_calculation(&mut self) -> u64 { - let prepare_phase_len = self - .running_nodes - .conf - .get_burnchain() - .pox_constants - .prepare_length as u64; - let current_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height() - .saturating_sub(1); // Must subtract 1 since get_headers_height returns current block height + 1 - let curr_reward_cycle = self.get_current_reward_cycle(); - let next_reward_cycle = curr_reward_cycle.saturating_add(1); - let next_reward_cycle_height = self - .running_nodes - .btc_regtest_controller - .get_burnchain() - .reward_cycle_to_block_height(next_reward_cycle); - let next_reward_cycle_reward_set_calculation = next_reward_cycle_height - .saturating_sub(prepare_phase_len) - .saturating_add(1); // +1 as the reward calculation occurs in the SECOND block of the prepare phase/ - - next_reward_cycle_reward_set_calculation.saturating_sub(current_block_height) - } - - fn nmb_blocks_to_reward_cycle_boundary(&mut self, reward_cycle: u64) -> u64 { - let current_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height() - .saturating_sub(1); // Must subtract 1 since get_headers_height returns current block height + 1 - let reward_cycle_height = self - .running_nodes - .btc_regtest_controller - .get_burnchain() - .reward_cycle_to_block_height(reward_cycle); - reward_cycle_height.saturating_sub(current_block_height) - } - fn mine_nakamoto_block(&mut self, timeout: Duration) -> MinedNakamotoBlockEvent { let commits_submitted = self.running_nodes.commits_submitted.clone(); let mined_block_time = Instant::now(); @@ -421,20 +369,6 @@ impl + Send + 
'static, T: SignerEventTrait + 'static> SignerTest ThresholdSignature { - let block_obj = self.wait_for_confirmed_block_with_hash(block_signer_sighash, timeout); - let signer_signature_hex = block_obj.get("signer_signature").unwrap().as_str().unwrap(); - let signer_signature_bytes = hex_bytes(&signer_signature_hex[2..]).unwrap(); - let signer_signature = - ThresholdSignature::consensus_deserialize(&mut signer_signature_bytes.as_slice()) - .unwrap(); - signer_signature - } - /// Wait for a confirmed block and return a list of individual /// signer signatures fn wait_for_confirmed_block_v0( @@ -558,22 +492,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest SignerSlotID { - let valid_signer_set = - u32::try_from(reward_cycle % 2).expect("FATAL: reward_cycle % 2 exceeds u32::MAX"); - let signer_stackerdb_contract_id = boot_code_id(SIGNERS_NAME, false); - - self.stacks_client - .get_stackerdb_signer_slots(&signer_stackerdb_contract_id, valid_signer_set) - .expect("FATAL: failed to get signer slots from stackerdb") - .iter() - .position(|(address, _)| address == self.stacks_client.get_signer_address()) - .map(|pos| { - SignerSlotID(u32::try_from(pos).expect("FATAL: number of signers exceeds u32::MAX")) - }) - .expect("FATAL: signer not registered") - } - fn get_signer_slots( &self, reward_cycle: u64, @@ -597,11 +515,11 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest>() } - /// Get the wsts public keys for the given reward cycle - fn get_signer_public_keys(&self, reward_cycle: u64) -> PublicKeys { + /// Get the signer public keys for the given reward cycle + fn get_signer_public_keys(&self, reward_cycle: u64) -> Vec { let entries = self.get_reward_set_signers(reward_cycle); let entries = SignerEntries::parse(false, &entries).unwrap(); - entries.public_keys + entries.signer_pks } /// Get the signers for the given reward cycle @@ -630,42 +548,6 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest StacksPrivateKey { - let 
spawned_signer = self.spawned_signers.remove(signer_idx); - let signer_key = self.signer_stacks_private_keys.remove(signer_idx); - - spawned_signer.stop(); - signer_key - } - - /// (Re)starts a new signer runloop with the given private key - pub fn restart_signer(&mut self, signer_idx: usize, signer_private_key: StacksPrivateKey) { - let signer_config = build_signer_config_tomls( - &[signer_private_key], - &self.running_nodes.conf.node.rpc_bind, - Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. - &Network::Testnet, - "12345", // It worked sir, we have the combination! -Great, what's the combination? - self.run_stamp, - 3000 + signer_idx, - Some(100_000), - None, - Some(9000 + signer_idx), - ) - .pop() - .unwrap(); - - info!("Restarting signer"); - let config = SignerConfig::load_from_str(&signer_config).unwrap(); - let signer = SpawnedSigner::new(config); - self.spawned_signers.insert(signer_idx, signer); - } - pub fn shutdown(self) { self.running_nodes .coord_channel diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2ec72082a6..3f0813b024 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2501,8 +2501,7 @@ fn mock_sign_epoch_25() { .iter() .map(|id| id.0) .collect(); - let signer_keys = signer_test.get_signer_public_keys(reward_cycle); - let signer_public_keys: Vec<_> = signer_keys.signers.into_values().collect(); + let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle); assert_eq!(signer_slot_ids.len(), num_signers); let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); @@ -2714,8 +2713,7 @@ fn multiple_miners_mock_sign_epoch_25() { .iter() .map(|id| id.0) .collect(); - let signer_keys = signer_test.get_signer_public_keys(reward_cycle); - let signer_public_keys: Vec<_> = signer_keys.signers.into_values().collect(); + let signer_public_keys = 
signer_test.get_signer_public_keys(reward_cycle); assert_eq!(signer_slot_ids.len(), num_signers); let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false); diff --git a/testnet/stacks-node/src/tests/signer/v1.rs b/testnet/stacks-node/src/tests/signer/v1.rs deleted file mode 100644 index 816db4c5dc..0000000000 --- a/testnet/stacks-node/src/tests/signer/v1.rs +++ /dev/null @@ -1,1155 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
-use std::collections::HashSet; -use std::sync::atomic::Ordering; -use std::time::{Duration, Instant}; -use std::{env, thread}; - -use clarity::boot_util::boot_code_id; -use clarity::vm::Value; -use libsigner::v1::messages::{BlockResponse, MessageSlotID, RejectCode, SignerMessage}; -use libsigner::BlockProposal; -use rand::thread_rng; -use rand_core::RngCore; -use stacks::burnchains::Txid; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; -use stacks::chainstate::stacks::boot::{SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME}; -use stacks::chainstate::stacks::events::StackerDBChunksEvent; -use stacks::chainstate::stacks::miner::TransactionEvent; -use stacks::chainstate::stacks::{ - StacksPrivateKey, StacksTransaction, TransactionAnchorMode, TransactionAuth, - TransactionPayload, TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, -}; -use stacks::util_lib::strings::StacksString; -use stacks_common::bitvec::BitVec; -use stacks_common::codec::StacksMessageCodec; -use stacks_common::consts::CHAIN_ID_TESTNET; -use stacks_common::types::chainstate::{ - ConsensusHash, StacksAddress, StacksBlockId, StacksPublicKey, TrieHash, -}; -use stacks_common::util::hash::{hex_bytes, MerkleTree, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::MessageSignature; -use stacks_signer::client::{SignerSlotID, StacksClient}; -use stacks_signer::runloop::{RunLoopCommand, SignerCommand, SignerResult}; -use stacks_signer::v1::coordinator::CoordinatorSelector; -use stacks_signer::v1::stackerdb_manager::StackerDBManager; -use stacks_signer::v1::SpawnedSigner; -use tracing_subscriber::prelude::*; -use tracing_subscriber::{fmt, EnvFilter}; -use wsts::curve::point::Point; -use wsts::curve::scalar::Scalar; -use wsts::net::Message; -use wsts::state_machine::OperationResult; - -use super::SignerTest; -use crate::event_dispatcher::MinedNakamotoBlockEvent; -use crate::tests::nakamoto_integrations::{ - boot_to_epoch_3_reward_set, 
boot_to_epoch_3_reward_set_calculation_boundary, next_block_and, -}; -use crate::tests::neon_integrations::{next_block_and_wait, test_observer}; -use crate::tests::to_addr; -use crate::BurnchainController; - -impl SignerTest { - fn boot_to_epoch_3(&mut self, timeout: Duration) -> Point { - boot_to_epoch_3_reward_set( - &self.running_nodes.conf, - &self.running_nodes.blocks_processed, - &self.signer_stacks_private_keys, - &self.signer_stacks_private_keys, - &mut self.running_nodes.btc_regtest_controller, - Some(self.num_stacking_cycles), - ); - let dkg_vote = self.wait_for_dkg(timeout); - - // Advance and mine the DKG key block - self.run_until_epoch_3_boundary(); - - let reward_cycle = self.get_current_reward_cycle(); - let set_dkg = self - .stacks_client - .get_approved_aggregate_key(reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found"); - assert_eq!(set_dkg, dkg_vote); - - let (vrfs_submitted, commits_submitted) = ( - self.running_nodes.vrfs_submitted.clone(), - self.running_nodes.commits_submitted.clone(), - ); - // first block wakes up the run loop, wait until a key registration has been submitted. 
- next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - info!("Successfully triggered first block to wake up the miner runloop."); - // second block should confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); - info!("Ready to mine Nakamoto blocks!"); - set_dkg - } - - // Only call after already past the epoch 3.0 boundary - fn run_to_dkg(&mut self, timeout: Duration) -> Option { - let curr_reward_cycle = self.get_current_reward_cycle(); - let set_dkg = self - .stacks_client - .get_approved_aggregate_key(curr_reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found"); - let nmb_blocks_to_mine_to_dkg = self.nmb_blocks_to_reward_set_calculation(); - let end_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height() - .saturating_sub(1) // Must subtract 1 since get_headers_height returns current block height + 1 - .saturating_add(nmb_blocks_to_mine_to_dkg); - info!("Mining {nmb_blocks_to_mine_to_dkg} bitcoin block(s) to reach DKG calculation at bitcoin height {end_block_height}"); - for i in 1..=nmb_blocks_to_mine_to_dkg { - info!("Mining bitcoin block #{i} and nakamoto tenure of {nmb_blocks_to_mine_to_dkg}"); - self.mine_and_verify_confirmed_naka_block(&set_dkg, timeout); - } - if nmb_blocks_to_mine_to_dkg == 0 { - None - } else { - Some(self.wait_for_dkg(timeout)) - } - } - - // Only call after already past the epoch 3.0 boundary - fn run_until_burnchain_height_nakamoto( - &mut self, - timeout: Duration, - burnchain_height: u64, - ) -> Vec { - let mut points = vec![]; - let current_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height(); - let mut 
total_nmb_blocks_to_mine = burnchain_height.saturating_sub(current_block_height); - debug!("Mining {total_nmb_blocks_to_mine} Nakamoto block(s) to reach burnchain height {burnchain_height}"); - let mut nmb_blocks_to_reward_cycle = 0; - let mut blocks_to_dkg = self.nmb_blocks_to_reward_set_calculation(); - while total_nmb_blocks_to_mine > 0 && blocks_to_dkg > 0 { - if blocks_to_dkg > 0 && total_nmb_blocks_to_mine >= blocks_to_dkg { - let dkg = self.run_to_dkg(timeout); - total_nmb_blocks_to_mine -= blocks_to_dkg; - if dkg.is_some() { - points.push(dkg.unwrap()); - } - blocks_to_dkg = 0; - nmb_blocks_to_reward_cycle = self.nmb_blocks_to_reward_cycle_boundary( - self.get_current_reward_cycle().saturating_add(1), - ) - } - if total_nmb_blocks_to_mine >= nmb_blocks_to_reward_cycle { - let end_block_height = self - .running_nodes - .btc_regtest_controller - .get_headers_height() - .saturating_sub(1) // Must subtract 1 since get_headers_height returns current block height + 1 - .saturating_add(nmb_blocks_to_reward_cycle); - debug!("Mining {nmb_blocks_to_reward_cycle} Nakamoto block(s) to reach the next reward cycle boundary at {end_block_height}."); - for i in 1..=nmb_blocks_to_reward_cycle { - debug!("Mining Nakamoto block #{i} of {nmb_blocks_to_reward_cycle}"); - let curr_reward_cycle = self.get_current_reward_cycle(); - let set_dkg = self - .stacks_client - .get_approved_aggregate_key(curr_reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found"); - self.mine_and_verify_confirmed_naka_block(&set_dkg, timeout); - } - total_nmb_blocks_to_mine -= nmb_blocks_to_reward_cycle; - nmb_blocks_to_reward_cycle = 0; - blocks_to_dkg = self.nmb_blocks_to_reward_set_calculation(); - } - } - for i in 1..=total_nmb_blocks_to_mine { - info!("Mining Nakamoto block #{i} of {total_nmb_blocks_to_mine} to reach {burnchain_height}"); - let curr_reward_cycle = self.get_current_reward_cycle(); - let set_dkg = self - .stacks_client - 
.get_approved_aggregate_key(curr_reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found"); - self.mine_and_verify_confirmed_naka_block(&set_dkg, timeout); - } - points - } - - fn mine_and_verify_confirmed_naka_block( - &mut self, - agg_key: &Point, - timeout: Duration, - ) -> MinedNakamotoBlockEvent { - let new_block = self.mine_nakamoto_block(timeout); - let signer_sighash = new_block.signer_signature_hash.clone(); - let signature = self.wait_for_confirmed_block_v1(&signer_sighash, timeout); - assert!(signature.0.verify(&agg_key, signer_sighash.as_bytes())); - new_block - } - - fn wait_for_dkg(&mut self, timeout: Duration) -> Point { - debug!("Waiting for DKG..."); - let mut key = Point::default(); - let dkg_now = Instant::now(); - for signer in self.spawned_signers.iter() { - let mut aggregate_public_key = None; - loop { - let results = signer - .res_recv - .recv_timeout(timeout) - .expect("failed to recv dkg results"); - for result in results { - match result { - SignerResult::OperationResult(OperationResult::Dkg(point)) => { - info!("Received aggregate_group_key {point}"); - aggregate_public_key = Some(point); - } - SignerResult::OperationResult(other) => { - panic!("{}", operation_panic_message(&other)) - } - SignerResult::StatusCheck(state) => { - panic!("Received status check result: {:?}", state); - } - } - } - if aggregate_public_key.is_some() || dkg_now.elapsed() > timeout { - break; - } - } - key = aggregate_public_key.expect(&format!( - "Failed to get aggregate public key within {timeout:?}" - )); - } - debug!("Finished waiting for DKG!"); - key - } - - fn generate_invalid_transactions(&self) -> Vec { - let host = self.running_nodes.conf.node.rpc_bind.clone(); - // Get the signer indices - let reward_cycle = self.get_current_reward_cycle(); - - let signer_private_key = self.signer_stacks_private_keys[0]; - - let vote_contract_id = boot_code_id(SIGNERS_VOTING_NAME, false); - let contract_addr = 
vote_contract_id.issuer.into(); - let contract_name = vote_contract_id.name.clone(); - - let signer_index = thread_rng().next_u64(); - let signer_index_arg = Value::UInt(signer_index as u128); - - let point = Point::from(Scalar::random(&mut thread_rng())); - let point_arg = - Value::buff_from(point.compress().data.to_vec()).expect("Failed to create buff"); - - let round = thread_rng().next_u64(); - let round_arg = Value::UInt(round as u128); - - let reward_cycle_arg = Value::UInt(reward_cycle as u128); - let valid_function_args = vec![ - signer_index_arg.clone(), - point_arg.clone(), - round_arg.clone(), - reward_cycle_arg.clone(), - ]; - - // Create a invalid transaction that is not a contract call - let invalid_not_contract_call = StacksTransaction { - version: TransactionVersion::Testnet, - chain_id: CHAIN_ID_TESTNET, - auth: TransactionAuth::from_p2pkh(&signer_private_key).unwrap(), - anchor_mode: TransactionAnchorMode::Any, - post_condition_mode: TransactionPostConditionMode::Allow, - post_conditions: vec![], - payload: TransactionPayload::SmartContract( - TransactionSmartContract { - name: "test-contract".into(), - code_body: StacksString::from_str("(/ 1 0)").unwrap(), - }, - None, - ), - }; - let invalid_contract_address = StacksClient::build_unsigned_contract_call_transaction( - &StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&signer_private_key)), - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &valid_function_args, - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_contract_name = StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - "bad-signers-contract-name".into(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &valid_function_args, - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_signers_vote_function = StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - 
contract_name.clone(), - "some-other-function".into(), - &valid_function_args, - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_function_arg_signer_index = - StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &[ - point_arg.clone(), - point_arg.clone(), - round_arg.clone(), - reward_cycle_arg.clone(), - ], - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_function_arg_key = StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &[ - signer_index_arg.clone(), - signer_index_arg.clone(), - round_arg.clone(), - reward_cycle_arg.clone(), - ], - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_function_arg_round = StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &[ - signer_index_arg.clone(), - point_arg.clone(), - point_arg.clone(), - reward_cycle_arg.clone(), - ], - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_function_arg_reward_cycle = - StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &[ - signer_index_arg.clone(), - point_arg.clone(), - round_arg.clone(), - point_arg.clone(), - ], - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 1, - ) - .unwrap(); - - let invalid_nonce = StacksClient::build_unsigned_contract_call_transaction( - &contract_addr, - contract_name.clone(), - SIGNERS_VOTING_FUNCTION_NAME.into(), - &valid_function_args, - &signer_private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 0, // Old nonce - ) - .unwrap(); - - 
let invalid_stacks_client = StacksClient::new( - StacksPrivateKey::new(), - host, - "12345".to_string(), // That's amazing. I've got the same combination on my luggage! - false, - ); - let invalid_signer_tx = invalid_stacks_client - .build_unsigned_vote_for_aggregate_public_key(0, round, point, reward_cycle, 0) - .expect("FATAL: failed to build vote for aggregate public key"); - - let unsigned_txs = vec![ - invalid_nonce, - invalid_not_contract_call, - invalid_contract_name, - invalid_contract_address, - invalid_signers_vote_function, - invalid_function_arg_key, - invalid_function_arg_reward_cycle, - invalid_function_arg_round, - invalid_function_arg_signer_index, - invalid_signer_tx, - ]; - unsigned_txs - .into_iter() - .map(|unsigned| { - invalid_stacks_client - .sign_transaction(unsigned) - .expect("Failed to sign transaction") - }) - .collect() - } -} - -fn operation_panic_message(result: &OperationResult) -> String { - match result { - OperationResult::Sign(sig) => { - format!("Received Signature ({},{})", sig.R, sig.z) - } - OperationResult::SignTaproot(proof) => { - format!("Received SchnorrProof ({},{})", proof.r, proof.s) - } - OperationResult::DkgError(dkg_error) => { - format!("Received DkgError {:?}", dkg_error) - } - OperationResult::SignError(sign_error) => { - format!("Received SignError {}", sign_error) - } - OperationResult::Dkg(point) => { - format!("Received aggregate_group_key {point}") - } - } -} - -#[test] -#[ignore] -/// Test the signer can respond to external commands to perform DKG -fn dkg() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let timeout = Duration::from_secs(200); - let mut signer_test = SignerTest::new(10, vec![]); - info!("Boot to epoch 3.0 reward calculation..."); - boot_to_epoch_3_reward_set( - 
&signer_test.running_nodes.conf, - &signer_test.running_nodes.blocks_processed, - &signer_test.signer_stacks_private_keys, - &signer_test.signer_stacks_private_keys, - &mut signer_test.running_nodes.btc_regtest_controller, - Some(signer_test.num_stacking_cycles), - ); - - info!("Pox 4 activated and at epoch 3.0 reward set calculation (2nd block of its prepare phase)! Ready for signers to perform DKG and Sign!"); - // First wait for the automatically triggered DKG to complete - let key = signer_test.wait_for_dkg(timeout); - - info!("------------------------- Test DKG -------------------------"); - let reward_cycle = signer_test.get_current_reward_cycle().saturating_add(1); - - // Determine the coordinator of the current node height - info!("signer_runloop: spawn send commands to do dkg"); - let dkg_now = Instant::now(); - for signer in signer_test.spawned_signers.iter() { - signer - .cmd_send - .send(RunLoopCommand { - reward_cycle, - command: SignerCommand::Dkg, - }) - .expect("failed to send DKG command"); - } - let new_key = signer_test.wait_for_dkg(timeout); - let dkg_elapsed = dkg_now.elapsed(); - assert_ne!(new_key, key); - - info!("DKG Time Elapsed: {:.2?}", dkg_elapsed); -} - -#[test] -#[ignore] -/// Test the signer rejects requests to sign that do not come from a miner -fn sign_request_rejected() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - - info!("Creating invalid blocks to sign..."); - let header1 = NakamotoBlockHeader { - version: 1, - chain_length: 2, - burn_spent: 3, - consensus_hash: ConsensusHash([0x04; 20]), - parent_block_id: StacksBlockId([0x05; 32]), - tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), - state_index_root: TrieHash([0x07; 32]), - timestamp: 8, - miner_signature: MessageSignature::empty(), - signer_signature: vec![], - 
pox_treatment: BitVec::zeros(1).unwrap(), - }; - let mut block1 = NakamotoBlock { - header: header1, - txs: vec![], - }; - let tx_merkle_root1 = { - let txid_vecs = block1 - .txs - .iter() - .map(|tx| tx.txid().as_bytes().to_vec()) - .collect(); - - MerkleTree::::new(&txid_vecs).root() - }; - block1.header.tx_merkle_root = tx_merkle_root1; - - let header2 = NakamotoBlockHeader { - version: 1, - chain_length: 3, - burn_spent: 4, - consensus_hash: ConsensusHash([0x05; 20]), - parent_block_id: StacksBlockId([0x06; 32]), - tx_merkle_root: Sha512Trunc256Sum([0x07; 32]), - state_index_root: TrieHash([0x08; 32]), - timestamp: 9, - miner_signature: MessageSignature::empty(), - signer_signature: vec![], - pox_treatment: BitVec::zeros(1).unwrap(), - }; - let mut block2 = NakamotoBlock { - header: header2, - txs: vec![], - }; - let tx_merkle_root2 = { - let txid_vecs = block2 - .txs - .iter() - .map(|tx| tx.txid().as_bytes().to_vec()) - .collect(); - - MerkleTree::::new(&txid_vecs).root() - }; - block2.header.tx_merkle_root = tx_merkle_root2; - - let timeout = Duration::from_secs(200); - let mut signer_test: SignerTest = SignerTest::new(10, vec![]); - let _key = signer_test.boot_to_epoch_3(timeout); - - info!("------------------------- Test Sign -------------------------"); - let reward_cycle = signer_test.get_current_reward_cycle(); - let block_proposal_1 = BlockProposal { - block: block1.clone(), - burn_height: 0, - reward_cycle, - }; - let block_proposal_2 = BlockProposal { - block: block2.clone(), - burn_height: 0, - reward_cycle, - }; - // Determine the coordinator of the current node height - info!("signer_runloop: spawn send commands to do sign"); - let sign_now = Instant::now(); - let sign_command = RunLoopCommand { - reward_cycle, - command: SignerCommand::Sign { - block_proposal: block_proposal_1, - is_taproot: false, - merkle_root: None, - }, - }; - let sign_taproot_command = RunLoopCommand { - reward_cycle, - command: SignerCommand::Sign { - block_proposal: 
block_proposal_2, - is_taproot: true, - merkle_root: None, - }, - }; - for signer in signer_test.spawned_signers.iter() { - signer - .cmd_send - .send(sign_command.clone()) - .expect("failed to send sign command"); - signer - .cmd_send - .send(sign_taproot_command.clone()) - .expect("failed to send sign taproot command"); - } - - // Don't wait for signatures. Because the block miner is acting as - // the coordinator, signers won't directly sign commands issued by someone - // other than the miner. Rather, they'll just broadcast their rejections. - - let sign_elapsed = sign_now.elapsed(); - - info!("------------------------- Test Block Rejected -------------------------"); - - // Verify the signers rejected the proposed block - let t_start = Instant::now(); - let signer_message = loop { - assert!( - t_start.elapsed() < Duration::from_secs(30), - "Timed out while waiting for signers block response stacker db event" - ); - - let nakamoto_blocks = test_observer::get_stackerdb_chunks(); - if let Some(message) = find_block_response(nakamoto_blocks) { - break message; - } - thread::sleep(Duration::from_secs(1)); - }; - if let SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) = signer_message { - assert!(matches!( - rejection.reason_code, - RejectCode::ValidationFailed(_) - )); - } else { - panic!("Received unexpected message: {:?}", &signer_message); - } - info!("Sign Time Elapsed: {:.2?}", sign_elapsed); -} - -#[test] -#[ignore] -/// Test that a signer can be offline when a DKG round has commenced and -/// can rejoin the DKG round after it has restarted -fn delayed_dkg() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let timeout = Duration::from_secs(200); - let num_signers = 3; - let mut signer_test = SignerTest::new(num_signers, vec![]); - 
boot_to_epoch_3_reward_set_calculation_boundary( - &signer_test.running_nodes.conf, - &signer_test.running_nodes.blocks_processed, - &signer_test.signer_stacks_private_keys, - &signer_test.signer_stacks_private_keys, - &mut signer_test.running_nodes.btc_regtest_controller, - Some(signer_test.num_stacking_cycles), - ); - let reward_cycle = signer_test.get_current_reward_cycle().saturating_add(1); - let public_keys = signer_test.get_signer_public_keys(reward_cycle); - let coordinator_selector = CoordinatorSelector::from(public_keys); - let (_, coordinator_public_key) = coordinator_selector.get_coordinator(); - let coordinator_public_key = - StacksPublicKey::from_slice(coordinator_public_key.to_bytes().as_slice()).unwrap(); - let signer_slot_ids: Vec<_> = (0..num_signers) - .into_iter() - .map(|i| SignerSlotID(i as u32)) - .collect(); - let mut stackerdbs: Vec<_> = signer_slot_ids - .iter() - .map(|i| { - StackerDBManager::new( - &signer_test.running_nodes.conf.node.rpc_bind, - StacksPrivateKey::new(), // Doesn't matter what key we use. We are just reading, not writing - false, - reward_cycle, - *i, - ) - }) - .collect(); - info!("------------------------- Stop Signers -------------------------"); - let mut to_stop = None; - for (idx, key) in signer_test.signer_stacks_private_keys.iter().enumerate() { - let public_key = StacksPublicKey::from_private(key); - if public_key == coordinator_public_key { - // Do not stop the coordinator. 
We want coordinator to start a DKG round - continue; - } - // Only stop one signer - to_stop = Some(idx); - break; - } - let signer_idx = to_stop.expect("Failed to find a signer to stop"); - let signer_key = signer_test.stop_signer(signer_idx); - debug!( - "Removed signer {signer_idx} with key: {:?}, {}", - signer_key, - signer_key.to_hex() - ); - info!("------------------------- Start DKG -------------------------"); - info!("Waiting for DKG to start..."); - // Advance one more to trigger DKG - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - timeout.as_secs(), - || Ok(true), - ) - .expect("Failed to mine bitcoin block"); - // Do not proceed until we guarantee that DKG was triggered - let start_time = Instant::now(); - loop { - let stackerdb = stackerdbs.first_mut().unwrap(); - let dkg_packets: Vec<_> = stackerdb - .get_dkg_packets(&signer_slot_ids) - .expect("Failed to get dkg packets"); - let begin_packets: Vec<_> = dkg_packets - .iter() - .filter_map(|packet| { - if matches!(packet.msg, Message::DkgBegin(_)) { - Some(packet) - } else { - None - } - }) - .collect(); - if !begin_packets.is_empty() { - break; - } - assert!( - start_time.elapsed() < Duration::from_secs(30), - "Timed out waiting for DKG to be triggered" - ); - } - - info!("------------------------- Restart Stopped Signer -------------------------"); - - signer_test.restart_signer(signer_idx, signer_key); - - info!("------------------------- Wait for DKG -------------------------"); - let key = signer_test.wait_for_dkg(timeout); - let mut transactions = HashSet::with_capacity(num_signers); - let start_time = Instant::now(); - while transactions.len() < num_signers { - for stackerdb in stackerdbs.iter_mut() { - let current_transactions = stackerdb - .get_current_transactions() - .expect("Failed getting current transactions for signer slot id"); - for tx in current_transactions { - transactions.insert(tx.txid()); - } - } - assert!( - start_time.elapsed() < 
Duration::from_secs(30), - "Failed to retrieve pending vote transactions within timeout" - ); - } - - // Make sure transactions get mined - let start_time = Instant::now(); - while !transactions.is_empty() { - assert!( - start_time.elapsed() < Duration::from_secs(30), - "Failed to mine transactions within timeout" - ); - next_block_and_wait( - &mut signer_test.running_nodes.btc_regtest_controller, - &signer_test.running_nodes.blocks_processed, - ); - let blocks = test_observer::get_blocks(); - for block in blocks.iter() { - let txs = block.get("transactions").unwrap().as_array().unwrap(); - for tx in txs.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - continue; - } - let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); - let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - transactions.remove(&parsed.txid()); - } - } - } - - // Make sure DKG did get set - assert_eq!( - key, - signer_test - .stacks_client - .get_approved_aggregate_key(reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found") - ); -} - -pub fn find_block_response(chunk_events: Vec) -> Option { - for event in chunk_events.into_iter() { - if event.contract_id.name.as_str() - == &format!("signers-1-{}", MessageSlotID::BlockResponse.to_u8()) - || event.contract_id.name.as_str() - == &format!("signers-0-{}", MessageSlotID::BlockResponse.to_u8()) - { - let Some(data) = event.modified_slots.first() else { - continue; - }; - let msg = SignerMessage::consensus_deserialize(&mut data.data.as_slice()).unwrap(); - return Some(msg); - } - } - None -} - -#[test] -#[ignore] -/// Test that a signer can respond to a miners request for a signature on a block proposal -/// -/// Test Setup: -/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is advanced to epoch 2.5. 
forcibly triggering DKG to set the key correctly -/// The stacks node is next advanced to epoch 3.0 boundary to allow block signing. -/// -/// Test Execution: -/// The node attempts to mine a Nakamoto block, sending a block to the observing signers via the -/// .miners stacker db instance. The signers submit the block to the stacks node for verification. -/// Upon receiving a Block Validation response approving the block, the signers perform a signing -/// round across its signature hash and return it back to the miner. -/// -/// Test Assertion: -/// Signers return an operation result containing a valid signature across the miner's Nakamoto block's signature hash. -/// Signers broadcasted a signature across the miner's proposed block back to the respective .signers-XXX-YYY contract. -/// Miner appends the signature to the block and finishes mininig it. -fn block_proposal() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let num_signers = 5; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); - let timeout = Duration::from_secs(30); - let short_timeout = Duration::from_secs(30); - - let key = signer_test.boot_to_epoch_3(timeout); - signer_test.mine_nakamoto_block(timeout); - - info!("------------------------- Test Block Proposal -------------------------"); - // Verify that the signers accepted the proposed block, sending back a validate ok response - let proposed_signer_signature_hash = signer_test - .wait_for_validate_ok_response(short_timeout) - .signer_signature_hash; - - info!("------------------------- Test Block Signed -------------------------"); - // Verify that the signers signed the proposed block - let signature = - signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, timeout); - assert!(signature - .0 - 
.verify(&key, proposed_signer_signature_hash.as_bytes())); - - // Test prometheus metrics response - #[cfg(feature = "monitoring_prom")] - { - let metrics_response = signer_test.get_signer_metrics(); - - // Because 5 signers are running in the same process, the prometheus metrics - // are incremented once for every signer. This is why we expect the metric to be - // `5`, even though there is only one block proposed. - let expected_result = format!("stacks_signer_block_proposals_received {}", num_signers); - assert!(metrics_response.contains(&expected_result)); - } - signer_test.shutdown(); -} - -#[test] -#[ignore] -/// Test that signers can handle a transition between Nakamoto reward cycles -/// -/// Test Setup: -/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is advanced to epoch 2.5, triggering a DKG round. The stacks node is then advanced -/// to Epoch 3.0 boundary to allow block signing. -/// -/// Test Execution: -/// The node mines 2 full Nakamoto reward cycles, sending blocks to observing signers to sign and return. -/// -/// Test Assertion: -/// Signers can perform DKG and sign blocks across Nakamoto reward cycles. 
-fn mine_2_nakamoto_reward_cycles() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let nmb_reward_cycles = 2; - let mut signer_test: SignerTest = SignerTest::new(5, vec![]); - let timeout = Duration::from_secs(200); - let first_dkg = signer_test.boot_to_epoch_3(timeout); - let curr_reward_cycle = signer_test.get_current_reward_cycle(); - // Mine 2 full Nakamoto reward cycles (epoch 3 starts in the middle of one, hence the + 1) - let next_reward_cycle = curr_reward_cycle.saturating_add(1); - let final_reward_cycle = next_reward_cycle.saturating_add(nmb_reward_cycles); - let final_reward_cycle_height_boundary = signer_test - .running_nodes - .btc_regtest_controller - .get_burnchain() - .reward_cycle_to_block_height(final_reward_cycle) - .saturating_sub(1); - - info!("------------------------- Test Mine 2 Nakamoto Reward Cycles -------------------------"); - let dkgs = signer_test - .run_until_burnchain_height_nakamoto(timeout, final_reward_cycle_height_boundary); - assert_eq!(dkgs.len() as u64, nmb_reward_cycles.saturating_add(1)); // We will have mined the DKG vote for the following reward cycle - let last_dkg = dkgs - .last() - .expect(&format!( - "Failed to reach DKG for reward cycle {final_reward_cycle_height_boundary}" - )) - .clone(); - assert_ne!(first_dkg, last_dkg); - - let set_dkg = signer_test - .stacks_client - .get_approved_aggregate_key(final_reward_cycle) - .expect("Failed to get approved aggregate key") - .expect("No approved aggregate key found"); - assert_eq!(set_dkg, last_dkg); - - let current_burnchain_height = signer_test - .running_nodes - .btc_regtest_controller - .get_headers_height(); - assert_eq!(current_burnchain_height, final_reward_cycle_height_boundary); - signer_test.shutdown(); -} - -#[test] -#[ignore] -/// Test that signers will 
accept a miners block proposal and sign it if it contains all expected transactions, -/// filtering invalid transactions from the block requirements -/// -/// Test Setup: -/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is advanced to epoch 2.5, triggering a DKG round. The stacks node is then advanced -/// to Epoch 3.0 boundary to allow block signing. It then advances to the prepare phase of the next reward cycle -/// to enable Nakamoto signers to look at the next signer transactions to compare against a proposed block. -/// -/// Test Execution: -/// The node attempts to mine a Nakamoto tenure, sending a block to the observing signers via the -/// .miners stacker db instance. The signers submit the block to the stacks node for verification. -/// Upon receiving a Block Validation response approving the block, the signers verify that it contains -/// all of the NEXT signers' expected transactions, being sure to filter out any invalid transactions -/// from stackerDB as well. -/// -/// Test Assertion: -/// Miner proposes a block to the signers containing all expected transactions. -/// Signers broadcast block approval with a signature back to the waiting miner. -/// Miner includes the signers' signature in the block and finishes mining it. 
-fn filter_bad_transactions() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - // Advance to the prepare phase of a post epoch 3.0 reward cycle to force signers to look at the next signer transactions to compare against a proposed block - let mut signer_test: SignerTest = SignerTest::new(5, vec![]); - let timeout = Duration::from_secs(200); - let current_signers_dkg = signer_test.boot_to_epoch_3(timeout); - let next_signers_dkg = signer_test - .run_to_dkg(timeout) - .expect("Failed to run to DKG"); - assert_ne!(current_signers_dkg, next_signers_dkg); - - info!("------------------------- Submit Invalid Transactions -------------------------"); - - let signer_private_key = signer_test - .signer_stacks_private_keys - .iter() - .find(|pk| { - let addr = to_addr(pk); - addr == *signer_test.stacks_client.get_signer_address() - }) - .cloned() - .expect("Cannot find signer private key for signer id 1"); - let next_reward_cycle = signer_test.get_current_reward_cycle().saturating_add(1); - // Must submit to the NEXT reward cycle slots as they are the ones looked at by the CURRENT miners - let signer_index = signer_test.get_signer_index(next_reward_cycle); - let mut stackerdb = StackerDBManager::new( - &signer_test.running_nodes.conf.node.rpc_bind, - signer_private_key, - false, - next_reward_cycle, - signer_index, - ); - - debug!( - "Signer address is {}", - &signer_test.stacks_client.get_signer_address() - ); - - let invalid_txs = signer_test.generate_invalid_transactions(); - let invalid_txids: HashSet = invalid_txs.iter().map(|tx| tx.txid()).collect(); - - // Submit transactions to stackerdb for the signers and miners to pick up during block verification - stackerdb - .send_message_with_retry(SignerMessage::Transactions(invalid_txs)) - .expect("Failed to write expected 
transactions to stackerdb"); - - info!("------------------------- Verify Nakamoto Block Mined -------------------------"); - let mined_block_event = - signer_test.mine_and_verify_confirmed_naka_block(¤t_signers_dkg, timeout); - for tx_event in &mined_block_event.tx_events { - let TransactionEvent::Success(tx_success) = tx_event else { - panic!("Received unexpected transaction event"); - }; - // Since we never broadcast the "invalid" transaction to the mempool and the transaction did not come from a signer or had an invalid nonce - // the miner should never construct a block that contains them and signers should still approve it - assert!( - !invalid_txids.contains(&tx_success.txid), - "Miner included an invalid transaction in the block" - ); - } - signer_test.shutdown(); -} - -#[test] -#[ignore] -/// Test that signers will be able to continue their operations even if one signer is restarted. -/// -/// Test Setup: -/// The test spins up three stacks signers, one miner Nakamoto node, and a corresponding bitcoind. -/// The stacks node is advanced to epoch 2.5, triggering a DKG round. The stacks node is then advanced -/// to Epoch 3.0 boundary to allow block signing. -/// -/// Test Execution: -/// The signers sign one block as usual. -/// Then, one of the signers is restarted. -/// Finally, the signers sign another block with the restarted signer. -/// -/// Test Assertion: -/// The signers are able to produce a valid signature after one of them is restarted. 
-fn sign_after_signer_reboot() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - tracing_subscriber::registry() - .with(fmt::layer()) - .with(EnvFilter::from_default_env()) - .init(); - - info!("------------------------- Test Setup -------------------------"); - let num_signers = 3; - let mut signer_test: SignerTest = SignerTest::new(num_signers, vec![]); - let timeout = Duration::from_secs(200); - let short_timeout = Duration::from_secs(30); - - let key = signer_test.boot_to_epoch_3(timeout); - - info!("------------------------- Test Mine Block -------------------------"); - - signer_test.mine_nakamoto_block(timeout); - let proposed_signer_signature_hash = signer_test - .wait_for_validate_ok_response(short_timeout) - .signer_signature_hash; - let signature = - signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, short_timeout); - - assert!( - signature.verify(&key, proposed_signer_signature_hash.0.as_slice()), - "Signature verification failed" - ); - - info!("------------------------- Restart one Signer -------------------------"); - let signer_key = signer_test.stop_signer(2); - debug!( - "Removed signer 2 with key: {:?}, {}", - signer_key, - signer_key.to_hex() - ); - signer_test.restart_signer(2, signer_key); - - info!("------------------------- Test Mine Block after restart -------------------------"); - - let last_block = signer_test.mine_nakamoto_block(timeout); - let proposed_signer_signature_hash = signer_test - .wait_for_validate_ok_response(short_timeout) - .signer_signature_hash; - let frost_signature = - signer_test.wait_for_confirmed_block_v1(&proposed_signer_signature_hash, short_timeout); - - // Check that the latest block's bitvec is all 1's - assert_eq!( - last_block.signer_bitvec, - serde_json::to_value(BitVec::<4000>::ones(num_signers as u16).unwrap()) - .expect("Failed to serialize BitVec") - .as_str() - .expect("Failed to serialize BitVec") - ); - - assert!( - frost_signature.verify(&key, 
proposed_signer_signature_hash.0.as_slice()), - "Signature verification failed" - ); - - signer_test.shutdown(); -}