diff --git a/Cargo.lock b/Cargo.lock index 130b3bff21..43975f97c6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -60,6 +60,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", + "getrandom 0.2.15", "once_cell", "version_check", "zerocopy", @@ -180,9 +181,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "ark-bls12-377" @@ -822,6 +823,7 @@ dependencies = [ name = "astria-sequencer" version = "1.0.0" dependencies = [ + "anyhow", "assert-json-diff", "astria-build-info", "astria-config", @@ -847,16 +849,19 @@ dependencies = [ "penumbra-tower-trace", "pin-project-lite", "prost", + "quick_cache", "rand 0.8.5", "rand_chacha 0.3.1", "regex", "serde", "serde_json", "sha2 0.10.8", + "tempfile", "tendermint", "tendermint-proto", "thiserror", "tokio", + "tokio-stream", "tonic 0.10.2", "tower", "tower-abci", @@ -6578,6 +6583,18 @@ dependencies = [ "byteorder", ] +[[package]] +name = "quick_cache" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d7c94f8935a9df96bb6380e8592c70edf497a643f94bd23b2f76b399385dbf4" +dependencies = [ + "ahash", + "equivalent", + "hashbrown 0.14.5", + "parking_lot", +] + [[package]] name = "quote" version = "1.0.36" diff --git a/crates/astria-sequencer/CHANGELOG.md b/crates/astria-sequencer/CHANGELOG.md index c80e93586f..f5fbb8b0af 100644 --- a/crates/astria-sequencer/CHANGELOG.md +++ b/crates/astria-sequencer/CHANGELOG.md @@ -15,6 +15,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Consolidate action handling to single module [#1759](https://github.com/astriaorg/astria/pull/1759). - Ensure all deposit assets are trace prefixed [#1807](https://github.com/astriaorg/astria/pull/1807). - Update `idna` dependency to resolve cargo audit warning [#1869](https://github.com/astriaorg/astria/pull/1869). +- Provide and use new `Storage` struct, wrapping `cnidarium::Storage` [#1801](https://github.com/astriaorg/astria/pull/1801). +- Provide and use new `Snapshot` struct, wrapping `cnidarium::Snapshot` and + holding a cache of recently-read values [#1801](https://github.com/astriaorg/astria/pull/1801). 
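Note (illustrative only, not part of this diff): the changelog entries above describe wrapping `cnidarium::Snapshot` in a new `Snapshot` type that holds a cache of recently-read values, and `quick_cache` is the dependency added for it elsewhere in this change. The sketch below shows what such a read-through cache could look like using `quick_cache`'s `sync::Cache`; the `ReadCache` name, the `String`-keyed `Option<Vec<u8>>` values, and the capacity are assumptions for illustration, not the actual implementation from this PR.

```rust
// Illustrative sketch only; not the code introduced by this PR.
use quick_cache::sync::Cache;

/// A read-through cache of recently-read raw values, keyed by state key.
/// `None` results are cached too, so repeated reads of absent keys are
/// also served from the cache instead of the underlying snapshot.
pub struct ReadCache {
    values: Cache<String, Option<Vec<u8>>>,
}

impl ReadCache {
    pub fn new(capacity: usize) -> Self {
        Self {
            values: Cache::new(capacity),
        }
    }

    /// Returns the cached value for `key`, loading and caching it via `load`
    /// on a cache miss.
    pub fn get_or_load(
        &self,
        key: &str,
        load: impl FnOnce() -> Option<Vec<u8>>,
    ) -> Option<Vec<u8>> {
        if let Some(value) = self.values.get(key) {
            return value;
        }
        let value = load();
        self.values.insert(key.to_string(), value.clone());
        value
    }
}
```

A wrapper like this keeps hot keys out of the backing store on repeated reads within and across blocks, which is the motivation the changelog entries point at.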
## [1.0.0] - 2024-10-25 diff --git a/crates/astria-sequencer/Cargo.toml b/crates/astria-sequencer/Cargo.toml index 56014df011..6a66db85e2 100644 --- a/crates/astria-sequencer/Cargo.toml +++ b/crates/astria-sequencer/Cargo.toml @@ -32,12 +32,14 @@ telemetry = { package = "astria-telemetry", path = "../astria-telemetry", featur "display", ] } +anyhow = "1.0.89" borsh = { version = "1.5.1", features = ["bytes", "derive"] } cnidarium = { git = "https://github.com/penumbra-zone/penumbra.git", rev = "ac7abacc9bb09503d6fd6a396bc0b6850079084e", features = [ "metrics", ] } ibc-proto = { version = "0.41.0", features = ["server"] } matchit = "0.7.2" +quick_cache = "0.6.9" tower = "0.4" tower-abci = "0.12.0" tower-actor = "0.1.0" @@ -60,10 +62,12 @@ regex = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } sha2 = { workspace = true } +tempfile = { workspace = true, optional = true } tendermint-proto = { workspace = true } tendermint = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["rt", "tracing"] } +tokio-stream = { workspace = true } tonic = { workspace = true } tracing = { workspace = true } @@ -79,6 +83,7 @@ config = { package = "astria-config", path = "../astria-config", features = [ insta = { workspace = true, features = ["json"] } maplit = "1.0.2" rand_chacha = "0.3.1" +tempfile = { workspace = true } tokio = { workspace = true, features = ["test-util"] } assert-json-diff = "2.0.2" diff --git a/crates/astria-sequencer/src/accounts/query.rs b/crates/astria-sequencer/src/accounts/query.rs index 00c23a4095..2767fd57e4 100644 --- a/crates/astria-sequencer/src/accounts/query.rs +++ b/crates/astria-sequencer/src/accounts/query.rs @@ -13,11 +13,7 @@ use astria_eyre::eyre::{ Result, WrapErr as _, }; -use cnidarium::{ - Snapshot, - StateRead, - Storage, -}; +use cnidarium::StateRead; use futures::TryStreamExt as _; use prost::Message as _; use tendermint::{ @@ -34,6 +30,10 @@ use crate::{ accounts::StateReadExt as _, app::StateReadExt as _, assets::StateReadExt as _, + storage::{ + Snapshot, + Storage, + }, }; async fn ibc_to_trace<S: StateRead>( diff --git a/crates/astria-sequencer/src/accounts/state_ext.rs b/crates/astria-sequencer/src/accounts/state_ext.rs index ff466469db..2c51e847ba 100644 --- a/crates/astria-sequencer/src/accounts/state_ext.rs +++ b/crates/astria-sequencer/src/accounts/state_ext.rs @@ -273,7 +273,6 @@ impl<T: StateWrite> StateWriteExt for T {} #[cfg(test)] mod tests { - use cnidarium::StateDelta; use futures::TryStreamExt as _; use super::*; @@ -286,6 +285,7 @@ mod tests { astria_address, nria, }, + storage::Storage, }; fn asset_0() -> asset::Denom { @@ -302,9 +302,8 @@ mod tests { #[tokio::test] async fn get_account_nonce_uninitialized_returns_zero() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let state_delta = storage.new_delta_of_latest_snapshot(); // create needed variables let address = astria_address(&[42u8; 20]); @@ -312,7 +311,7 @@ mod tests { // uninitialized accounts return zero assert_eq!( - state + state_delta .get_account_nonce(&address) .await .expect("getting a non-initialized account's nonce should not fail"), @@ -323,20 +322,19 @@ mod tests { #[tokio::test] async fn get_account_nonce_get_nonce_simple() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let 
mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // create needed variables let address = astria_address(&[42u8; 20]); let nonce_expected = 0u32; // can write new - state + state_delta .put_account_nonce(&address, nonce_expected) .expect("putting an account nonce should not fail"); assert_eq!( - state + state_delta .get_account_nonce(&address) .await .expect("a nonce was written and must exist inside the database"), @@ -346,11 +344,11 @@ mod tests { // can rewrite with new value let nonce_expected = 1u32; - state + state_delta .put_account_nonce(&address, nonce_expected) .expect("putting an account nonce should not fail"); assert_eq!( - state + state_delta .get_account_nonce(&address) .await .expect("a new nonce was written and must exist inside the database"), @@ -361,20 +359,19 @@ mod tests { #[tokio::test] async fn get_account_nonce_get_nonce_complex() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // create needed variables let address = astria_address(&[42u8; 20]); let nonce_expected = 2u32; // can write new - state + state_delta .put_account_nonce(&address, nonce_expected) .expect("putting an account nonce should not fail"); assert_eq!( - state + state_delta .get_account_nonce(&address) .await .expect("a nonce was written and must exist inside the database"), @@ -386,11 +383,11 @@ mod tests { let address_1 = astria_address(&[41u8; 20]); let nonce_expected_1 = 3u32; - state + state_delta .put_account_nonce(&address_1, nonce_expected_1) .expect("putting an account nonce should not fail"); assert_eq!( - state + state_delta .get_account_nonce(&address_1) .await .expect("a new nonce was written and must exist inside the database"), @@ -398,7 +395,7 @@ mod tests { "additional account's nonce was not what was expected" ); assert_eq!( - state + state_delta .get_account_nonce(&address) .await .expect("a new nonce was written and must exist inside the database"), @@ -409,9 +406,8 @@ mod tests { #[tokio::test] async fn get_account_balance_uninitialized_returns_zero() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let state_delta = storage.new_delta_of_latest_snapshot(); // create needed variables let address = astria_address(&[42u8; 20]); @@ -420,7 +416,7 @@ mod tests { // non-initialized accounts return zero assert_eq!( - state + state_delta .get_account_balance(&address, &asset) .await .expect("getting a non-initialized asset balance should not fail"), @@ -431,22 +427,21 @@ mod tests { #[tokio::test] async fn get_account_balance_simple() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // create needed variables let address = astria_address(&[42u8; 20]); let asset = asset_0(); let mut amount_expected = 1u128; - state + state_delta .put_account_balance(&address, &asset, amount_expected) .expect("putting an account balance should not fail"); // can initialize assert_eq!( - state + state_delta .get_account_balance(&address, &asset) .await 
.expect("getting an asset balance should not fail"), @@ -457,12 +452,12 @@ mod tests { // can update balance amount_expected = 2u128; - state + state_delta .put_account_balance(&address, &asset, amount_expected) .expect("putting an asset balance for an account should not fail"); assert_eq!( - state + state_delta .get_account_balance(&address, &asset) .await .expect("getting an asset balance should not fail"), @@ -473,22 +468,21 @@ mod tests { #[tokio::test] async fn get_account_balance_multiple_accounts() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // create needed variables let address = astria_address(&[42u8; 20]); let asset = asset_0(); let amount_expected = 1u128; - state + state_delta .put_account_balance(&address, &asset, amount_expected) .expect("putting an account balance should not fail"); // able to write to account's storage assert_eq!( - state + state_delta .get_account_balance(&address, &asset) .await .expect("getting an asset balance should not fail"), @@ -501,11 +495,11 @@ mod tests { let address_1 = astria_address(&[41u8; 20]); let amount_expected_1 = 2u128; - state + state_delta .put_account_balance(&address_1, &asset, amount_expected_1) .expect("putting an account balance should not fail"); assert_eq!( - state + state_delta .get_account_balance(&address_1, &asset) .await .expect("getting an asset balance should not fail"), @@ -514,7 +508,7 @@ mod tests { account update" ); assert_eq!( - state + state_delta .get_account_balance(&address, &asset) .await .expect("getting an asset balance should not fail"), @@ -526,9 +520,8 @@ mod tests { #[tokio::test] async fn get_account_balance_multiple_assets() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // create needed variables let address = astria_address(&[42u8; 20]); @@ -537,16 +530,16 @@ mod tests { let amount_expected_0 = 1u128; let amount_expected_1 = 2u128; - state + state_delta .put_account_balance(&address, &asset_0, amount_expected_0) .expect("putting an account balance should not fail"); - state + state_delta .put_account_balance(&address, &asset_1, amount_expected_1) .expect("putting an account balance should not fail"); // wrote correct balances assert_eq!( - state + state_delta .get_account_balance(&address, &asset_0) .await .expect("getting an asset balance should not fail"), @@ -554,7 +547,7 @@ mod tests { "returned balance for an asset did not match expected" ); assert_eq!( - state + state_delta .get_account_balance(&address, &asset_1) .await .expect("getting an asset balance should not fail"), @@ -565,15 +558,14 @@ mod tests { #[tokio::test] async fn account_asset_balances_uninitialized_ok() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let state_delta = storage.new_delta_of_latest_snapshot(); // create needed variables let address = astria_address(&[42u8; 20]); // see that call was ok - let stream = state.account_asset_balances(&address); + let stream = state_delta.account_asset_balances(&address); // Collect the stream into a vector let balances: 
Vec<_> = stream @@ -590,25 +582,24 @@ mod tests { #[tokio::test] async fn account_asset_balances() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // native account should work with ibc too - state.put_native_asset(nria()).unwrap(); + state_delta.put_native_asset(nria()).unwrap(); - let asset_0 = state.get_native_asset().await.unwrap().unwrap(); + let asset_0 = state_delta.get_native_asset().await.unwrap().unwrap(); let asset_1 = asset_1(); let asset_2 = asset_2(); // also need to add assets to the ibc state - state + state_delta .put_ibc_asset(asset_0.clone()) .expect("should be able to call other trait method on state object"); - state + state_delta .put_ibc_asset(asset_1.clone().unwrap_trace_prefixed()) .expect("should be able to call other trait method on state object"); - state + state_delta .put_ibc_asset(asset_2.clone().unwrap_trace_prefixed()) .expect("should be able to call other trait method on state object"); @@ -619,17 +610,17 @@ mod tests { let amount_expected_2 = 3u128; // add balances to the account - state + state_delta .put_account_balance(&address, &asset_0, amount_expected_0) .expect("putting an account balance should not fail"); - state + state_delta .put_account_balance(&address, &asset_1, amount_expected_1) .expect("putting an account balance should not fail"); - state + state_delta .put_account_balance(&address, &asset_2, amount_expected_2) .expect("putting an account balance should not fail"); - let mut balances = state + let mut balances = state_delta .account_asset_balances(&address) .try_collect::<Vec<_>>() .await @@ -656,23 +647,22 @@ mod tests { #[tokio::test] async fn increase_balance_from_uninitialized() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // create needed variables let address = astria_address(&[42u8; 20]); let asset = asset_0(); let amount_increase = 2u128; - state + state_delta .increase_balance(&address, &asset, amount_increase) .await .expect("increasing account balance for uninitialized account should be ok"); // correct balance was set assert_eq!( - state + state_delta .get_account_balance(&address, &asset) .await .expect("getting an asset balance should not fail"), @@ -680,13 +670,13 @@ mod tests { "returned balance for an asset balance did not match expected" ); - state + state_delta .increase_balance(&address, &asset, amount_increase) .await .expect("increasing account balance for initialized account should be ok"); assert_eq!( - state + state_delta .get_account_balance(&address, &asset) .await .expect("getting an asset balance should not fail"), @@ -697,23 +687,22 @@ mod tests { #[tokio::test] async fn decrease_balance_enough_funds() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // create needed variables let address = astria_address(&[42u8; 20]); let asset = asset_0(); let amount_increase = 2u128; - state + state_delta .increase_balance(&address, &asset, amount_increase) .await .expect("increasing account 
balance for uninitialized account should be ok"); // correct balance was set assert_eq!( - state + state_delta .get_account_balance(&address, &asset) .await .expect("getting an asset balance should not fail"), @@ -722,13 +711,13 @@ mod tests { ); // decrease balance - state + state_delta .decrease_balance(&address, &asset, amount_increase) .await .expect("decreasing account balance for initialized account should be ok"); assert_eq!( - state + state_delta .get_account_balance(&address, &asset) .await .expect("getting an asset balance should not fail"), @@ -739,9 +728,8 @@ mod tests { #[tokio::test] async fn decrease_balance_not_enough_funds() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // create needed variables let address = astria_address(&[42u8; 20]); @@ -749,13 +737,13 @@ mod tests { let amount_increase = 2u128; // give initial balance - state + state_delta .increase_balance(&address, &asset, amount_increase) .await .expect("increasing account balance for uninitialized account should be ok"); // decrease balance - let _ = state + let _ = state_delta .decrease_balance(&address, &asset, amount_increase + 1) .await .expect_err("should not be able to subtract larger balance than what existed"); diff --git a/crates/astria-sequencer/src/action_handler/impls/bridge_sudo_change.rs b/crates/astria-sequencer/src/action_handler/impls/bridge_sudo_change.rs index a69775cf5f..767941b444 100644 --- a/crates/astria-sequencer/src/action_handler/impls/bridge_sudo_change.rs +++ b/crates/astria-sequencer/src/action_handler/impls/bridge_sudo_change.rs @@ -87,7 +87,6 @@ mod tests { transaction::v1::action::BridgeSudoChange, }, }; - use cnidarium::StateDelta; use crate::{ accounts::StateWriteExt as _, @@ -105,6 +104,7 @@ mod tests { StateWriteExt as _, }, fees::StateWriteExt as _, + storage::Storage, transaction::{ StateWriteExt as _, TransactionContext, @@ -113,23 +113,24 @@ mod tests { #[tokio::test] async fn bridge_sudo_change_fails_with_unauthorized_if_signer_is_not_sudo_address() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state.put_transaction_context(TransactionContext { + state_delta.put_transaction_context(TransactionContext { address_bytes: [1; 20], transaction_id: TransactionId::new([0; 32]), position_in_transaction: 0, }); - state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); let asset = test_asset(); - state.put_allowed_fee_asset(&asset).unwrap(); + state_delta.put_allowed_fee_asset(&asset).unwrap(); let bridge_address = astria_address(&[99; 20]); let sudo_address = astria_address(&[98; 20]); - state + state_delta .put_bridge_account_sudo_address(&bridge_address, sudo_address) .unwrap(); @@ -142,7 +143,7 @@ mod tests { assert!( action - .check_and_execute(state) + .check_and_execute(state_delta) .await .unwrap_err() .to_string() @@ -152,33 +153,34 @@ mod tests { #[tokio::test] async fn bridge_sudo_change_executes_as_expected() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = 
Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let sudo_address = astria_address(&[98; 20]); - state.put_transaction_context(TransactionContext { + state_delta.put_transaction_context(TransactionContext { address_bytes: sudo_address.bytes(), transaction_id: TransactionId::new([0; 32]), position_in_transaction: 0, }); - state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); - state + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); + state_delta .put_fees(FeeComponents::<BridgeSudoChange>::new(10, 0)) .unwrap(); let fee_asset = test_asset(); - state.put_allowed_fee_asset(&fee_asset).unwrap(); + state_delta.put_allowed_fee_asset(&fee_asset).unwrap(); let bridge_address = astria_address(&[99; 20]); - state + state_delta .put_bridge_account_sudo_address(&bridge_address, sudo_address) .unwrap(); let new_sudo_address = astria_address(&[98; 20]); let new_withdrawer_address = astria_address(&[97; 20]); - state + state_delta .put_account_balance(&bridge_address, &fee_asset, 10) .unwrap(); @@ -189,17 +191,17 @@ mod tests { fee_asset, }; - action.check_and_execute(&mut state).await.unwrap(); + action.check_and_execute(&mut state_delta).await.unwrap(); assert_eq!( - state + state_delta .get_bridge_account_sudo_address(&bridge_address) .await .unwrap(), Some(new_sudo_address.bytes()), ); assert_eq!( - state + state_delta .get_bridge_account_withdrawer_address(&bridge_address) .await .unwrap(), diff --git a/crates/astria-sequencer/src/action_handler/impls/bridge_unlock.rs b/crates/astria-sequencer/src/action_handler/impls/bridge_unlock.rs index d2362a5742..28c40c563d 100644 --- a/crates/astria-sequencer/src/action_handler/impls/bridge_unlock.rs +++ b/crates/astria-sequencer/src/action_handler/impls/bridge_unlock.rs @@ -113,7 +113,6 @@ mod tests { transaction::v1::action::BridgeUnlock, }, }; - use cnidarium::StateDelta; use crate::{ accounts::StateWriteExt as _, @@ -129,6 +128,7 @@ mod tests { }, bridge::StateWriteExt as _, fees::StateWriteExt as _, + storage::Storage, transaction::{ StateWriteExt as _, TransactionContext, @@ -137,23 +137,24 @@ mod tests { #[tokio::test] async fn bridge_unlock_fails_if_bridge_account_has_no_withdrawer_address() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state.put_transaction_context(TransactionContext { + state_delta.put_transaction_context(TransactionContext { address_bytes: [1; 20], transaction_id: TransactionId::new([0; 32]), position_in_transaction: 0, }); - state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); let asset = test_asset(); let transfer_amount = 100; let to_address = astria_address(&[2; 20]); let bridge_address = astria_address(&[3; 20]); - state + state_delta .put_bridge_account_ibc_asset(&bridge_address, &asset) .unwrap(); @@ -169,23 +170,27 @@ mod tests { // invalid sender, doesn't match action's `from`, should fail assert_eyre_error( - &bridge_unlock.check_and_execute(state).await.unwrap_err(), + &bridge_unlock + .check_and_execute(state_delta) + .await + .unwrap_err(), "bridge account does not have an associated withdrawer address", ); } #[tokio::test] async fn bridge_unlock_fails_if_withdrawer_is_not_signer() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = 
storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state.put_transaction_context(TransactionContext { + state_delta.put_transaction_context(TransactionContext { address_bytes: [1; 20], transaction_id: TransactionId::new([0; 32]), position_in_transaction: 0, }); - state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); let asset = test_asset(); let transfer_amount = 100; @@ -193,10 +198,10 @@ mod tests { let to_address = astria_address(&[2; 20]); let bridge_address = astria_address(&[3; 20]); let withdrawer_address = astria_address(&[4; 20]); - state + state_delta .put_bridge_account_withdrawer_address(&bridge_address, withdrawer_address) .unwrap(); - state + state_delta .put_bridge_account_ibc_asset(&bridge_address, &asset) .unwrap(); @@ -212,47 +217,51 @@ mod tests { // invalid sender, doesn't match action's bridge account's withdrawer, should fail assert_eyre_error( - &bridge_unlock.check_and_execute(state).await.unwrap_err(), + &bridge_unlock + .check_and_execute(state_delta) + .await + .unwrap_err(), "unauthorized to unlock bridge account", ); } #[tokio::test] async fn bridge_unlock_executes_with_duplicated_withdrawal_event_id() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let bridge_address = astria_address(&[1; 20]); - state.put_transaction_context(TransactionContext { + state_delta.put_transaction_context(TransactionContext { address_bytes: bridge_address.bytes(), transaction_id: TransactionId::new([0; 32]), position_in_transaction: 0, }); - state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); let asset = test_asset(); let transfer_fee = 10; let transfer_amount = 100; - state + state_delta .put_fees(FeeComponents::<BridgeUnlock>::new(transfer_fee, 0)) .unwrap(); let to_address = astria_address(&[2; 20]); let rollup_id = RollupId::from_unhashed_bytes(b"test_rollup_id"); - state + state_delta .put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state + state_delta .put_bridge_account_ibc_asset(&bridge_address, &asset) .unwrap(); - state + state_delta .put_bridge_account_withdrawer_address(&bridge_address, bridge_address) .unwrap(); - state.put_allowed_fee_asset(&asset).unwrap(); + state_delta.put_allowed_fee_asset(&asset).unwrap(); // Put plenty of balance - state + state_delta .put_account_balance(&bridge_address, &asset, 3 * transfer_amount) .unwrap(); @@ -272,12 +281,12 @@ mod tests { // first should succeed, next should fail due to duplicate event. 
bridge_unlock_first - .check_and_execute(&mut state) + .check_and_execute(&mut state_delta) .await .unwrap(); assert_eyre_error( &bridge_unlock_second - .check_and_execute(&mut state) + .check_and_execute(&mut state_delta) .await .unwrap_err(), "withdrawal event already processed", diff --git a/crates/astria-sequencer/src/action_handler/impls/fee_change.rs b/crates/astria-sequencer/src/action_handler/impls/fee_change.rs index f066ded9ea..2157c79ee3 100644 --- a/crates/astria-sequencer/src/action_handler/impls/fee_change.rs +++ b/crates/astria-sequencer/src/action_handler/impls/fee_change.rs @@ -102,7 +102,10 @@ mod tests { FeeHandler, StateReadExt as _, }, - storage::StoredValue, + storage::{ + Storage, + StoredValue, + }, transaction::{ StateWriteExt as _, TransactionContext, @@ -185,20 +188,19 @@ mod tests { FeeComponents<F>: TryFrom<StoredValue<'a>, Error = Report> + Debug, FeeChange: From<FeeComponents<F>>, { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = cnidarium::StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // Put the context to enable the txs to execute. - state.put_transaction_context(TransactionContext { + state_delta.put_transaction_context(TransactionContext { address_bytes: [1; 20], transaction_id: TransactionId::new([0; 32]), position_in_transaction: 0, }); - state.put_sudo_address([1; 20]).unwrap(); + state_delta.put_sudo_address([1; 20]).unwrap(); assert!( - state + state_delta .get_fees::<F>() .await .expect("should not error fetching unstored action fees") @@ -209,11 +211,11 @@ mod tests { let initial_fees = FeeComponents::<F>::new(1, 2); let initial_fee_change = FeeChange::from(initial_fees); initial_fee_change - .check_and_execute(&mut state) + .check_and_execute(&mut state_delta) .await .unwrap(); - let retrieved_fees = state + let retrieved_fees = state_delta .get_fees::<F>() .await .expect("should not error fetching initial action fees") @@ -223,9 +225,12 @@ mod tests { // Execute a second fee change tx to overwrite the fees. 
let new_fees = FeeComponents::<F>::new(3, 4); let new_fee_change = FeeChange::from(new_fees); - new_fee_change.check_and_execute(&mut state).await.unwrap(); + new_fee_change + .check_and_execute(&mut state_delta) + .await + .unwrap(); - let retrieved_fees = state + let retrieved_fees = state_delta .get_fees::<F>() .await .expect("should not error fetching new action fees") diff --git a/crates/astria-sequencer/src/action_handler/impls/ics20_withdrawal.rs b/crates/astria-sequencer/src/action_handler/impls/ics20_withdrawal.rs index 35d1523ece..6b1fbbae27 100644 --- a/crates/astria-sequencer/src/action_handler/impls/ics20_withdrawal.rs +++ b/crates/astria-sequencer/src/action_handler/impls/ics20_withdrawal.rs @@ -272,7 +272,6 @@ mod tests { primitive::v1::RollupId, protocol::transaction::v1::action, }; - use cnidarium::StateDelta; use ibc_types::core::client::Height; use crate::{ @@ -287,13 +286,13 @@ mod tests { ASTRIA_PREFIX, }, bridge::StateWriteExt as _, + storage::Storage, }; #[tokio::test] async fn withdrawal_target_is_sender_if_bridge_is_not_set_and_sender_is_not_bridge() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let state_delta = storage.new_delta_of_latest_snapshot(); let denom = test_asset(); let from = [1u8; 20]; @@ -312,7 +311,7 @@ mod tests { }; assert_eq!( - *establish_withdrawal_target(&action, &state, &from) + *establish_withdrawal_target(&action, &state_delta, &from) .await .unwrap(), from @@ -321,21 +320,22 @@ mod tests { #[tokio::test] async fn withdrawal_target_is_sender_if_bridge_is_unset_but_sender_is_bridge() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); // sender is a bridge address, which is also the withdrawer, so it's ok let bridge_address = [1u8; 20]; - state + state_delta .put_bridge_account_rollup_id( &bridge_address, RollupId::from_unhashed_bytes("testrollupid"), ) .unwrap(); - state + state_delta .put_bridge_account_withdrawer_address(&bridge_address, bridge_address) .unwrap(); @@ -355,7 +355,7 @@ mod tests { }; assert_eyre_error( - &establish_withdrawal_target(&action, &state, &bridge_address) + &establish_withdrawal_target(&action, &state_delta, &bridge_address) .await .unwrap_err(), "sender cannot be a bridge address if bridge address is not set", @@ -386,20 +386,21 @@ mod tests { } async fn run_test(action: action::Ics20Withdrawal) { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); // withdraw is *not* the bridge address, Ics20Withdrawal must be sent by the withdrawer - state + state_delta .put_bridge_account_rollup_id( &bridge_address(), RollupId::from_unhashed_bytes("testrollupid"), ) .unwrap(); - state + state_delta .put_bridge_account_withdrawer_address( &bridge_address(), astria_address(&[2u8; 20]), @@ -407,7 +408,7 @@ mod 
tests { .unwrap(); assert_eyre_error( - &establish_withdrawal_target(&action, &state, &bridge_address()) + &establish_withdrawal_target(&action, &state_delta, &bridge_address()) .await .unwrap_err(), "sender does not match bridge withdrawer address; unauthorized", @@ -424,22 +425,23 @@ mod tests { #[tokio::test] async fn bridge_sender_is_withdrawal_target() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); // sender the withdrawer address, so it's ok let bridge_address = [1u8; 20]; let withdrawer_address = [2u8; 20]; - state + state_delta .put_bridge_account_rollup_id( &bridge_address, RollupId::from_unhashed_bytes("testrollupid"), ) .unwrap(); - state + state_delta .put_bridge_account_withdrawer_address(&bridge_address, withdrawer_address) .unwrap(); @@ -459,7 +461,7 @@ mod tests { }; assert_eq!( - *establish_withdrawal_target(&action, &state, &withdrawer_address) + *establish_withdrawal_target(&action, &state_delta, &withdrawer_address) .await .unwrap(), bridge_address, @@ -468,9 +470,8 @@ mod tests { #[tokio::test] async fn bridge_is_rejected_as_withdrawal_target_because_it_has_no_withdrawer_address_set() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let state_delta = storage.new_delta_of_latest_snapshot(); // sender is not the withdrawer address, so must fail let not_bridge_address = [1u8; 20]; @@ -491,7 +492,7 @@ mod tests { }; assert_eyre_error( - &establish_withdrawal_target(&action, &state, ¬_bridge_address) + &establish_withdrawal_target(&action, &state_delta, ¬_bridge_address) .await .unwrap_err(), "bridge address must have a withdrawer address set", diff --git a/crates/astria-sequencer/src/address/state_ext.rs b/crates/astria-sequencer/src/address/state_ext.rs index 0f5cd5bf30..b0a815c59b 100644 --- a/crates/astria-sequencer/src/address/state_ext.rs +++ b/crates/astria-sequencer/src/address/state_ext.rs @@ -109,32 +109,29 @@ impl<T: StateWrite> StateWriteExt for T {} #[cfg(test)] mod tests { - use cnidarium::StateDelta; - use super::*; + use crate::storage::Storage; #[tokio::test] async fn put_and_get_base_prefix() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state.put_base_prefix("astria".to_string()).unwrap(); - assert_eq!("astria", &state.get_base_prefix().await.unwrap()); + state_delta.put_base_prefix("astria".to_string()).unwrap(); + assert_eq!("astria", &state_delta.get_base_prefix().await.unwrap()); } #[tokio::test] async fn put_and_get_ibc_compat_prefix() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state + state_delta .put_ibc_compat_prefix("astriacompat".to_string()) .unwrap(); assert_eq!( "astriacompat", - &state.get_ibc_compat_prefix().await.unwrap() + 
&state_delta.get_ibc_compat_prefix().await.unwrap() ); } } diff --git a/crates/astria-sequencer/src/app/benchmark_and_test_utils.rs b/crates/astria-sequencer/src/app/benchmark_and_test_utils.rs index 7171bce58d..c829a87f30 100644 --- a/crates/astria-sequencer/src/app/benchmark_and_test_utils.rs +++ b/crates/astria-sequencer/src/app/benchmark_and_test_utils.rs @@ -31,11 +31,7 @@ use astria_core::{ Protobuf, }; use astria_eyre::eyre::WrapErr as _; -use cnidarium::{ - Snapshot, - StateDelta, - Storage, -}; +use cnidarium::StateDelta; use penumbra_ibc::IbcRelay; use telemetry::Metrics as _; @@ -50,6 +46,10 @@ use crate::{ fees::StateWriteExt as _, mempool::Mempool, metrics::Metrics, + storage::{ + Snapshot, + Storage, + }, }; pub(crate) const ALICE_ADDRESS: &str = "1c0c490f1b5528d8173c5de46d131160e4b2c0c3"; @@ -124,9 +124,7 @@ pub(crate) async fn initialize_app_with_storage( genesis_state: Option<GenesisAppState>, genesis_validators: Vec<ValidatorUpdate>, ) -> (App, Storage) { - let storage = cnidarium::TempStorage::new() - .await - .expect("failed to create temp storage backing chain state"); + let storage = Storage::new_temp().await; let snapshot = storage.latest_snapshot(); let metrics = Box::leak(Box::new(Metrics::noop_metrics(&()).unwrap())); let mempool = Mempool::new(metrics, 100); @@ -248,121 +246,120 @@ pub(crate) fn mock_state_put_account_nonce( } pub(crate) async fn mock_state_getter() -> StateDelta<Snapshot> { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state: StateDelta<cnidarium::Snapshot> = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // setup denoms - state + state_delta .put_ibc_asset(denom_0().unwrap_trace_prefixed()) .unwrap(); - state + state_delta .put_ibc_asset(denom_1().unwrap_trace_prefixed()) .unwrap(); - state + state_delta .put_ibc_asset(denom_2().unwrap_trace_prefixed()) .unwrap(); - state + state_delta .put_ibc_asset(denom_3().unwrap_trace_prefixed()) .unwrap(); - state + state_delta .put_ibc_asset(denom_4().unwrap_trace_prefixed()) .unwrap(); - state + state_delta .put_ibc_asset(denom_5().unwrap_trace_prefixed()) .unwrap(); - state + state_delta .put_ibc_asset(denom_6().unwrap_trace_prefixed()) .unwrap(); // setup tx fees let transfer_fees = FeeComponents::<Transfer>::new(0, 0); - state + state_delta .put_fees(transfer_fees) .wrap_err("failed to initiate transfer fee components") .unwrap(); let rollup_data_submission_fees = FeeComponents::<RollupDataSubmission>::new(MOCK_SEQUENCE_FEE, 0); - state + state_delta .put_fees(rollup_data_submission_fees) .wrap_err("failed to initiate rollup data submission fee components") .unwrap(); let ics20_withdrawal_fees = FeeComponents::<Ics20Withdrawal>::new(0, 0); - state + state_delta .put_fees(ics20_withdrawal_fees) .wrap_err("failed to initiate ics20 withdrawal fee components") .unwrap(); let init_bridge_account_fees = FeeComponents::<InitBridgeAccount>::new(0, 0); - state + state_delta .put_fees(init_bridge_account_fees) .wrap_err("failed to initiate init bridge account fee components") .unwrap(); let bridge_lock_fees = FeeComponents::<BridgeLock>::new(0, 0); - state + state_delta .put_fees(bridge_lock_fees) .wrap_err("failed to initiate bridge lock fee components") .unwrap(); let bridge_unlock_fees = FeeComponents::<BridgeUnlock>::new(0, 0); - state + state_delta .put_fees(bridge_unlock_fees) .wrap_err("failed to initiate bridge unlock fee components") .unwrap(); 
let bridge_sudo_change_fees = FeeComponents::<BridgeSudoChange>::new(0, 0); - state + state_delta .put_fees(bridge_sudo_change_fees) .wrap_err("failed to initiate bridge sudo change fee components") .unwrap(); let ibc_relay_fees = FeeComponents::<IbcRelay>::new(0, 0); - state + state_delta .put_fees(ibc_relay_fees) .wrap_err("failed to initiate ibc relay fee components") .unwrap(); let validator_update_fees = FeeComponents::<ValidatorUpdate>::new(0, 0); - state + state_delta .put_fees(validator_update_fees) .wrap_err("failed to initiate validator update fee components") .unwrap(); let fee_asset_change_fees = FeeComponents::<FeeAssetChange>::new(0, 0); - state + state_delta .put_fees(fee_asset_change_fees) .wrap_err("failed to initiate fee asset change fee components") .unwrap(); let fee_change_fees = FeeComponents::<FeeChange>::new(0, 0); - state + state_delta .put_fees(fee_change_fees) .wrap_err("failed to initiate fee change fees fee components") .unwrap(); let ibc_relayer_change_fees = FeeComponents::<IbcRelayerChange>::new(0, 0); - state + state_delta .put_fees(ibc_relayer_change_fees) .wrap_err("failed to initiate ibc relayer change fee components") .unwrap(); let sudo_address_change_fees = FeeComponents::<SudoAddressChange>::new(0, 0); - state + state_delta .put_fees(sudo_address_change_fees) .wrap_err("failed to initiate sudo address change fee components") .unwrap(); let ibc_sudo_change_fees = FeeComponents::<IbcSudoChange>::new(0, 0); - state + state_delta .put_fees(ibc_sudo_change_fees) .wrap_err("failed to initiate ibc sudo change fee components") .unwrap(); // put denoms as allowed fee asset - state.put_allowed_fee_asset(&denom_0()).unwrap(); + state_delta.put_allowed_fee_asset(&denom_0()).unwrap(); - state + state_delta } diff --git a/crates/astria-sequencer/src/app/benchmarks.rs b/crates/astria-sequencer/src/app/benchmarks.rs index 390f650de1..c14001bf86 100644 --- a/crates/astria-sequencer/src/app/benchmarks.rs +++ b/crates/astria-sequencer/src/app/benchmarks.rs @@ -12,7 +12,6 @@ use astria_core::{ }, Protobuf, }; -use cnidarium::Storage; use crate::{ app::{ @@ -29,6 +28,7 @@ use crate::{ SIGNER_COUNT, }, proposal::block_size_constraints::BlockSizeConstraints, + storage::Storage, }; /// The max time for any benchmark. diff --git a/crates/astria-sequencer/src/app/mod.rs b/crates/astria-sequencer/src/app/mod.rs index 78386c6f9f..e66400603d 100644 --- a/crates/astria-sequencer/src/app/mod.rs +++ b/crates/astria-sequencer/src/app/mod.rs @@ -37,25 +37,20 @@ use astria_core::{ sequencerblock::v1::block::SequencerBlock, Protobuf as _, }; -use astria_eyre::{ - anyhow_to_eyre, - eyre::{ - bail, - ensure, - eyre, - OptionExt as _, - Result, - WrapErr as _, - }, +use astria_eyre::eyre::{ + bail, + ensure, + eyre, + OptionExt as _, + Result, + WrapErr as _, }; use cnidarium::{ - ArcStateDeltaExt, - Snapshot, + ArcStateDeltaExt as _, StagedWriteBatch, StateDelta, StateRead, StateWrite, - Storage, }; use prost::Message as _; use sha2::{ @@ -127,6 +122,10 @@ use crate::{ GeneratedCommitments, }, }, + storage::{ + Snapshot, + Storage, + }, }; // ephemeral store key for the cache of results of executing of transactions in `prepare_proposal`. @@ -183,7 +182,7 @@ impl From<abci::request::ProcessProposal> for ProposalFingerprint { /// /// [Penumbra reference]: https://github.com/penumbra-zone/penumbra/blob/9cc2c644e05c61d21fdc7b507b96016ba6b9a935/app/src/app/mod.rs#L42 pub(crate) struct App { - state: InterBlockState, + state_delta: InterBlockState, // The mempool of the application. 
// @@ -245,7 +244,6 @@ impl App { let app_hash: AppHash = snapshot .root_hash() .await - .map_err(anyhow_to_eyre) .wrap_err("failed to get current root hash")? .0 .to_vec() @@ -254,10 +252,10 @@ impl App { // We perform the `Arc` wrapping of `State` here to ensure // there should be no unexpected copies elsewhere. - let state = Arc::new(StateDelta::new(snapshot)); + let state_delta = Arc::new(snapshot.new_delta()); Ok(Self { - state, + state_delta, mempool, executed_proposal_fingerprint: None, executed_proposal_hash: Hash::default(), @@ -276,43 +274,43 @@ impl App { genesis_validators: Vec<ValidatorUpdate>, chain_id: String, ) -> Result<AppHash> { - let mut state_tx = self - .state + let mut delta_delta = self + .state_delta .try_begin_transaction() .expect("state Arc should not be referenced elsewhere"); - state_tx + delta_delta .put_base_prefix(genesis_state.address_prefixes().base().to_string()) .wrap_err("failed to write base prefix to state")?; - state_tx + delta_delta .put_ibc_compat_prefix(genesis_state.address_prefixes().ibc_compat().to_string()) .wrap_err("failed to write ibc-compat prefix to state")?; if let Some(native_asset) = genesis_state.native_asset_base_denomination() { - state_tx + delta_delta .put_native_asset(native_asset.clone()) .wrap_err("failed to write native asset to state")?; - state_tx + delta_delta .put_ibc_asset(native_asset.clone()) .wrap_err("failed to commit native asset as ibc asset to state")?; } - state_tx + delta_delta .put_chain_id_and_revision_number(chain_id.try_into().context("invalid chain ID")?) .wrap_err("failed to write chain id to state")?; - state_tx + delta_delta .put_block_height(0) .wrap_err("failed to write block height to state")?; // call init_chain on all components - FeesComponent::init_chain(&mut state_tx, &genesis_state) + FeesComponent::init_chain(&mut delta_delta, &genesis_state) .await .wrap_err("init_chain failed on FeesComponent")?; - AccountsComponent::init_chain(&mut state_tx, &genesis_state) + AccountsComponent::init_chain(&mut delta_delta, &genesis_state) .await .wrap_err("init_chain failed on AccountsComponent")?; AuthorityComponent::init_chain( - &mut state_tx, + &mut delta_delta, &AuthorityComponentAppState { authority_sudo_address: *genesis_state.authority_sudo_address(), genesis_validators, @@ -320,11 +318,11 @@ impl App { ) .await .wrap_err("init_chain failed on AuthorityComponent")?; - IbcComponent::init_chain(&mut state_tx, &genesis_state) + IbcComponent::init_chain(&mut delta_delta, &genesis_state) .await .wrap_err("init_chain failed on IbcComponent")?; - state_tx.apply(); + delta_delta.apply(); let app_hash = self .prepare_commit(storage) @@ -341,7 +339,7 @@ impl App { // if the previous round was committed, then the state stays the same. // // this also clears the ephemeral storage. - self.state = Arc::new(StateDelta::new(storage.latest_snapshot())); + self.state_delta = Arc::new(storage.new_delta_of_latest_snapshot()); // clear the cached executed proposal hash self.executed_proposal_hash = Hash::default(); @@ -390,7 +388,7 @@ impl App { self.metrics .record_proposal_transactions(signed_txs_included.len()); - let deposits = self.state.get_cached_block_deposits(); + let deposits = self.state_delta.get_cached_block_deposits(); self.metrics.record_proposal_deposits(deposits.len()); // generate commitment to sequence::Actions and deposits and commitment to the rollup IDs @@ -428,7 +426,7 @@ impl App { // `SequencerBlock` and to set `self.finalize_block`. 
// // we can't run this in `prepare_proposal` as we don't know the block hash there. - let Some(tx_results) = self.state.object_get(EXECUTION_RESULTS_KEY) else { + let Some(tx_results) = self.state_delta.object_get(EXECUTION_RESULTS_KEY) else { bail!("execution results must be present after executing transactions") }; @@ -514,7 +512,7 @@ impl App { ); self.metrics.record_proposal_transactions(signed_txs.len()); - let deposits = self.state.get_cached_block_deposits(); + let deposits = self.state_delta.get_cached_block_deposits(); self.metrics.record_proposal_deposits(deposits.len()); let GeneratedCommitments { @@ -583,7 +581,7 @@ impl App { // get copy of transactions to execute from mempool let pending_txs = self .mempool - .builder_queue(&self.state) + .builder_queue(&self.state_delta) .await .expect("failed to fetch pending transactions"); @@ -725,10 +723,10 @@ impl App { // to the ephemeral store. // this is okay as we should have the only reference to the state // at this point. - let mut state_tx = Arc::try_begin_transaction(&mut self.state) + let mut delta_delta = Arc::try_begin_transaction(&mut self.state_delta) .expect("state Arc should not be referenced elsewhere"); - state_tx.object_put(EXECUTION_RESULTS_KEY, execution_results); - let _ = state_tx.apply(); + delta_delta.object_put(EXECUTION_RESULTS_KEY, execution_results); + let _ = delta_delta.apply(); Ok((validated_txs, included_signed_txs)) } @@ -826,7 +824,7 @@ impl App { #[instrument(name = "App::pre_execute_transactions", skip_all, err)] async fn pre_execute_transactions(&mut self, block_data: BlockData) -> Result<()> { let chain_id = self - .state + .state_delta .get_chain_id() .await .wrap_err("failed to get chain ID from state")?; @@ -893,12 +891,12 @@ impl App { }; let chain_id = self - .state + .state_delta .get_chain_id() .await .wrap_err("failed to get chain ID from state")?; let sudo_address = self - .state + .state_delta .get_sudo_address() .await .wrap_err("failed to get sudo address from state")?; @@ -906,14 +904,14 @@ impl App { let end_block = self.end_block(height.value(), &sudo_address).await?; // get deposits for this block from state's ephemeral cache and put them to storage. - let mut state_tx = StateDelta::new(self.state.clone()); - let deposits_in_this_block = self.state.get_cached_block_deposits(); + let mut delta_delta = StateDelta::new(self.state_delta.clone()); + let deposits_in_this_block = self.state_delta.get_cached_block_deposits(); debug!( deposits = %telemetry::display::json(&deposits_in_this_block), "got block deposits from state" ); - state_tx + delta_delta .put_deposits(&block_hash, deposits_in_this_block.clone()) .wrap_err("failed to put deposits to state")?; @@ -936,7 +934,7 @@ impl App { deposits_in_this_block, ) .wrap_err("failed to convert block info and data to SequencerBlock")?; - state_tx + delta_delta .put_sequencer_block(sequencer_block) .wrap_err("failed to write sequencer block to state")?; @@ -947,11 +945,11 @@ impl App { tx_results: finalize_block_tx_results, }; - state_tx.object_put(POST_TRANSACTION_EXECUTION_RESULT_KEY, result); + delta_delta.object_put(POST_TRANSACTION_EXECUTION_RESULT_KEY, result); // events that occur after end_block are ignored here; // there should be none anyways. 
- let _ = self.apply(state_tx); + let _ = self.apply(delta_delta); Ok(()) } @@ -1049,11 +1047,15 @@ impl App { if self.recost_mempool { self.metrics.increment_mempool_recosted(); } - update_mempool_after_finalization(&mut self.mempool, &self.state, self.recost_mempool) - .await; + update_mempool_after_finalization( + &mut self.mempool, + &self.state_delta, + self.recost_mempool, + ) + .await; let post_transaction_execution_result: PostTransactionExecutionResult = self - .state + .state_delta .object_get(POST_TRANSACTION_EXECUTION_RESULT_KEY) .expect( "post_transaction_execution_result must be present, as txs were already executed \ @@ -1079,17 +1081,20 @@ impl App { #[instrument(skip_all, err)] async fn prepare_commit(&mut self, storage: Storage) -> Result<AppHash> { // extract the state we've built up to so we can prepare it as a `StagedWriteBatch`. - let dummy_state = StateDelta::new(storage.latest_snapshot()); - let mut state = Arc::try_unwrap(std::mem::replace(&mut self.state, Arc::new(dummy_state))) - .expect("we have exclusive ownership of the State at commit()"); + let dummy_state = storage.new_delta_of_latest_snapshot(); + let mut state_delta = Arc::try_unwrap(std::mem::replace( + &mut self.state_delta, + Arc::new(dummy_state), + )) + .expect("we have exclusive ownership of the State at commit()"); // store the storage version indexed by block height let new_version = storage.latest_version().wrapping_add(1); - let height = state + let height = state_delta .get_block_height() .await .expect("block height must be set, as `put_block_height` was already called"); - state + state_delta .put_storage_version_by_height(height, new_version) .wrap_err("failed to put storage version by height")?; debug!( @@ -1099,9 +1104,8 @@ impl App { ); let write_batch = storage - .prepare_commit(state) + .prepare_commit(state_delta) .await - .map_err(anyhow_to_eyre) .wrap_err("failed to prepare commit")?; let app_hash: AppHash = write_batch .root_hash() @@ -1118,34 +1122,34 @@ impl App { &mut self, begin_block: &abci::request::BeginBlock, ) -> Result<Vec<abci::Event>> { - let mut state_tx = StateDelta::new(self.state.clone()); + let mut delta_delta = StateDelta::new(self.state_delta.clone()); - state_tx + delta_delta .put_block_height(begin_block.header.height.into()) .wrap_err("failed to put block height")?; - state_tx + delta_delta .put_block_timestamp(begin_block.header.time) .wrap_err("failed to put block timestamp")?; // call begin_block on all components - let mut arc_state_tx = Arc::new(state_tx); - AccountsComponent::begin_block(&mut arc_state_tx, begin_block) + let mut arc_delta_delta = Arc::new(delta_delta); + AccountsComponent::begin_block(&mut arc_delta_delta, begin_block) .await .wrap_err("begin_block failed on AccountsComponent")?; - AuthorityComponent::begin_block(&mut arc_state_tx, begin_block) + AuthorityComponent::begin_block(&mut arc_delta_delta, begin_block) .await .wrap_err("begin_block failed on AuthorityComponent")?; - IbcComponent::begin_block(&mut arc_state_tx, begin_block) + IbcComponent::begin_block(&mut arc_delta_delta, begin_block) .await .wrap_err("begin_block failed on IbcComponent")?; - FeesComponent::begin_block(&mut arc_state_tx, begin_block) + FeesComponent::begin_block(&mut arc_delta_delta, begin_block) .await .wrap_err("begin_block failed on FeesComponent")?; - let state_tx = Arc::try_unwrap(arc_state_tx) + let delta_delta = Arc::try_unwrap(arc_delta_delta) .expect("components should not retain copies of shared state"); - Ok(self.apply(state_tx)) + 
Ok(self.apply(delta_delta)) } /// Executes a signed transaction. @@ -1156,13 +1160,13 @@ impl App { .await .wrap_err("stateless check failed")?; - let mut state_tx = self - .state + let mut delta_delta = self + .state_delta .try_begin_transaction() .expect("state Arc should be present and unique"); signed_tx - .check_and_execute(&mut state_tx) + .check_and_execute(&mut delta_delta) .await .wrap_err("failed executing transaction")?; @@ -1175,7 +1179,7 @@ impl App { .any(|act| act.is_fee_asset_change() || act.is_fee_change()); // index all event attributes - let mut events = state_tx.apply().1; + let mut events = delta_delta.apply().1; for event in &mut events { event .attributes @@ -1192,8 +1196,8 @@ impl App { height: u64, fee_recipient: &[u8; 20], ) -> Result<abci::response::EndBlock> { - let state_tx = StateDelta::new(self.state.clone()); - let mut arc_state_tx = Arc::new(state_tx); + let delta_delta = StateDelta::new(self.state_delta.clone()); + let mut arc_delta_delta = Arc::new(delta_delta); let end_block = abci::request::EndBlock { height: height @@ -1202,43 +1206,43 @@ impl App { }; // call end_block on all components - AccountsComponent::end_block(&mut arc_state_tx, &end_block) + AccountsComponent::end_block(&mut arc_delta_delta, &end_block) .await .wrap_err("end_block failed on AccountsComponent")?; - AuthorityComponent::end_block(&mut arc_state_tx, &end_block) + AuthorityComponent::end_block(&mut arc_delta_delta, &end_block) .await .wrap_err("end_block failed on AuthorityComponent")?; - FeesComponent::end_block(&mut arc_state_tx, &end_block) + FeesComponent::end_block(&mut arc_delta_delta, &end_block) .await .wrap_err("end_block failed on FeesComponent")?; - IbcComponent::end_block(&mut arc_state_tx, &end_block) + IbcComponent::end_block(&mut arc_delta_delta, &end_block) .await .wrap_err("end_block failed on IbcComponent")?; - let mut state_tx = Arc::try_unwrap(arc_state_tx) + let mut delta_delta = Arc::try_unwrap(arc_delta_delta) .expect("components should not retain copies of shared state"); // gather and return validator updates let validator_updates = self - .state + .state_delta .get_validator_updates() .await .expect("failed getting validator updates"); // clear validator updates - state_tx.clear_validator_updates(); + delta_delta.clear_validator_updates(); // gather block fees and transfer them to the block proposer - let fees = self.state.get_block_fees(); + let fees = self.state_delta.get_block_fees(); for fee in fees { - state_tx + delta_delta .increase_balance(fee_recipient, fee.asset(), fee.amount()) .await .wrap_err("failed to increase fee recipient balance")?; } - let events = self.apply(state_tx); + let events = self.apply(delta_delta); Ok(abci::response::EndBlock { validator_updates: validator_updates .try_into_cometbft() @@ -1267,7 +1271,7 @@ impl App { .expect("root hash to app hash conversion must succeed"); // Get the latest version of the state, now that we've committed it. - self.state = Arc::new(StateDelta::new(storage.latest_snapshot())); + self.state_delta = Arc::new(storage.new_delta_of_latest_snapshot()); } // StateDelta::apply only works when the StateDelta wraps an underlying @@ -1276,16 +1280,16 @@ impl App { // access. This method "externally" applies the state delta to the // inter-block state. // - // Invariant: state_tx and self.state are the only two references to the + // Invariant: delta_delta and self.state are the only two references to the // inter-block state. 
- fn apply(&mut self, state_tx: StateDelta<InterBlockState>) -> Vec<Event> { - let (state2, mut cache) = state_tx.flatten(); - std::mem::drop(state2); + fn apply(&mut self, delta_delta: StateDelta<InterBlockState>) -> Vec<Event> { + let (state2, mut cache) = delta_delta.flatten(); + drop(state2); // Now there is only one reference to the inter-block state: self.state let events = cache.take_events(); cache.apply_to( - Arc::get_mut(&mut self.state).expect("no other references to inter-block state"), + Arc::get_mut(&mut self.state_delta).expect("no other references to inter-block state"), ); events diff --git a/crates/astria-sequencer/src/app/state_ext.rs b/crates/astria-sequencer/src/app/state_ext.rs index b9a2e3b78e..aa2fff079f 100644 --- a/crates/astria-sequencer/src/app/state_ext.rs +++ b/crates/astria-sequencer/src/app/state_ext.rs @@ -173,9 +173,8 @@ fn revision_number_from_chain_id(chain_id: &str) -> u64 { #[cfg(test)] mod tests { - use cnidarium::StateDelta; - use super::*; + use crate::storage::Storage; #[test] fn revision_number_from_chain_id_regex() { @@ -200,23 +199,22 @@ mod tests { #[tokio::test] async fn put_chain_id_and_revision_number() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // doesn't exist at first - let _ = state + let _ = state_delta .get_chain_id() .await .expect_err("no chain ID should exist at first"); // can write new let chain_id_orig: tendermint::chain::Id = "test-chain-orig".try_into().unwrap(); - state + state_delta .put_chain_id_and_revision_number(chain_id_orig.clone()) .unwrap(); assert_eq!( - state + state_delta .get_chain_id() .await .expect("a chain ID was written and must exist inside the database"), @@ -225,7 +223,7 @@ mod tests { ); assert_eq!( - state + state_delta .get_revision_number() .await .expect("getting the revision number should succeed"), @@ -235,11 +233,11 @@ mod tests { // can rewrite with new value let chain_id_update: tendermint::chain::Id = "test-chain-update".try_into().unwrap(); - state + state_delta .put_chain_id_and_revision_number(chain_id_update.clone()) .unwrap(); assert_eq!( - state + state_delta .get_chain_id() .await .expect("a new chain ID was written and must exist inside the database"), @@ -248,7 +246,7 @@ mod tests { ); assert_eq!( - state + state_delta .get_revision_number() .await .expect("getting the revision number should succeed"), @@ -258,11 +256,11 @@ mod tests { // can rewrite with chain id with revision number let chain_id_update: tendermint::chain::Id = "test-chain-99".try_into().unwrap(); - state + state_delta .put_chain_id_and_revision_number(chain_id_update.clone()) .unwrap(); assert_eq!( - state + state_delta .get_chain_id() .await .expect("a new chain ID was written and must exist inside the database"), @@ -271,7 +269,7 @@ mod tests { ); assert_eq!( - state + state_delta .get_revision_number() .await .expect("getting the revision number should succeed"), @@ -282,21 +280,20 @@ mod tests { #[tokio::test] async fn block_height() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // doesn't exist at first - let _ = state + let _ = state_delta .get_block_height() .await .expect_err("no block 
height should exist at first"); // can write new let block_height_orig = 0; - state.put_block_height(block_height_orig).unwrap(); + state_delta.put_block_height(block_height_orig).unwrap(); assert_eq!( - state + state_delta .get_block_height() .await .expect("a block height was written and must exist inside the database"), @@ -306,9 +303,9 @@ mod tests { // can rewrite with new value let block_height_update = 1; - state.put_block_height(block_height_update).unwrap(); + state_delta.put_block_height(block_height_update).unwrap(); assert_eq!( - state + state_delta .get_block_height() .await .expect("a new block height was written and must exist inside the database"), @@ -319,21 +316,22 @@ mod tests { #[tokio::test] async fn block_timestamp() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // doesn't exist at first - let _ = state + let _ = state_delta .get_block_timestamp() .await .expect_err("no block timestamp should exist at first"); // can write new let block_timestamp_orig = Time::from_unix_timestamp(1_577_836_800, 0).unwrap(); - state.put_block_timestamp(block_timestamp_orig).unwrap(); + state_delta + .put_block_timestamp(block_timestamp_orig) + .unwrap(); assert_eq!( - state + state_delta .get_block_timestamp() .await .expect("a block timestamp was written and must exist inside the database"), @@ -343,9 +341,11 @@ mod tests { // can rewrite with new value let block_timestamp_update = Time::from_unix_timestamp(1_577_836_801, 0).unwrap(); - state.put_block_timestamp(block_timestamp_update).unwrap(); + state_delta + .put_block_timestamp(block_timestamp_update) + .unwrap(); assert_eq!( - state + state_delta .get_block_timestamp() .await .expect("a new block timestamp was written and must exist inside the database"), @@ -356,24 +356,23 @@ mod tests { #[tokio::test] async fn storage_version() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // doesn't exist at first let block_height_orig = 0; - let _ = state + let _ = state_delta .get_storage_version_by_height(block_height_orig) .await .expect_err("no block height should exist at first"); // can write for block height 0 let storage_version_orig = 0; - state + state_delta .put_storage_version_by_height(block_height_orig, storage_version_orig) .unwrap(); assert_eq!( - state + state_delta .get_storage_version_by_height(block_height_orig) .await .expect("a storage version was written and must exist inside the database"), @@ -383,11 +382,11 @@ mod tests { // can update block height 0 let storage_version_update = 0; - state + state_delta .put_storage_version_by_height(block_height_orig, storage_version_update) .unwrap(); assert_eq!( - state + state_delta .get_storage_version_by_height(block_height_orig) .await .expect("a new storage version was written and must exist inside the database"), @@ -397,11 +396,11 @@ mod tests { // can write block 1 and block 0 is unchanged let block_height_update = 1; - state + state_delta .put_storage_version_by_height(block_height_update, storage_version_orig) .unwrap(); assert_eq!( - state + state_delta .get_storage_version_by_height(block_height_update) .await .expect("a second storage version was 
written and must exist inside the database"), @@ -409,7 +408,7 @@ mod tests { "additional storage version was not what was expected" ); assert_eq!( - state + state_delta .get_storage_version_by_height(block_height_orig) .await .expect( diff --git a/crates/astria-sequencer/src/app/tests_app/mempool.rs b/crates/astria-sequencer/src/app/tests_app/mempool.rs index 4a30b4460b..65eb723fe6 100644 --- a/crates/astria-sequencer/src/app/tests_app/mempool.rs +++ b/crates/astria-sequencer/src/app/tests_app/mempool.rs @@ -356,7 +356,7 @@ async fn maintenance_recosting_promotes() { // see transfer went through assert_eq!( - app.state + app.state_delta .get_account_balance(&astria_address_from_hex_string(CAROL_ADDRESS), &nria()) .await .unwrap(), @@ -549,7 +549,7 @@ async fn maintenance_funds_added_promotes() { app.commit(storage.clone()).await; // see transfer went through assert_eq!( - app.state + app.state_delta .get_account_balance(&astria_address_from_hex_string(BOB_ADDRESS), &nria()) .await .unwrap(), diff --git a/crates/astria-sequencer/src/app/tests_app/mod.rs b/crates/astria-sequencer/src/app/tests_app/mod.rs index c0cd55b1ca..db86dfd674 100644 --- a/crates/astria-sequencer/src/app/tests_app/mod.rs +++ b/crates/astria-sequencer/src/app/tests_app/mod.rs @@ -102,7 +102,7 @@ fn default_tendermint_header() -> Header { #[tokio::test] async fn app_genesis_and_init_chain() { let app = initialize_app(None, vec![]).await; - assert_eq!(app.state.get_block_height().await.unwrap(), 0); + assert_eq!(app.state_delta.get_block_height().await.unwrap(), 0); for Account { address, @@ -111,7 +111,7 @@ async fn app_genesis_and_init_chain() { { assert_eq!( balance, - app.state + app.state_delta .get_account_balance(&address, &nria()) .await .unwrap(), @@ -119,7 +119,7 @@ async fn app_genesis_and_init_chain() { } assert_eq!( - app.state.get_native_asset().await.unwrap(), + app.state_delta.get_native_asset().await.unwrap(), Some("nria".parse::<TracePrefixed>().unwrap()), ); } @@ -139,9 +139,9 @@ async fn app_pre_execute_transactions() { app.pre_execute_transactions(block_data.clone()) .await .unwrap(); - assert_eq!(app.state.get_block_height().await.unwrap(), 1); + assert_eq!(app.state_delta.get_block_height().await.unwrap(), 1); assert_eq!( - app.state.get_block_timestamp().await.unwrap(), + app.state_delta.get_block_timestamp().await.unwrap(), block_data.time ); } @@ -188,7 +188,7 @@ async fn app_begin_block_remove_byzantine_validators() { app.begin_block(&begin_block).await.unwrap(); // assert that validator with pubkey_a is removed - let validator_set = app.state.get_validator_set().await.unwrap(); + let validator_set = app.state_delta.get_validator_set().await.unwrap(); assert_eq!(validator_set.len(), 1); assert_eq!(validator_set.get(&verification_key(2)).unwrap().power, 1,); } @@ -196,7 +196,7 @@ async fn app_begin_block_remove_byzantine_validators() { #[tokio::test] async fn app_commit() { let (mut app, storage) = initialize_app_with_storage(None, vec![]).await; - assert_eq!(app.state.get_block_height().await.unwrap(), 0); + assert_eq!(app.state_delta.get_block_height().await.unwrap(), 0); for Account { address, @@ -205,7 +205,7 @@ async fn app_commit() { { assert_eq!( balance, - app.state + app.state_delta .get_account_balance(&address, &nria()) .await .unwrap() @@ -283,20 +283,20 @@ async fn app_transfer_block_fees_to_sudo() { // assert that transaction fees were transferred to the block proposer let transfer_base_fee = app - .state + .state_delta .get_fees::<Transfer>() .await .expect("should not error 
fetching transfer fees") .expect("transfer fees should be stored") .base(); assert_eq!( - app.state + app.state_delta .get_account_balance(&astria_address_from_hex_string(JUDY_ADDRESS), &nria()) .await .unwrap(), transfer_base_fee, ); - assert_eq!(app.state.get_block_fees().len(), 0); + assert_eq!(app.state_delta.get_block_fees().len(), 0); } #[tokio::test] @@ -315,11 +315,11 @@ async fn app_create_sequencer_block_with_sequenced_data_and_deposits() { let rollup_id = RollupId::from_unhashed_bytes(b"testchainid"); let starting_index_of_action = 0; - let mut state_tx = StateDelta::new(app.state.clone()); - state_tx + let mut delta_delta = StateDelta::new(app.state_delta.clone()); + delta_delta .put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state_tx + delta_delta .put_bridge_account_ibc_asset(&bridge_address, nria()) .unwrap(); // Put a deposit from a previous block to ensure it is not mixed in with deposits for this @@ -333,13 +333,13 @@ async fn app_create_sequencer_block_with_sequenced_data_and_deposits() { source_transaction_id: TransactionId::new([99; 32]), source_action_index: starting_index_of_action, }; - state_tx + delta_delta .put_deposits( &[32u8; 32], HashMap::from_iter([(rollup_id, vec![old_deposit])]), ) .unwrap(); - app.apply(state_tx); + app.apply(delta_delta); app.prepare_commit(storage.clone()).await.unwrap(); app.commit(storage.clone()).await; @@ -395,7 +395,11 @@ async fn app_create_sequencer_block_with_sequenced_data_and_deposits() { .unwrap(); app.commit(storage).await; - let block = app.state.get_sequencer_block_by_height(1).await.unwrap(); + let block = app + .state_delta + .get_sequencer_block_by_height(1) + .await + .unwrap(); let mut deposits = vec![]; for (_, rollup_data) in block.rollup_transactions() { for tx in rollup_data.transactions() { @@ -424,14 +428,14 @@ async fn app_execution_results_match_proposal_vs_after_proposal() { let asset = nria().clone(); let starting_index_of_action = 0; - let mut state_tx = StateDelta::new(app.state.clone()); - state_tx + let mut delta_delta = StateDelta::new(app.state_delta.clone()); + delta_delta .put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state_tx + delta_delta .put_bridge_account_ibc_asset(&bridge_address, &asset) .unwrap(); - app.apply(state_tx); + app.apply(delta_delta); app.prepare_commit(storage.clone()).await.unwrap(); app.commit(storage.clone()).await; @@ -529,7 +533,7 @@ async fn app_execution_results_match_proposal_vs_after_proposal() { Some(proposal_fingerprint) ); - app.mempool.run_maintenance(&app.state, false).await; + app.mempool.run_maintenance(&app.state_delta, false).await; assert_eq!(app.mempool.len().await, 0); @@ -654,7 +658,7 @@ async fn app_prepare_proposal_cometbft_max_bytes_overflow_ok() { .expect("too large transactions should not cause prepare proposal to fail"); // run maintence to clear out transactions - app.mempool.run_maintenance(&app.state, false).await; + app.mempool.run_maintenance(&app.state_delta, false).await; // see only first tx made it in assert_eq!( @@ -743,7 +747,7 @@ async fn app_prepare_proposal_sequencer_max_bytes_overflow_ok() { .expect("too large transactions should not cause prepare proposal to fail"); // run maintence to clear out transactions - app.mempool.run_maintenance(&app.state, false).await; + app.mempool.run_maintenance(&app.state_delta, false).await; // see only first tx made it in assert_eq!( @@ -906,11 +910,11 @@ async fn app_end_block_validator_updates() { }, ]; - let mut state_tx = 
StateDelta::new(app.state.clone()); - state_tx + let mut delta_delta = StateDelta::new(app.state_delta.clone()); + delta_delta .put_validator_updates(ValidatorSet::new_from_updates(validator_updates.clone())) .unwrap(); - app.apply(state_tx); + app.apply(delta_delta); let resp = app.end_block(1, &proposer_address).await.unwrap(); // we only assert length here as the ordering of the updates is not guaranteed @@ -920,7 +924,7 @@ async fn app_end_block_validator_updates() { // validator with pubkey_a should be removed (power set to 0) // validator with pubkey_b should be updated // validator with pubkey_c should be added - let validator_set = app.state.get_validator_set().await.unwrap(); + let validator_set = app.state_delta.get_validator_set().await.unwrap(); assert_eq!(validator_set.len(), 2); let validator_b = validator_set .get(verification_key(1).address_bytes()) @@ -932,5 +936,8 @@ async fn app_end_block_validator_updates() { .unwrap(); assert_eq!(validator_c.verification_key, verification_key(2)); assert_eq!(validator_c.power, 100); - assert_eq!(app.state.get_validator_updates().await.unwrap().len(), 0); + assert_eq!( + app.state_delta.get_validator_updates().await.unwrap().len(), + 0 + ); } diff --git a/crates/astria-sequencer/src/app/tests_block_ordering.rs b/crates/astria-sequencer/src/app/tests_block_ordering.rs index c8c28893f4..cfbfee0936 100644 --- a/crates/astria-sequencer/src/app/tests_block_ordering.rs +++ b/crates/astria-sequencer/src/app/tests_block_ordering.rs @@ -208,7 +208,7 @@ async fn app_prepare_proposal_account_block_misordering_ok() { "expected to contain first transaction" ); - app.mempool.run_maintenance(&app.state, false).await; + app.mempool.run_maintenance(&app.state_delta, false).await; assert_eq!( app.mempool.len().await, 1, @@ -240,6 +240,6 @@ async fn app_prepare_proposal_account_block_misordering_ok() { "expected to contain second transaction" ); - app.mempool.run_maintenance(&app.state, false).await; + app.mempool.run_maintenance(&app.state_delta, false).await; assert_eq!(app.mempool.len().await, 0, "mempool should be empty"); } diff --git a/crates/astria-sequencer/src/app/tests_breaking_changes.rs b/crates/astria-sequencer/src/app/tests_breaking_changes.rs index 2118d41d4d..35f3610b4c 100644 --- a/crates/astria-sequencer/src/app/tests_breaking_changes.rs +++ b/crates/astria-sequencer/src/app/tests_breaking_changes.rs @@ -93,14 +93,14 @@ async fn app_finalize_block_snapshot() { let rollup_id = RollupId::from_unhashed_bytes(b"testchainid"); let starting_index_of_action = 0; - let mut state_tx = StateDelta::new(app.state.clone()); - state_tx + let mut delta_delta = StateDelta::new(app.state_delta.clone()); + delta_delta .put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state_tx + delta_delta .put_bridge_account_ibc_asset(&bridge_address, nria()) .unwrap(); - app.apply(state_tx); + app.apply(delta_delta); // the state changes must be committed, as `finalize_block` will execute the // changes on the latest snapshot, not the app's `StateDelta`. 
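The test hunks above and below all apply one mechanical migration: `cnidarium::TempStorage::new().await.unwrap()` followed by `StateDelta::new(storage.latest_snapshot())` becomes `Storage::new_temp().await` followed by `storage.new_delta_of_latest_snapshot()`, with the binding renamed from `state` to `state_delta`. The wrapper itself lives in the new `crate::storage` module, which is not included in these hunks; the sketch below only demonstrates how the new helpers are called, and its test name and key/value pair are invented for illustration.

    use cnidarium::{
        StateRead as _,
        StateWrite as _,
    };

    use crate::storage::Storage;

    #[tokio::test]
    async fn new_temp_storage_round_trip_sketch() {
        // These two calls replace the old `TempStorage::new` / `latest_snapshot`
        // / `StateDelta::new` setup used before this PR.
        let storage = Storage::new_temp().await;
        let mut state_delta = storage.new_delta_of_latest_snapshot();

        // The delta is used through cnidarium's `StateRead`/`StateWrite` traits
        // elsewhere in this diff, so raw reads and writes are assumed to behave
        // as they did before the migration.
        state_delta.put_raw("example/key".to_string(), b"value".to_vec());
        assert_eq!(
            state_delta.get_raw("example/key").await.unwrap(),
            Some(b"value".to_vec()),
        );
    }
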
@@ -362,7 +362,7 @@ async fn app_execute_transaction_with_every_action_snapshot() { let signed_tx = Arc::new(tx_bridge.sign(&bridge)); app.execute_transaction(signed_tx).await.unwrap(); - let sudo_address = app.state.get_sudo_address().await.unwrap(); + let sudo_address = app.state_delta.get_sudo_address().await.unwrap(); app.end_block(1, &sudo_address).await.unwrap(); app.prepare_commit(storage.clone()).await.unwrap(); diff --git a/crates/astria-sequencer/src/app/tests_execute_transaction.rs b/crates/astria-sequencer/src/app/tests_execute_transaction.rs index 7f5d778e36..0f2c0731ec 100644 --- a/crates/astria-sequencer/src/app/tests_execute_transaction.rs +++ b/crates/astria-sequencer/src/app/tests_execute_transaction.rs @@ -130,29 +130,38 @@ async fn app_execute_transaction_transfer() { app.execute_transaction(signed_tx).await.unwrap(); assert_eq!( - app.state + app.state_delta .get_account_balance(&bob_address, &nria()) .await .unwrap(), value + 10u128.pow(19) ); let transfer_base = app - .state + .state_delta .get_fees::<Transfer>() .await .expect("should not error fetching transfer fees") .expect("transfer fees should be stored") .base(); assert_eq!( - app.state + app.state_delta .get_account_balance(&alice_address, &nria()) .await .unwrap(), 10u128.pow(19) - (value + transfer_base), ); - assert_eq!(app.state.get_account_nonce(&bob_address).await.unwrap(), 0); assert_eq!( - app.state.get_account_nonce(&alice_address).await.unwrap(), + app.state_delta + .get_account_nonce(&bob_address) + .await + .unwrap(), + 0 + ); + assert_eq!( + app.state_delta + .get_account_nonce(&alice_address) + .await + .unwrap(), 1 ); } @@ -168,11 +177,11 @@ async fn app_execute_transaction_transfer_not_native_token() { let alice = get_alice_signing_key(); let alice_address = astria_address(&alice.address_bytes()); - let mut state_tx = StateDelta::new(app.state.clone()); - state_tx + let mut delta_delta = StateDelta::new(app.state_delta.clone()); + delta_delta .put_account_balance(&alice_address, &test_asset(), value) .unwrap(); - app.apply(state_tx); + app.apply(delta_delta); // transfer funds from Alice to Bob; use native token for fee payment let bob_address = astria_address_from_hex_string(BOB_ADDRESS); @@ -194,14 +203,14 @@ async fn app_execute_transaction_transfer_not_native_token() { app.execute_transaction(signed_tx).await.unwrap(); assert_eq!( - app.state + app.state_delta .get_account_balance(&bob_address, &nria()) .await .unwrap(), 10u128.pow(19), // genesis balance ); assert_eq!( - app.state + app.state_delta .get_account_balance(&bob_address, &test_asset()) .await .unwrap(), @@ -209,30 +218,39 @@ async fn app_execute_transaction_transfer_not_native_token() { ); let transfer_base = app - .state + .state_delta .get_fees::<Transfer>() .await .expect("should not error fetching transfer fees") .expect("transfer fees should be stored") .base(); assert_eq!( - app.state + app.state_delta .get_account_balance(&alice_address, &nria()) .await .unwrap(), 10u128.pow(19) - transfer_base, // genesis balance - fee ); assert_eq!( - app.state + app.state_delta .get_account_balance(&alice_address, &test_asset()) .await .unwrap(), 0, // 0 since all funds of `asset` were transferred ); - assert_eq!(app.state.get_account_nonce(&bob_address).await.unwrap(), 0); assert_eq!( - app.state.get_account_nonce(&alice_address).await.unwrap(), + app.state_delta + .get_account_nonce(&bob_address) + .await + .unwrap(), + 0 + ); + assert_eq!( + app.state_delta + .get_account_nonce(&alice_address) + .await + .unwrap(), 1 ); } @@ 
-275,16 +293,16 @@ async fn app_execute_transaction_transfer_balance_too_low_for_fee() { #[tokio::test] async fn app_execute_transaction_sequence() { let mut app = initialize_app(None, vec![]).await; - let mut state_tx = StateDelta::new(app.state.clone()); - state_tx + let mut delta_delta = StateDelta::new(app.state_delta.clone()); + delta_delta .put_fees(FeeComponents::<RollupDataSubmission>::new(0, 1)) .unwrap(); - app.apply(state_tx); + app.apply(delta_delta); let alice = get_alice_signing_key(); let alice_address = astria_address(&alice.address_bytes()); let data = Bytes::from_static(b"hello world"); - let fee = calculate_rollup_data_submission_fee_from_state(&data, &app.state).await; + let fee = calculate_rollup_data_submission_fee_from_state(&data, &app.state_delta).await; let tx = TransactionBody::builder() .actions(vec![ @@ -302,12 +320,15 @@ async fn app_execute_transaction_sequence() { let signed_tx = Arc::new(tx.sign(&alice)); app.execute_transaction(signed_tx).await.unwrap(); assert_eq!( - app.state.get_account_nonce(&alice_address).await.unwrap(), + app.state_delta + .get_account_nonce(&alice_address) + .await + .unwrap(), 1 ); assert_eq!( - app.state + app.state_delta .get_account_balance(&alice_address, &nria()) .await .unwrap(), @@ -360,11 +381,14 @@ async fn app_execute_transaction_validator_update() { let signed_tx = Arc::new(tx.sign(&alice)); app.execute_transaction(signed_tx).await.unwrap(); assert_eq!( - app.state.get_account_nonce(&alice_address).await.unwrap(), + app.state_delta + .get_account_nonce(&alice_address) + .await + .unwrap(), 1 ); - let validator_updates = app.state.get_validator_updates().await.unwrap(); + let validator_updates = app.state_delta.get_validator_updates().await.unwrap(); assert_eq!(validator_updates.len(), 1); assert_eq!( validator_updates.get(verification_key(1).address_bytes()), @@ -390,10 +414,13 @@ async fn app_execute_transaction_ibc_relayer_change_addition() { let signed_tx = Arc::new(tx.sign(&alice)); app.execute_transaction(signed_tx).await.unwrap(); assert_eq!( - app.state.get_account_nonce(&alice_address).await.unwrap(), + app.state_delta + .get_account_nonce(&alice_address) + .await + .unwrap(), 1 ); - assert!(app.state.is_ibc_relayer(alice_address).await.unwrap()); + assert!(app.state_delta.is_ibc_relayer(alice_address).await.unwrap()); } #[tokio::test] @@ -418,10 +445,13 @@ async fn app_execute_transaction_ibc_relayer_change_deletion() { let signed_tx = Arc::new(tx.sign(&alice)); app.execute_transaction(signed_tx).await.unwrap(); assert_eq!( - app.state.get_account_nonce(&alice_address).await.unwrap(), + app.state_delta + .get_account_nonce(&alice_address) + .await + .unwrap(), 1 ); - assert!(!app.state.is_ibc_relayer(alice_address).await.unwrap()); + assert!(!app.state_delta.is_ibc_relayer(alice_address).await.unwrap()); } #[tokio::test] @@ -469,11 +499,14 @@ async fn app_execute_transaction_sudo_address_change() { let signed_tx = Arc::new(tx.sign(&alice)); app.execute_transaction(signed_tx).await.unwrap(); assert_eq!( - app.state.get_account_nonce(&alice_address).await.unwrap(), + app.state_delta + .get_account_nonce(&alice_address) + .await + .unwrap(), 1 ); - let sudo_address = app.state.get_sudo_address().await.unwrap(); + let sudo_address = app.state_delta.get_sudo_address().await.unwrap(); assert_eq!(sudo_address, new_address.bytes()); } @@ -534,11 +567,19 @@ async fn app_execute_transaction_fee_asset_change_addition() { let signed_tx = Arc::new(tx.sign(&alice)); app.execute_transaction(signed_tx).await.unwrap(); 
assert_eq!( - app.state.get_account_nonce(&alice_address).await.unwrap(), + app.state_delta + .get_account_nonce(&alice_address) + .await + .unwrap(), 1 ); - assert!(app.state.is_allowed_fee_asset(&test_asset()).await.unwrap()); + assert!( + app.state_delta + .is_allowed_fee_asset(&test_asset()) + .await + .unwrap() + ); } #[tokio::test] @@ -568,11 +609,19 @@ async fn app_execute_transaction_fee_asset_change_removal() { let signed_tx = Arc::new(tx.sign(&alice)); app.execute_transaction(signed_tx).await.unwrap(); assert_eq!( - app.state.get_account_nonce(&alice_address).await.unwrap(), + app.state_delta + .get_account_nonce(&alice_address) + .await + .unwrap(), 1 ); - assert!(!app.state.is_allowed_fee_asset(&test_asset()).await.unwrap()); + assert!( + !app.state_delta + .is_allowed_fee_asset(&test_asset()) + .await + .unwrap() + ); } #[tokio::test] @@ -609,12 +658,12 @@ async fn app_execute_transaction_init_bridge_account_ok() { let alice_address = astria_address(&alice.address_bytes()); let mut app = initialize_app(None, vec![]).await; - let mut state_tx = StateDelta::new(app.state.clone()); + let mut delta_delta = StateDelta::new(app.state_delta.clone()); let fee = 12; // arbitrary - state_tx + delta_delta .put_fees(FeeComponents::<InitBridgeAccount>::new(fee, 0)) .unwrap(); - app.apply(state_tx); + app.apply(delta_delta); let rollup_id = RollupId::from_unhashed_bytes(b"testchainid"); let action = InitBridgeAccount { @@ -634,17 +683,20 @@ async fn app_execute_transaction_init_bridge_account_ok() { let signed_tx = Arc::new(tx.sign(&alice)); let before_balance = app - .state + .state_delta .get_account_balance(&alice_address, &nria()) .await .unwrap(); app.execute_transaction(signed_tx).await.unwrap(); assert_eq!( - app.state.get_account_nonce(&alice_address).await.unwrap(), + app.state_delta + .get_account_nonce(&alice_address) + .await + .unwrap(), 1 ); assert_eq!( - app.state + app.state_delta .get_bridge_account_rollup_id(&alice_address) .await .unwrap() @@ -652,14 +704,14 @@ async fn app_execute_transaction_init_bridge_account_ok() { rollup_id ); assert_eq!( - app.state + app.state_delta .get_bridge_account_ibc_asset(&alice_address) .await .unwrap(), nria().to_ibc_prefixed(), ); assert_eq!( - app.state + app.state_delta .get_account_balance(&alice_address, &nria()) .await .unwrap(), @@ -719,14 +771,14 @@ async fn app_execute_transaction_bridge_lock_action_ok() { let rollup_id = RollupId::from_unhashed_bytes(b"testchainid"); let starting_index_of_action = 0; - let mut state_tx = StateDelta::new(app.state.clone()); - state_tx + let mut delta_delta = StateDelta::new(app.state_delta.clone()); + delta_delta .put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state_tx + delta_delta .put_bridge_account_ibc_asset(&bridge_address, nria()) .unwrap(); - app.apply(state_tx); + app.apply(delta_delta); let amount = 100; let action = BridgeLock { @@ -745,14 +797,17 @@ async fn app_execute_transaction_bridge_lock_action_ok() { let signed_tx = Arc::new(tx.sign(&alice)); let bridge_before_balance = app - .state + .state_delta .get_account_balance(&bridge_address, &nria()) .await .unwrap(); app.execute_transaction(signed_tx.clone()).await.unwrap(); assert_eq!( - app.state.get_account_nonce(&alice_address).await.unwrap(), + app.state_delta + .get_account_nonce(&alice_address) + .await + .unwrap(), 1 ); let expected_deposit = Deposit { @@ -766,14 +821,14 @@ async fn app_execute_transaction_bridge_lock_action_ok() { }; assert_eq!( - app.state + app.state_delta 
.get_account_balance(&bridge_address, &nria()) .await .unwrap(), bridge_before_balance + amount ); - let all_deposits = app.state.get_cached_block_deposits(); + let all_deposits = app.state_delta.get_cached_block_deposits(); let deposits = all_deposits.get(&rollup_id).unwrap(); assert_eq!(deposits.len(), 1); assert_eq!(deposits[0], expected_deposit); @@ -836,11 +891,14 @@ async fn app_execute_transaction_invalid_nonce() { // check that tx was not executed by checking nonce and balance are unchanged assert_eq!( - app.state.get_account_nonce(&alice_address).await.unwrap(), + app.state_delta + .get_account_nonce(&alice_address) + .await + .unwrap(), 0 ); assert_eq!( - app.state + app.state_delta .get_account_balance(&alice_address, &nria()) .await .unwrap(), @@ -883,11 +941,14 @@ async fn app_execute_transaction_invalid_chain_id() { // check that tx was not executed by checking nonce and balance are unchanged assert_eq!( - app.state.get_account_nonce(&alice_address).await.unwrap(), + app.state_delta + .get_account_nonce(&alice_address) + .await + .unwrap(), 0 ); assert_eq!( - app.state + app.state_delta .get_account_balance(&alice_address, &nria()) .await .unwrap(), @@ -918,7 +979,8 @@ async fn app_stateful_check_fails_insufficient_total_balance() { // figure out needed fee for a single transfer let data = Bytes::from_static(b"hello world"); - let fee = calculate_rollup_data_submission_fee_from_state(&data, &app.state.clone()).await; + let fee = + calculate_rollup_data_submission_fee_from_state(&data, &app.state_delta.clone()).await; // transfer just enough to cover single sequence fee with data let signed_tx = TransactionBody::builder() @@ -959,7 +1021,7 @@ async fn app_stateful_check_fails_insufficient_total_balance() { .sign(&keypair); // try double, see fails stateful check let res = signed_tx_fail - .check_and_execute(Arc::get_mut(&mut app.state).unwrap()) + .check_and_execute(Arc::get_mut(&mut app.state_delta).unwrap()) .await .unwrap_err() .root_cause() @@ -982,7 +1044,7 @@ async fn app_stateful_check_fails_insufficient_total_balance() { .sign(&keypair); signed_tx_pass - .check_and_execute(Arc::get_mut(&mut app.state).unwrap()) + .check_and_execute(Arc::get_mut(&mut app.state_delta).unwrap()) .await .expect("stateful check should pass since we transferred enough to cover fee"); } @@ -995,7 +1057,7 @@ async fn app_execute_transaction_bridge_lock_unlock_action_ok() { let alice_address = astria_address(&alice.address_bytes()); let mut app = initialize_app(None, vec![]).await; - let mut state_tx = StateDelta::new(app.state.clone()); + let mut delta_delta = StateDelta::new(app.state_delta.clone()); let bridge = get_bridge_signing_key(); let bridge_address = astria_address(&bridge.address_bytes()); @@ -1004,27 +1066,27 @@ async fn app_execute_transaction_bridge_lock_unlock_action_ok() { // give bridge eoa funds so it can pay for the // unlock transfer action let transfer_base = app - .state + .state_delta .get_fees::<Transfer>() .await .expect("should not error fetching transfer fees") .expect("transfer fees should be stored") .base(); - state_tx + delta_delta .put_account_balance(&bridge_address, &nria(), transfer_base) .unwrap(); // create bridge account - state_tx + delta_delta .put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state_tx + delta_delta .put_bridge_account_ibc_asset(&bridge_address, nria()) .unwrap(); - state_tx + delta_delta .put_bridge_account_withdrawer_address(&bridge_address, bridge_address) .unwrap(); - app.apply(state_tx); + app.apply(delta_delta); 
let amount = 100; let action = BridgeLock { @@ -1044,7 +1106,10 @@ async fn app_execute_transaction_bridge_lock_unlock_action_ok() { app.execute_transaction(signed_tx).await.unwrap(); assert_eq!( - app.state.get_account_nonce(&alice_address).await.unwrap(), + app.state_delta + .get_account_nonce(&alice_address) + .await + .unwrap(), 1 ); @@ -1070,7 +1135,7 @@ async fn app_execute_transaction_bridge_lock_unlock_action_ok() { .await .expect("executing bridge unlock action should succeed"); assert_eq!( - app.state + app.state_delta .get_account_balance(&bridge_address, &nria()) .await .expect("executing bridge unlock action should succeed"), @@ -1089,14 +1154,14 @@ async fn app_execute_transaction_action_index_correctly_increments() { let rollup_id = RollupId::from_unhashed_bytes(b"testchainid"); let starting_index_of_action = 0; - let mut state_tx = StateDelta::new(app.state.clone()); - state_tx + let mut delta_delta = StateDelta::new(app.state_delta.clone()); + delta_delta .put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state_tx + delta_delta .put_bridge_account_ibc_asset(&bridge_address, nria()) .unwrap(); - app.apply(state_tx); + app.apply(delta_delta); let amount = 100; let action = BridgeLock { @@ -1116,11 +1181,14 @@ async fn app_execute_transaction_action_index_correctly_increments() { let signed_tx = Arc::new(tx.sign(&alice)); app.execute_transaction(signed_tx.clone()).await.unwrap(); assert_eq!( - app.state.get_account_nonce(&alice_address).await.unwrap(), + app.state_delta + .get_account_nonce(&alice_address) + .await + .unwrap(), 1 ); - let all_deposits = app.state.get_cached_block_deposits(); + let all_deposits = app.state_delta.get_cached_block_deposits(); let deposits = all_deposits.get(&rollup_id).unwrap(); assert_eq!(deposits.len(), 2); assert_eq!(deposits[0].source_action_index, starting_index_of_action); @@ -1133,18 +1201,18 @@ async fn app_execute_transaction_action_index_correctly_increments() { #[tokio::test] async fn transaction_execution_records_deposit_event() { let mut app = initialize_app(None, vec![]).await; - let mut state_tx = app - .state + let mut delta_delta = app + .state_delta .try_begin_transaction() .expect("state Arc should be present and unique"); let alice = get_alice_signing_key(); let bob_address = astria_address_from_hex_string(BOB_ADDRESS); - state_tx + delta_delta .put_bridge_account_rollup_id(&bob_address, [0; 32].into()) .unwrap(); - state_tx.put_allowed_fee_asset(&nria()).unwrap(); - state_tx + delta_delta.put_allowed_fee_asset(&nria()).unwrap(); + delta_delta .put_bridge_account_ibc_asset(&bob_address, nria()) .unwrap(); @@ -1173,8 +1241,8 @@ async fn transaction_execution_records_deposit_event() { }; let expected_deposit_event = create_deposit_event(&expected_deposit); - signed_tx.check_and_execute(&mut state_tx).await.unwrap(); - let events = &state_tx.apply().1; + signed_tx.check_and_execute(&mut delta_delta).await.unwrap(); + let events = &delta_delta.apply().1; let event = events .iter() .find(|event| event.kind == "tx.deposit") @@ -1201,7 +1269,7 @@ async fn app_execute_transaction_ibc_sudo_change() { let signed_tx = Arc::new(tx.sign(&alice)); app.execute_transaction(signed_tx).await.unwrap(); - let ibc_sudo_address = app.state.get_ibc_sudo_address().await.unwrap(); + let ibc_sudo_address = app.state_delta.get_ibc_sudo_address().await.unwrap(); assert_eq!(ibc_sudo_address, new_address.bytes()); } @@ -1278,19 +1346,19 @@ async fn transaction_execution_records_fee_event() { #[tokio::test] async fn 
ensure_all_event_attributes_are_indexed() { let mut app = initialize_app(None, vec![]).await; - let mut state_tx = StateDelta::new(app.state.clone()); + let mut delta_delta = StateDelta::new(app.state_delta.clone()); let alice = get_alice_signing_key(); let bob_address = astria_address_from_hex_string(BOB_ADDRESS); let value = 333_333; - state_tx + delta_delta .put_bridge_account_rollup_id(&bob_address, [0; 32].into()) .unwrap(); - state_tx.put_allowed_fee_asset(&nria()).unwrap(); - state_tx + delta_delta.put_allowed_fee_asset(&nria()).unwrap(); + delta_delta .put_bridge_account_ibc_asset(&bob_address, nria()) .unwrap(); - app.apply(state_tx); + app.apply(delta_delta); let transfer_action = Transfer { to: bob_address, diff --git a/crates/astria-sequencer/src/assets/query.rs b/crates/astria-sequencer/src/assets/query.rs index ee3dad59d2..162edcb3a2 100644 --- a/crates/astria-sequencer/src/assets/query.rs +++ b/crates/astria-sequencer/src/assets/query.rs @@ -3,7 +3,6 @@ use astria_core::{ protocol::abci::AbciErrorCode, }; use astria_eyre::eyre::WrapErr as _; -use cnidarium::Storage; use hex::FromHex as _; use prost::Message as _; use tendermint::abci::{ @@ -15,6 +14,7 @@ use tendermint::abci::{ use crate::{ app::StateReadExt as _, assets::StateReadExt as _, + storage::Storage, }; // Retrieve the full asset denomination given the asset ID. diff --git a/crates/astria-sequencer/src/assets/state_ext.rs b/crates/astria-sequencer/src/assets/state_ext.rs index b2ca7d0ca0..068b8a7765 100644 --- a/crates/astria-sequencer/src/assets/state_ext.rs +++ b/crates/astria-sequencer/src/assets/state_ext.rs @@ -107,9 +107,8 @@ impl<T: StateWrite> StateWriteExt for T {} #[cfg(test)] mod tests { - use cnidarium::StateDelta; - use super::*; + use crate::storage::Storage; fn asset() -> asset::Denom { "asset".parse().unwrap() @@ -124,21 +123,20 @@ mod tests { #[tokio::test] async fn native_asset() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // doesn't exist at first assert!( - state.get_native_asset().await.unwrap().is_none(), + state_delta.get_native_asset().await.unwrap().is_none(), "no native asset denom should exist at first" ); // can write let denom_orig: asset::TracePrefixed = "denom_orig".parse().unwrap(); - state.put_native_asset(denom_orig.clone()).unwrap(); + state_delta.put_native_asset(denom_orig.clone()).unwrap(); assert_eq!( - state.get_native_asset().await.unwrap().expect( + state_delta.get_native_asset().await.unwrap().expect( "a native asset denomination was written and must exist inside the database" ), denom_orig, @@ -147,9 +145,9 @@ mod tests { // can write new value let denom_update: asset::TracePrefixed = "denom_update".parse().unwrap(); - state.put_native_asset(denom_update.clone()).unwrap(); + state_delta.put_native_asset(denom_update.clone()).unwrap(); assert_eq!( - state.get_native_asset().await.unwrap().expect( + state_delta.get_native_asset().await.unwrap().expect( "a native asset denomination update was written and must exist inside the database" ), denom_update, @@ -159,15 +157,14 @@ mod tests { #[tokio::test] async fn get_ibc_asset_non_existent() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let state_delta = 
storage.new_delta_of_latest_snapshot(); let asset = asset(); // gets for non existing assets should return none assert_eq!( - state + state_delta .map_ibc_to_trace_prefixed_asset(&asset.to_ibc_prefixed()) .await .expect("getting non existing asset should not fail"), @@ -177,28 +174,27 @@ mod tests { #[tokio::test] async fn has_ibc_asset() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let denom = asset(); // non existing calls are ok for 'has' assert!( - !state + !state_delta .has_ibc_asset(&denom) .await .expect("'has' for non existing ibc assets should be ok"), "query for non existing asset should return false" ); - state + state_delta .put_ibc_asset(denom.clone().unwrap_trace_prefixed()) .expect("putting ibc asset should not fail"); // existing calls are ok for 'has' assert!( - state + state_delta .has_ibc_asset(&denom) .await .expect("'has' for existing ibc assets should be ok"), @@ -208,17 +204,16 @@ mod tests { #[tokio::test] async fn put_ibc_asset_simple() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // can write new let denom = asset(); - state + state_delta .put_ibc_asset(denom.clone().unwrap_trace_prefixed()) .expect("putting ibc asset should not fail"); assert_eq!( - state + state_delta .map_ibc_to_trace_prefixed_asset(&denom.to_ibc_prefixed()) .await .unwrap() @@ -230,17 +225,16 @@ mod tests { #[tokio::test] async fn put_ibc_asset_complex() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // can write new let denom = asset_0(); - state + state_delta .put_ibc_asset(denom.clone().unwrap_trace_prefixed()) .expect("putting ibc asset should not fail"); assert_eq!( - state + state_delta .map_ibc_to_trace_prefixed_asset(&denom.to_ibc_prefixed()) .await .unwrap() @@ -251,11 +245,11 @@ mod tests { // can write another without affecting original let denom_1 = asset_1(); - state + state_delta .put_ibc_asset(denom_1.clone().unwrap_trace_prefixed()) .expect("putting ibc asset should not fail"); assert_eq!( - state + state_delta .map_ibc_to_trace_prefixed_asset(&denom_1.to_ibc_prefixed()) .await .unwrap() @@ -264,7 +258,7 @@ mod tests { "additional ibc asset was not what was expected" ); assert_eq!( - state + state_delta .map_ibc_to_trace_prefixed_asset(&denom.to_ibc_prefixed()) .await .unwrap() diff --git a/crates/astria-sequencer/src/assets/storage/values.rs b/crates/astria-sequencer/src/assets/storage/values.rs index 6fab66eeaa..434e7d481a 100644 --- a/crates/astria-sequencer/src/assets/storage/values.rs +++ b/crates/astria-sequencer/src/assets/storage/values.rs @@ -7,15 +7,15 @@ use borsh::{ BorshSerialize, }; -#[derive(Debug, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] pub(crate) struct Value<'a>(ValueImpl<'a>); -#[derive(Debug, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] enum ValueImpl<'a> { TracePrefixedDenom(TracePrefixedDenom<'a>), } -#[derive(Debug, 
BorshSerialize, BorshDeserialize)] +#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)] pub(in crate::assets) struct TracePrefixedDenom<'a> { trace: Vec<(Cow<'a, str>, Cow<'a, str>)>, base_denom: Cow<'a, str>, diff --git a/crates/astria-sequencer/src/authority/state_ext.rs b/crates/astria-sequencer/src/authority/state_ext.rs index e55b81fcbf..70b3ebb8bb 100644 --- a/crates/astria-sequencer/src/authority/state_ext.rs +++ b/crates/astria-sequencer/src/authority/state_ext.rs @@ -121,7 +121,6 @@ impl<T: StateWrite> StateWriteExt for T {} #[cfg(test)] mod tests { use astria_core::protocol::transaction::v1::action::ValidatorUpdate; - use cnidarium::StateDelta; use super::*; use crate::{ @@ -130,6 +129,7 @@ mod tests { verification_key, ASTRIA_PREFIX, }, + storage::Storage, }; fn empty_validator_set() -> ValidatorSet { @@ -138,25 +138,26 @@ mod tests { #[tokio::test] async fn sudo_address() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); // doesn't exist at first - let _ = state + let _ = state_delta .get_sudo_address() .await .expect_err("no sudo address should exist at first"); // can write new let mut address_expected = [42u8; ADDRESS_LEN]; - state + state_delta .put_sudo_address(address_expected) .expect("writing sudo address should not fail"); assert_eq!( - state + state_delta .get_sudo_address() .await .expect("a sudo address was written and must exist inside the database"), @@ -166,11 +167,11 @@ mod tests { // can rewrite with new value address_expected = [41u8; ADDRESS_LEN]; - state + state_delta .put_sudo_address(address_expected) .expect("writing sudo address should not fail"); assert_eq!( - state + state_delta .get_sudo_address() .await .expect("a new sudo address was written and must exist inside the database"), @@ -181,12 +182,11 @@ mod tests { #[tokio::test] async fn validator_set_uninitialized_fails() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let state_delta = storage.new_delta_of_latest_snapshot(); // doesn't exist at first - let _ = state + let _ = state_delta .get_validator_set() .await .expect_err("no validator set should exist at first"); @@ -194,9 +194,8 @@ mod tests { #[tokio::test] async fn put_validator_set() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let initial = vec![ValidatorUpdate { power: 10, @@ -205,11 +204,11 @@ mod tests { let initial_validator_set = ValidatorSet::new_from_updates(initial); // can write new - state + state_delta .put_validator_set(initial_validator_set.clone()) .expect("writing initial validator set should not fail"); assert_eq!( - state + state_delta .get_validator_set() .await .expect("a validator set was written and must exist inside the database"), @@ -223,11 +222,11 @@ mod tests { verification_key: verification_key(2), }]; let updated_validator_set = ValidatorSet::new_from_updates(updates); - state + state_delta 
.put_validator_set(updated_validator_set.clone()) .expect("writing update validator set should not fail"); assert_eq!( - state + state_delta .get_validator_set() .await .expect("a validator set was written and must exist inside the database"), @@ -238,13 +237,12 @@ mod tests { #[tokio::test] async fn get_validator_updates_empty() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let state_delta = storage.new_delta_of_latest_snapshot(); // querying for empty validator set is ok assert_eq!( - state + state_delta .get_validator_updates() .await .expect("if no updates have been written return empty set"), @@ -255,9 +253,8 @@ mod tests { #[tokio::test] async fn put_validator_updates() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // create update validator set let mut updates = vec![ @@ -273,11 +270,11 @@ mod tests { let mut validator_set_updates = ValidatorSet::new_from_updates(updates); // put validator updates - state + state_delta .put_validator_updates(validator_set_updates.clone()) .expect("writing update validator set should not fail"); assert_eq!( - state + state_delta .get_validator_updates() .await .expect("an update validator set was written and must exist inside the database"), @@ -300,11 +297,11 @@ mod tests { validator_set_updates = ValidatorSet::new_from_updates(updates); // write different updates - state + state_delta .put_validator_updates(validator_set_updates.clone()) .expect("writing update validator set should not fail"); assert_eq!( - state + state_delta .get_validator_updates() .await .expect("an update validator set was written and must exist inside the database"), @@ -315,9 +312,8 @@ mod tests { #[tokio::test] async fn clear_validator_updates() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // create update validator set let updates = vec![ValidatorUpdate { @@ -327,11 +323,11 @@ mod tests { let validator_set_updates = ValidatorSet::new_from_updates(updates); // put validator updates - state + state_delta .put_validator_updates(validator_set_updates.clone()) .expect("writing update validator set should not fail"); assert_eq!( - state + state_delta .get_validator_updates() .await .expect("an update validator set was written and must exist inside the database"), @@ -340,11 +336,11 @@ mod tests { ); // clear updates - state.clear_validator_updates(); + state_delta.clear_validator_updates(); // check that clear worked assert_eq!( - state + state_delta .get_validator_updates() .await .expect("if no updates have been written return empty set"), @@ -355,12 +351,11 @@ mod tests { #[tokio::test] async fn clear_validator_updates_empty_ok() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // able to clear non-existent updates with no error - state.clear_validator_updates(); + state_delta.clear_validator_updates(); 
} #[tokio::test] diff --git a/crates/astria-sequencer/src/bridge/query.rs b/crates/astria-sequencer/src/bridge/query.rs index ed5d1c5177..71810e2241 100644 --- a/crates/astria-sequencer/src/bridge/query.rs +++ b/crates/astria-sequencer/src/bridge/query.rs @@ -9,7 +9,6 @@ use astria_eyre::eyre::{ eyre, WrapErr as _, }; -use cnidarium::Storage; use prost::Message as _; use tendermint::abci::{ request, @@ -22,6 +21,10 @@ use crate::{ app::StateReadExt as _, assets::StateReadExt as _, bridge::StateReadExt as _, + storage::{ + Snapshot, + Storage, + }, }; fn error_query_response( @@ -45,7 +48,7 @@ fn error_query_response( // this could be significantly shortened. #[expect(clippy::too_many_lines, reason = "should be refactored")] async fn get_bridge_account_info( - snapshot: cnidarium::Snapshot, + snapshot: Snapshot, address: &Address, ) -> Result<Option<BridgeAccountInfo>, response::Query> { let rollup_id = match snapshot.get_bridge_account_rollup_id(address).await { @@ -297,7 +300,6 @@ mod tests { primitive::v1::RollupId, protocol::bridge::v1::BridgeAccountInfoResponse, }; - use cnidarium::StateDelta; use super::*; use crate::{ @@ -313,34 +315,35 @@ mod tests { #[tokio::test] async fn bridge_account_info_request_ok() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); let asset: astria_core::primitive::v1::asset::Denom = "test".parse().unwrap(); let rollup_id = RollupId::from_unhashed_bytes("test"); let bridge_address = astria_address(&[0u8; 20]); let sudo_address = astria_address(&[1u8; 20]); let withdrawer_address = astria_address(&[2u8; 20]); - state.put_block_height(1).unwrap(); - state + state_delta.put_block_height(1).unwrap(); + state_delta .put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state + state_delta .put_ibc_asset(asset.as_trace_prefixed().unwrap().clone()) .unwrap(); - state + state_delta .put_bridge_account_ibc_asset(&bridge_address, &asset) .unwrap(); - state + state_delta .put_bridge_account_sudo_address(&bridge_address, sudo_address) .unwrap(); - state + state_delta .put_bridge_account_withdrawer_address(&bridge_address, withdrawer_address) .unwrap(); - storage.commit(state).await.unwrap(); + storage.commit(state_delta).await.unwrap(); let query = request::Query { data: vec![].into(), diff --git a/crates/astria-sequencer/src/bridge/state_ext.rs b/crates/astria-sequencer/src/bridge/state_ext.rs index 68bd6c2925..ae8606c503 100644 --- a/crates/astria-sequencer/src/bridge/state_ext.rs +++ b/crates/astria-sequencer/src/bridge/state_ext.rs @@ -330,10 +330,11 @@ impl<T: StateWrite> StateWriteExt for T {} #[cfg(test)] mod tests { - use cnidarium::StateDelta; - use super::*; - use crate::benchmark_and_test_utils::astria_address; + use crate::{ + benchmark_and_test_utils::astria_address, + storage::Storage, + }; fn asset_0() -> asset::Denom { "asset_0".parse().unwrap() @@ -345,17 +346,20 @@ mod tests { #[tokio::test] async fn get_bridge_account_rollup_id_uninitialized_ok() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let state_delta = storage.new_delta_of_latest_snapshot(); let 
address = astria_address(&[42u8; 20]); // uninitialized ok assert_eq!( - state.get_bridge_account_rollup_id(&address).await.expect( - "call to get bridge account rollup id should not fail for uninitialized addresses" - ), + state_delta + .get_bridge_account_rollup_id(&address) + .await + .expect( + "call to get bridge account rollup id should not fail for uninitialized \ + addresses" + ), Option::None, "stored rollup id for bridge not what was expected" ); @@ -363,19 +367,18 @@ mod tests { #[tokio::test] async fn put_bridge_account_rollup_id() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let mut rollup_id = RollupId::new([1u8; 32]); let address = astria_address(&[42u8; 20]); // can write new - state + state_delta .put_bridge_account_rollup_id(&address, rollup_id) .unwrap(); assert_eq!( - state + state_delta .get_bridge_account_rollup_id(&address) .await .expect("a rollup ID was written and must exist inside the database") @@ -386,11 +389,11 @@ mod tests { // can rewrite with new value rollup_id = RollupId::new([2u8; 32]); - state + state_delta .put_bridge_account_rollup_id(&address, rollup_id) .unwrap(); assert_eq!( - state + state_delta .get_bridge_account_rollup_id(&address) .await .expect("a rollup ID was written and must exist inside the database") @@ -402,11 +405,11 @@ mod tests { // can write additional account and both valid let rollup_id_1 = RollupId::new([2u8; 32]); let address_1 = astria_address(&[41u8; 20]); - state + state_delta .put_bridge_account_rollup_id(&address_1, rollup_id_1) .unwrap(); assert_eq!( - state + state_delta .get_bridge_account_rollup_id(&address_1) .await .expect("a rollup ID was written and must exist inside the database") @@ -416,7 +419,7 @@ mod tests { ); assert_eq!( - state + state_delta .get_bridge_account_rollup_id(&address) .await .expect("a rollup ID was written and must exist inside the database") @@ -428,12 +431,11 @@ mod tests { #[tokio::test] async fn get_bridge_account_asset_id_none_should_fail() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let state_delta = storage.new_delta_of_latest_snapshot(); let address = astria_address(&[42u8; 20]); - let _ = state + let _ = state_delta .get_bridge_account_ibc_asset(&address) .await .expect_err("call to get bridge account asset ids should fail if no assets"); @@ -441,18 +443,17 @@ mod tests { #[tokio::test] async fn put_bridge_account_ibc_assets() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let address = astria_address(&[42u8; 20]); let mut asset = asset_0(); // can write - state + state_delta .put_bridge_account_ibc_asset(&address, asset.clone()) .expect("storing bridge account asset should not fail"); - let mut result = state + let mut result = state_delta .get_bridge_account_ibc_asset(&address) .await .expect("bridge asset id was written and must exist inside the database"); @@ -464,10 +465,10 @@ mod tests { // can update asset = "asset_2".parse::<asset::Denom>().unwrap(); - state + state_delta .put_bridge_account_ibc_asset(&address, 
&asset) .expect("storing bridge account assets should not fail"); - result = state + result = state_delta .get_bridge_account_ibc_asset(&address) .await .expect("bridge asset id was written and must exist inside the database"); @@ -480,18 +481,18 @@ mod tests { // writing to other account also ok let address_1 = astria_address(&[41u8; 20]); let asset_1 = asset_1(); - state + state_delta .put_bridge_account_ibc_asset(&address_1, &asset_1) .expect("storing bridge account assets should not fail"); assert_eq!( - state + state_delta .get_bridge_account_ibc_asset(&address_1) .await .expect("bridge asset id was written and must exist inside the database"), asset_1.into(), "second bridge account asset not what was expected" ); - result = state + result = state_delta .get_bridge_account_ibc_asset(&address) .await .expect("original bridge asset id was written and must exist inside the database"); @@ -505,16 +506,15 @@ mod tests { #[tokio::test] async fn bridge_account_sudo_address_round_trip() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let bridge_address = [1; 20]; let sudo_address = [2; 20]; - state + state_delta .put_bridge_account_sudo_address(&bridge_address, sudo_address) .unwrap(); - let retrieved_sudo_address = state + let retrieved_sudo_address = state_delta .get_bridge_account_sudo_address(&bridge_address) .await .unwrap(); @@ -523,16 +523,15 @@ mod tests { #[tokio::test] async fn bridge_account_withdrawer_address_round_trip() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let bridge_address = [1; 20]; let withdrawer_address = [2; 20]; - state + state_delta .put_bridge_account_withdrawer_address(&bridge_address, withdrawer_address) .unwrap(); - let retrieved_withdrawer_address = state + let retrieved_withdrawer_address = state_delta .get_bridge_account_withdrawer_address(&bridge_address) .await .unwrap(); @@ -541,16 +540,15 @@ mod tests { #[tokio::test] async fn get_deposits_empty_ok() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let state_delta = storage.new_delta_of_latest_snapshot(); let block_hash = [32; 32]; let rollup_id = RollupId::new([2u8; 32]); // no events ok assert_eq!( - state + state_delta .get_deposits(&block_hash, &rollup_id) .await .expect("call for rollup id with no deposit events should not fail"), @@ -561,9 +559,8 @@ mod tests { #[tokio::test] async fn get_deposits() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let block_hash = [32; 32]; let rollup_id_1 = RollupId::new([1u8; 32]); @@ -587,11 +584,11 @@ mod tests { all_deposits.insert(rollup_id_1, rollup_1_deposits.clone()); // can write - state + state_delta .put_deposits(&block_hash, all_deposits.clone()) .unwrap(); assert_eq!( - state + state_delta .get_deposits(&block_hash, &rollup_id_1) .await .expect("deposit info was written to the 
database and must exist"), @@ -607,11 +604,11 @@ mod tests { }; rollup_1_deposits.push(deposit.clone()); all_deposits.insert(rollup_id_1, rollup_1_deposits.clone()); - state + state_delta .put_deposits(&block_hash, all_deposits.clone()) .unwrap(); assert_eq!( - state + state_delta .get_deposits(&block_hash, &rollup_id_1) .await .expect("deposit info was written to the database and must exist"), @@ -628,9 +625,9 @@ mod tests { }; let rollup_2_deposits = vec![deposit.clone()]; all_deposits.insert(rollup_id_2, rollup_2_deposits.clone()); - state.put_deposits(&block_hash, all_deposits).unwrap(); + state_delta.put_deposits(&block_hash, all_deposits).unwrap(); assert_eq!( - state + state_delta .get_deposits(&block_hash, &rollup_id_2) .await .expect("deposit info was written to the database and must exist"), @@ -639,7 +636,7 @@ mod tests { ); // verify original still ok assert_eq!( - state + state_delta .get_deposits(&block_hash, &rollup_id_1) .await .expect("deposit info was written to the database and must exist"), @@ -650,16 +647,15 @@ mod tests { #[tokio::test] async fn last_transaction_id_for_bridge_account_round_trip() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let bridge_address = [1; 20]; let tx_hash = TransactionId::new([2; 32]); - state + state_delta .put_last_transaction_id_for_bridge_account(&bridge_address, tx_hash) .unwrap(); - let retrieved_tx_hash = state + let retrieved_tx_hash = state_delta .get_last_transaction_id_for_bridge_account(&bridge_address) .await .unwrap(); diff --git a/crates/astria-sequencer/src/fees/query.rs b/crates/astria-sequencer/src/fees/query.rs index d4b28dc768..08faaaa535 100644 --- a/crates/astria-sequencer/src/fees/query.rs +++ b/crates/astria-sequencer/src/fees/query.rs @@ -42,10 +42,7 @@ use astria_eyre::eyre::{ Report, WrapErr as _, }; -use cnidarium::{ - StateRead, - Storage, -}; +use cnidarium::StateRead; use futures::{ FutureExt as _, StreamExt as _, @@ -77,7 +74,10 @@ use crate::{ FeeHandler, StateReadExt as _, }, - storage::StoredValue, + storage::{ + Storage, + StoredValue, + }, }; async fn find_trace_prefixed_or_return_ibc<S: StateRead>( diff --git a/crates/astria-sequencer/src/fees/state_ext.rs b/crates/astria-sequencer/src/fees/state_ext.rs index d62d682f86..edcc9da62a 100644 --- a/crates/astria-sequencer/src/fees/state_ext.rs +++ b/crates/astria-sequencer/src/fees/state_ext.rs @@ -222,7 +222,6 @@ mod tests { }; use astria_core::protocol::transaction::v1::action::*; - use cnidarium::StateDelta; use futures::{ StreamExt as _, TryStreamExt as _, @@ -231,7 +230,10 @@ mod tests { use tokio::pin; use super::*; - use crate::app::benchmark_and_test_utils::initialize_app_with_storage; + use crate::{ + app::benchmark_and_test_utils::initialize_app_with_storage, + storage::Storage, + }; fn asset_0() -> asset::Denom { "asset_0".parse().unwrap() @@ -248,20 +250,19 @@ mod tests { #[tokio::test] async fn block_fee_read_and_increase() { let (_, storage) = initialize_app_with_storage(None, vec![]).await; - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let mut state_delta = storage.new_delta_of_latest_snapshot(); // doesn't exist at first - let fee_balances_orig = state.get_block_fees(); + let fee_balances_orig = state_delta.get_block_fees(); assert!(fee_balances_orig.is_empty()); // can write let asset = 
asset_0(); let amount = 100u128; - state.add_fee_to_block_fees::<_, Transfer>(&asset, amount, 0); + state_delta.add_fee_to_block_fees::<_, Transfer>(&asset, amount, 0); // holds expected - let fee_balances_updated = state.get_block_fees(); + let fee_balances_updated = state_delta.get_block_fees(); assert_eq!( fee_balances_updated[0], Fee { @@ -277,8 +278,7 @@ mod tests { #[tokio::test] async fn block_fee_read_and_increase_can_delete() { let (_, storage) = initialize_app_with_storage(None, vec![]).await; - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let mut state_delta = storage.new_delta_of_latest_snapshot(); // can write let asset_first = asset_0(); @@ -286,10 +286,10 @@ mod tests { let amount_first = 100u128; let amount_second = 200u128; - state.add_fee_to_block_fees::<_, Transfer>(&asset_first, amount_first, 0); - state.add_fee_to_block_fees::<_, Transfer>(&asset_second, amount_second, 1); + state_delta.add_fee_to_block_fees::<_, Transfer>(&asset_first, amount_first, 0); + state_delta.add_fee_to_block_fees::<_, Transfer>(&asset_second, amount_second, 1); // holds expected - let fee_balances = HashSet::<_>::from_iter(state.get_block_fees()); + let fee_balances = HashSet::<_>::from_iter(state_delta.get_block_fees()); assert_eq!( fee_balances, HashSet::from_iter(vec![ @@ -316,13 +316,12 @@ mod tests { FeeComponents<F>: TryFrom<StoredValue<'a>, Error = Report> + Debug, StoredValue<'a>: From<FeeComponents<F>>, { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let fee_components = FeeComponents::<F>::new(123, 1); - state.put_fees(fee_components).unwrap(); - let retrieved_fees = state.get_fees().await.unwrap(); + state_delta.put_fees(fee_components).unwrap(); + let retrieved_fees = state_delta.get_fees().await.unwrap(); assert_eq!(retrieved_fees, Some(fee_components)); } @@ -398,14 +397,13 @@ mod tests { #[tokio::test] async fn is_allowed_fee_asset() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // non-existent fees assets return false let asset = asset_0(); assert!( - !state + !state_delta .is_allowed_fee_asset(&asset) .await .expect("checking for allowed fee asset should not fail"), @@ -413,9 +411,9 @@ mod tests { ); // existent fee assets return true - state.put_allowed_fee_asset(&asset).unwrap(); + state_delta.put_allowed_fee_asset(&asset).unwrap(); assert!( - state + state_delta .is_allowed_fee_asset(&asset) .await .expect("checking for allowed fee asset should not fail"), @@ -425,15 +423,14 @@ mod tests { #[tokio::test] async fn can_delete_allowed_fee_assets_simple() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // setup fee asset let asset = asset_0(); - state.put_allowed_fee_asset(&asset).unwrap(); + state_delta.put_allowed_fee_asset(&asset).unwrap(); assert!( - state + state_delta .is_allowed_fee_asset(&asset) .await .expect("checking for allowed fee asset should not fail"), @@ -442,7 +439,7 @@ mod tests { // see 
can get fee asset pin!( - let assets = state.allowed_fee_assets(); + let assets = state_delta.allowed_fee_assets(); ); assert_eq!( assets.next().await.transpose().unwrap(), @@ -451,11 +448,11 @@ mod tests { ); // can delete - state.delete_allowed_fee_asset(&asset); + state_delta.delete_allowed_fee_asset(&asset); // see is deleted pin!( - let assets = state.allowed_fee_assets(); + let assets = state_delta.allowed_fee_assets(); ); assert_eq!( assets.next().await.transpose().unwrap(), @@ -466,33 +463,32 @@ mod tests { #[tokio::test] async fn can_delete_allowed_fee_assets_complex() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // setup fee assets let asset_first = asset_0(); - state.put_allowed_fee_asset(&asset_first).unwrap(); + state_delta.put_allowed_fee_asset(&asset_first).unwrap(); assert!( - state + state_delta .is_allowed_fee_asset(&asset_first) .await .expect("checking for allowed fee asset should not fail"), "fee asset was expected to be allowed" ); let asset_second = asset_1(); - state.put_allowed_fee_asset(&asset_second).unwrap(); + state_delta.put_allowed_fee_asset(&asset_second).unwrap(); assert!( - state + state_delta .is_allowed_fee_asset(&asset_second) .await .expect("checking for allowed fee asset should not fail"), "fee asset was expected to be allowed" ); let asset_third = asset_2(); - state.put_allowed_fee_asset(&asset_third).unwrap(); + state_delta.put_allowed_fee_asset(&asset_third).unwrap(); assert!( - state + state_delta .is_allowed_fee_asset(&asset_third) .await .expect("checking for allowed fee asset should not fail"), @@ -500,10 +496,10 @@ mod tests { ); // can delete - state.delete_allowed_fee_asset(&asset_second); + state_delta.delete_allowed_fee_asset(&asset_second); // see is deleted - let assets = state + let assets = state_delta .allowed_fee_assets() .try_collect::<HashSet<_>>() .await diff --git a/crates/astria-sequencer/src/fees/tests.rs b/crates/astria-sequencer/src/fees/tests.rs index 8a6f2202ff..386bbcfcd7 100644 --- a/crates/astria-sequencer/src/fees/tests.rs +++ b/crates/astria-sequencer/src/fees/tests.rs @@ -26,7 +26,6 @@ use astria_core::{ sequencerblock::v1::block::Deposit, Protobuf as _, }; -use cnidarium::StateDelta; use super::base_deposit_fee; use crate::{ @@ -56,6 +55,7 @@ use crate::{ StateWriteExt as _, DEPOSIT_BASE_FEE, }, + storage::Storage, test_utils::calculate_rollup_data_submission_fee_from_state, transaction::{ StateWriteExt as _, @@ -70,10 +70,9 @@ fn test_asset() -> asset::Denom { #[tokio::test] async fn ensure_correct_block_fees_transfer() { let (_, storage) = initialize_app_with_storage(None, vec![]).await; - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let mut state_delta = storage.new_delta_of_latest_snapshot(); let transfer_base = 1; - state + state_delta .put_fees(FeeComponents::<Transfer>::new(transfer_base, 0)) .unwrap(); @@ -95,9 +94,9 @@ async fn ensure_correct_block_fees_transfer() { .try_build() .unwrap(); let signed_tx = Arc::new(tx.sign(&alice)); - signed_tx.check_and_execute(&mut state).await.unwrap(); + signed_tx.check_and_execute(&mut state_delta).await.unwrap(); - let total_block_fees: u128 = state + let total_block_fees: u128 = state_delta .get_block_fees() .into_iter() .map(|fee| fee.amount()) @@ -108,9 +107,8 @@ async fn ensure_correct_block_fees_transfer() { 
#[tokio::test] async fn ensure_correct_block_fees_sequence() { let (_, storage) = initialize_app_with_storage(None, vec![]).await; - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); - state + let mut state_delta = storage.new_delta_of_latest_snapshot(); + state_delta .put_fees(FeeComponents::<RollupDataSubmission>::new(1, 1)) .unwrap(); @@ -132,23 +130,22 @@ async fn ensure_correct_block_fees_sequence() { .try_build() .unwrap(); let signed_tx = Arc::new(tx.sign(&alice)); - signed_tx.check_and_execute(&mut state).await.unwrap(); - let total_block_fees: u128 = state + signed_tx.check_and_execute(&mut state_delta).await.unwrap(); + let total_block_fees: u128 = state_delta .get_block_fees() .into_iter() .map(|fee| fee.amount()) .sum(); - let expected_fees = calculate_rollup_data_submission_fee_from_state(&data, &state).await; + let expected_fees = calculate_rollup_data_submission_fee_from_state(&data, &state_delta).await; assert_eq!(total_block_fees, expected_fees); } #[tokio::test] async fn ensure_correct_block_fees_init_bridge_acct() { let (_, storage) = initialize_app_with_storage(None, vec![]).await; - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let mut state_delta = storage.new_delta_of_latest_snapshot(); let init_bridge_account_base = 1; - state + state_delta .put_fees(FeeComponents::<InitBridgeAccount>::new( init_bridge_account_base, 0, @@ -174,9 +171,9 @@ async fn ensure_correct_block_fees_init_bridge_acct() { .try_build() .unwrap(); let signed_tx = Arc::new(tx.sign(&alice)); - signed_tx.check_and_execute(&mut state).await.unwrap(); + signed_tx.check_and_execute(&mut state_delta).await.unwrap(); - let total_block_fees: u128 = state + let total_block_fees: u128 = state_delta .get_block_fees() .into_iter() .map(|fee| fee.amount()) @@ -193,25 +190,24 @@ async fn ensure_correct_block_fees_bridge_lock() { let starting_index_of_action = 0; let (_, storage) = initialize_app_with_storage(None, vec![]).await; - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let mut state_delta = storage.new_delta_of_latest_snapshot(); let transfer_base = 1; let bridge_lock_byte_cost_multiplier = 1; - state + state_delta .put_fees(FeeComponents::<Transfer>::new(transfer_base, 0)) .unwrap(); - state + state_delta .put_fees(FeeComponents::<BridgeLock>::new( transfer_base, bridge_lock_byte_cost_multiplier, )) .unwrap(); - state + state_delta .put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state + state_delta .put_bridge_account_ibc_asset(&bridge_address, nria()) .unwrap(); @@ -232,7 +228,7 @@ async fn ensure_correct_block_fees_bridge_lock() { .try_build() .unwrap(); let signed_tx = Arc::new(tx.sign(&alice)); - signed_tx.check_and_execute(&mut state).await.unwrap(); + signed_tx.check_and_execute(&mut state_delta).await.unwrap(); let test_deposit = Deposit { bridge_address, @@ -244,7 +240,7 @@ async fn ensure_correct_block_fees_bridge_lock() { source_action_index: starting_index_of_action, }; - let total_block_fees: u128 = state + let total_block_fees: u128 = state_delta .get_block_fees() .into_iter() .map(|fee| fee.amount()) @@ -263,17 +259,16 @@ async fn ensure_correct_block_fees_bridge_sudo_change() { let bridge_address = astria_address(&bridge.address_bytes()); let (_, storage) = initialize_app_with_storage(None, vec![]).await; - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let mut state_delta = 
storage.new_delta_of_latest_snapshot(); let sudo_change_base = 1; - state + state_delta .put_fees(FeeComponents::<BridgeSudoChange>::new(sudo_change_base, 0)) .unwrap(); - state + state_delta .put_bridge_account_sudo_address(&bridge_address, alice_address) .unwrap(); - state + state_delta .increase_balance(&bridge_address, &nria(), 1) .await .unwrap(); @@ -294,9 +289,9 @@ async fn ensure_correct_block_fees_bridge_sudo_change() { .try_build() .unwrap(); let signed_tx = Arc::new(tx.sign(&alice)); - signed_tx.check_and_execute(&mut state).await.unwrap(); + signed_tx.check_and_execute(&mut state_delta).await.unwrap(); - let total_block_fees: u128 = state + let total_block_fees: u128 = state_delta .get_block_fees() .into_iter() .map(|fee| fee.amount()) @@ -306,24 +301,25 @@ async fn ensure_correct_block_fees_bridge_sudo_change() { #[tokio::test] async fn bridge_lock_fee_calculation_works_as_expected() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let transfer_fee = 12; let from_address = astria_address(&[2; 20]); let transaction_id = TransactionId::new([0; 32]); - state.put_transaction_context(TransactionContext { + state_delta.put_transaction_context(TransactionContext { address_bytes: from_address.bytes(), transaction_id, position_in_transaction: 0, }); - state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); - state + state_delta .put_fees(FeeComponents::<Transfer>::new(transfer_fee, 0)) .unwrap(); - state + state_delta .put_fees(FeeComponents::<BridgeLock>::new(transfer_fee, 2)) .unwrap(); @@ -338,29 +334,35 @@ async fn bridge_lock_fee_calculation_works_as_expected() { }; let rollup_id = RollupId::from_unhashed_bytes(b"test_rollup_id"); - state + state_delta .put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state + state_delta .put_bridge_account_ibc_asset(&bridge_address, asset.clone()) .unwrap(); - state.put_allowed_fee_asset(&asset).unwrap(); + state_delta.put_allowed_fee_asset(&asset).unwrap(); // not enough balance; should fail - state + state_delta .put_account_balance(&from_address, &asset, transfer_fee) .unwrap(); assert_eyre_error( - &bridge_lock.check_and_execute(&mut state).await.unwrap_err(), + &bridge_lock + .check_and_execute(&mut state_delta) + .await + .unwrap_err(), "insufficient funds for transfer", ); // enough balance; should pass let expected_deposit_fee = transfer_fee + base_deposit_fee(&asset, "someaddress") * 2; - state + state_delta .put_account_balance(&from_address, &asset, 100 + expected_deposit_fee) .unwrap(); - bridge_lock.check_and_execute(&mut state).await.unwrap(); + bridge_lock + .check_and_execute(&mut state_delta) + .await + .unwrap(); } #[test] diff --git a/crates/astria-sequencer/src/grpc/sequencer.rs b/crates/astria-sequencer/src/grpc/sequencer.rs index dbe25337c2..3df2d4f4f8 100644 --- a/crates/astria-sequencer/src/grpc/sequencer.rs +++ b/crates/astria-sequencer/src/grpc/sequencer.rs @@ -14,7 +14,6 @@ use astria_core::{ Protobuf, }; use bytes::Bytes; -use cnidarium::Storage; use tonic::{ Request, Response, @@ -30,6 +29,7 @@ use crate::{ app::StateReadExt as _, grpc::StateReadExt as _, mempool::Mempool, + storage::Storage, }; pub(crate) struct SequencerServer { @@ -225,7 +225,6 @@ mod tests { protocol::test_utils::ConfigureSequencerBlock, 
sequencerblock::v1::SequencerBlock, }; - use cnidarium::StateDelta; use telemetry::Metrics; use super::*; @@ -253,13 +252,13 @@ mod tests { #[tokio::test] async fn test_get_sequencer_block() { let block = make_test_sequencer_block(1); - let storage = cnidarium::TempStorage::new().await.unwrap(); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let metrics = Box::leak(Box::new(Metrics::noop_metrics(&()).unwrap())); let mempool = Mempool::new(metrics, 100); - let mut state_tx = StateDelta::new(storage.latest_snapshot()); - state_tx.put_block_height(1).unwrap(); - state_tx.put_sequencer_block(block).unwrap(); - storage.commit(state_tx).await.unwrap(); + state_delta.put_block_height(1).unwrap(); + state_delta.put_sequencer_block(block).unwrap(); + storage.commit(state_delta).await.unwrap(); let server = Arc::new(SequencerServer::new(storage.clone(), mempool)); let request = GetSequencerBlockRequest { @@ -272,7 +271,7 @@ mod tests { #[tokio::test] async fn get_pending_nonce_in_mempool() { - let storage = cnidarium::TempStorage::new().await.unwrap(); + let storage = Storage::new_temp().await; let metrics = Box::leak(Box::new(Metrics::noop_metrics(&()).unwrap())); let mempool = Mempool::new(metrics, 100); @@ -323,14 +322,14 @@ mod tests { async fn get_pending_nonce_in_storage() { use crate::accounts::StateWriteExt as _; - let storage = cnidarium::TempStorage::new().await.unwrap(); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let metrics = Box::leak(Box::new(Metrics::noop_metrics(&()).unwrap())); let mempool = Mempool::new(metrics, 100); - let mut state_tx = StateDelta::new(storage.latest_snapshot()); let alice = get_alice_signing_key(); let alice_address = astria_address(&alice.address_bytes()); - state_tx.put_account_nonce(&alice_address, 99).unwrap(); - storage.commit(state_tx).await.unwrap(); + state_delta.put_account_nonce(&alice_address, 99).unwrap(); + storage.commit(state_delta).await.unwrap(); let server = Arc::new(SequencerServer::new(storage.clone(), mempool)); let request = GetPendingNonceRequest { diff --git a/crates/astria-sequencer/src/grpc/state_ext.rs b/crates/astria-sequencer/src/grpc/state_ext.rs index fc377d48c1..2f8b94d2e4 100644 --- a/crates/astria-sequencer/src/grpc/state_ext.rs +++ b/crates/astria-sequencer/src/grpc/state_ext.rs @@ -338,11 +338,13 @@ mod tests { protocol::test_utils::ConfigureSequencerBlock, sequencerblock::v1::block::Deposit, }; - use cnidarium::StateDelta; use rand::Rng; use super::*; - use crate::benchmark_and_test_utils::astria_address; + use crate::{ + benchmark_and_test_utils::astria_address, + storage::Storage, + }; // creates new sequencer block, optionally shifting all values except the height by 1 fn make_test_sequencer_block(height: u32) -> SequencerBlock { @@ -380,18 +382,17 @@ mod tests { #[tokio::test] async fn put_sequencer_block() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // can write one let block_0 = make_test_sequencer_block(2u32); - state + state_delta .put_sequencer_block(block_0.clone()) .expect("writing block to database should work"); assert_eq!( - state + state_delta .get_sequencer_block_by_height(block_0.height().into()) .await .expect("a block was written to the database and should exist"), @@ 
-401,11 +402,11 @@ mod tests { // can write another and both are ok let block_1 = make_test_sequencer_block(3u32); - state + state_delta .put_sequencer_block(block_1.clone()) .expect("writing another block to database should work"); assert_eq!( - state + state_delta .get_sequencer_block_by_height(block_0.height().into()) .await .expect("a block was written to the database and should exist"), @@ -413,7 +414,7 @@ mod tests { "original stored block does not match expected" ); assert_eq!( - state + state_delta .get_sequencer_block_by_height(block_1.height().into()) .await .expect("a block was written to the database and should exist"), @@ -424,17 +425,16 @@ mod tests { #[tokio::test] async fn put_sequencer_block_update() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // write original block let mut block = make_test_sequencer_block(2u32); - state + state_delta .put_sequencer_block(block.clone()) .expect("writing block to database should work"); assert_eq!( - state + state_delta .get_sequencer_block_by_height(block.height().into()) .await .expect("a block was written to the database and should exist"), @@ -444,13 +444,13 @@ mod tests { // write to same height but with new values block = make_test_sequencer_block(2u32); - state + state_delta .put_sequencer_block(block.clone()) .expect("writing block update to database should work"); // block was updates assert_eq!( - state + state_delta .get_sequencer_block_by_height(block.height().into()) .await .expect("a block was written to the database and should exist"), @@ -461,19 +461,18 @@ mod tests { #[tokio::test] async fn get_block_hash_by_height() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // write block let block = make_test_sequencer_block(2u32); - state + state_delta .put_sequencer_block(block.clone()) .expect("writing block to database should work"); // grab block hash by block height assert_eq!( - state + state_delta .get_block_hash_by_height(block.height().into()) .await .expect( @@ -487,19 +486,18 @@ mod tests { #[tokio::test] async fn get_sequencer_block_header_by_hash() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // write block let block = make_test_sequencer_block(2u32); - state + state_delta .put_sequencer_block(block.clone()) .expect("writing block to database should work"); // grab block header by block hash assert_eq!( - state + state_delta .get_sequencer_block_header_by_hash(block.block_hash()) .await .expect( @@ -513,18 +511,17 @@ mod tests { #[tokio::test] async fn get_rollup_ids_by_block_hash() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // write block let block = make_test_sequencer_block(2u32); - state + state_delta .put_sequencer_block(block.clone()) .expect("writing block to 
database should work"); // grab rollup ids by block hash - let stored_rollup_ids = state + let stored_rollup_ids = state_delta .get_rollup_ids_by_block_hash(block.block_hash()) .await .expect( @@ -540,19 +537,18 @@ mod tests { #[tokio::test] async fn get_sequencer_block_by_hash() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // write block let block = make_test_sequencer_block(2u32); - state + state_delta .put_sequencer_block(block.clone()) .expect("writing block to database should work"); // grab block by block hash assert_eq!( - super::get_sequencer_block_by_hash(&state, block.block_hash()) + super::get_sequencer_block_by_hash(&state_delta, block.block_hash()) .await .expect( "a block was written to the database and we should be able to query its block \ @@ -565,13 +561,12 @@ mod tests { #[tokio::test] async fn get_rollup_data() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // write block let block = make_test_sequencer_block(2u32); - state + state_delta .put_sequencer_block(block.clone()) .expect("writing block to database should work"); @@ -584,7 +579,7 @@ mod tests { let rollup_data = block.rollup_transactions().get(&rollup_id).unwrap(); // grab rollup's data by block hash - let stored_rollup_data = state + let stored_rollup_data = state_delta .get_rollup_data(block.block_hash(), &rollup_id) .await .expect( @@ -599,16 +594,15 @@ mod tests { #[tokio::test] async fn get_rollup_transactions_proof_by_block_hash() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let block = make_test_sequencer_block(2u32); - state + state_delta .put_sequencer_block(block.clone()) .expect("writing block to database should work"); - let transactions_proof = state + let transactions_proof = state_delta .get_rollup_transactions_proof_by_block_hash(block.block_hash()) .await .expect("should have txs proof in state"); @@ -617,16 +611,15 @@ mod tests { #[tokio::test] async fn get_rollup_ids_proof_by_block_hash() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let block = make_test_sequencer_block(2u32); - state + state_delta .put_sequencer_block(block.clone()) .expect("writing block to database should work"); - let ids_proof = state + let ids_proof = state_delta .get_rollup_ids_proof_by_block_hash(block.block_hash()) .await .expect("should have ids proof in state"); diff --git a/crates/astria-sequencer/src/ibc/ics20_transfer.rs b/crates/astria-sequencer/src/ibc/ics20_transfer.rs index c94b87ed0f..e082ec3a8f 100644 --- a/crates/astria-sequencer/src/ibc/ics20_transfer.rs +++ b/crates/astria-sequencer/src/ibc/ics20_transfer.rs @@ -755,7 +755,6 @@ mod tests { }, sequencerblock::v1::block::Deposit, }; - use cnidarium::StateDelta; use ibc_types::{ core::channel::{ packet::Sequence, @@ 
-790,6 +789,7 @@ mod tests { StateReadExt as _, StateWriteExt, }, + storage::Storage, test_utils::astria_compat_address, transaction::{ StateWriteExt as _, @@ -826,18 +826,19 @@ mod tests { #[tokio::test] async fn receive_source_zone_asset_on_sequencer_account() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state_tx = StateDelta::new(snapshot.clone()); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let recipient_address = astria_address(&[1; 20]); let amount = 100; - state_tx.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); - state_tx + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); + state_delta .put_ibc_compat_prefix(ASTRIA_COMPAT_PREFIX.to_string()) .unwrap(); - state_tx + state_delta .put_ibc_channel_balance(&packet().chan_on_b, &nria(), amount) .unwrap(); @@ -850,7 +851,7 @@ mod tests { }; receive_tokens( - &mut state_tx, + &mut state_delta, &Packet { data: serde_json::to_vec(&packet_data).unwrap(), ..packet() @@ -859,12 +860,12 @@ mod tests { .await .unwrap(); - let user_balance = state_tx + let user_balance = state_delta .get_account_balance(&recipient_address, &nria()) .await .expect("ics20 transfer to user account should succeed"); assert_eq!(user_balance, amount); - let escrow_balance = state_tx + let escrow_balance = state_delta .get_ibc_channel_balance(&packet().chan_on_b, &nria()) .await .expect("ics20 transfer to user account from escrow account should succeed"); @@ -873,12 +874,13 @@ mod tests { #[tokio::test] async fn receive_sink_zone_asset_on_sequencer_account() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state_tx = StateDelta::new(snapshot.clone()); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state_tx.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); - state_tx + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); + state_delta .put_ibc_compat_prefix(ASTRIA_COMPAT_PREFIX.to_string()) .unwrap(); @@ -896,7 +898,7 @@ mod tests { }; receive_tokens( - &mut state_tx, + &mut state_delta, &Packet { data: serde_json::to_vec(&packet_data).unwrap(), ..packet() @@ -905,11 +907,11 @@ mod tests { .await .unwrap(); - assert!(state_tx.has_ibc_asset(&sink_asset()).await.expect( + assert!(state_delta.has_ibc_asset(&sink_asset()).await.expect( "a new asset with <sequencer_port>/<sequencer_channel>/<asset> should be registered \ in the state" )); - let user_balance = state_tx + let user_balance = state_delta .get_account_balance(&recipient_address, &sink_asset()) .await .expect( @@ -920,18 +922,19 @@ mod tests { #[tokio::test] async fn receive_source_zone_asset_on_bridge_account_and_emit_to_rollup() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state_tx = StateDelta::new(snapshot.clone()); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let bridge_address = astria_address(&[99; 20]); let rollup_id = RollupId::from_unhashed_bytes(b"testchainid"); - state_tx.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); - state_tx + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); + state_delta .put_ibc_compat_prefix(ASTRIA_COMPAT_PREFIX.to_string()) .unwrap(); - state_tx.put_transaction_context(TransactionContext { + 
state_delta.put_transaction_context(TransactionContext { address_bytes: bridge_address.bytes(), transaction_id: TransactionId::new([0; 32]), position_in_transaction: 0, @@ -940,13 +943,13 @@ mod tests { let rollup_deposit_address = "rollupaddress"; let amount = 100; - state_tx + state_delta .put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state_tx + state_delta .put_bridge_account_ibc_asset(&bridge_address, nria()) .unwrap(); - state_tx + state_delta .put_ibc_channel_balance(&packet().chan_on_b, &nria(), amount) .unwrap(); @@ -961,7 +964,7 @@ mod tests { .unwrap(), }; receive_tokens( - &mut state_tx, + &mut state_delta, &Packet { data: serde_json::to_vec(&packet_data).unwrap(), ..packet() @@ -970,7 +973,7 @@ mod tests { .await .unwrap(); - let balance = state_tx + let balance = state_delta .get_account_balance(&bridge_address, &nria()) .await .expect( @@ -979,7 +982,7 @@ mod tests { ); assert_eq!(balance, 100); - let deposits = state_tx.get_cached_block_deposits(); + let deposits = state_delta.get_cached_block_deposits(); assert_eq!(deposits.len(), 1); let expected_deposit = Deposit { @@ -1002,18 +1005,19 @@ mod tests { #[tokio::test] async fn receive_sink_zone_asset_on_bridge_account_and_emit_to_rollup() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state_tx = StateDelta::new(snapshot.clone()); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let bridge_address = astria_address(&[99; 20]); let rollup_id = RollupId::from_unhashed_bytes(b"testchainid"); - state_tx.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); - state_tx + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); + state_delta .put_ibc_compat_prefix(ASTRIA_COMPAT_PREFIX.to_string()) .unwrap(); - state_tx.put_transaction_context(TransactionContext { + state_delta.put_transaction_context(TransactionContext { address_bytes: bridge_address.bytes(), transaction_id: TransactionId::new([0; 32]), position_in_transaction: 0, @@ -1030,10 +1034,10 @@ mod tests { ) .parse::<Denom>() .unwrap(); - state_tx + state_delta .put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state_tx + state_delta .put_bridge_account_ibc_asset(&bridge_address, &remote_asset_on_sequencer) .unwrap(); @@ -1048,7 +1052,7 @@ mod tests { .unwrap(), }; receive_tokens( - &mut state_tx, + &mut state_delta, &Packet { data: serde_json::to_vec(&packet_data).unwrap(), ..packet() @@ -1057,13 +1061,13 @@ mod tests { .await .unwrap(); - let balance = state_tx + let balance = state_delta .get_account_balance(&bridge_address, &remote_asset_on_sequencer) .await .expect("receipt of funds to a rollup should have updated funds in the bridge account"); assert_eq!(balance, amount); - let deposits = state_tx.get_cached_block_deposits(); + let deposits = state_delta.get_cached_block_deposits(); assert_eq!(deposits.len(), 1); let expected_deposit = Deposit { @@ -1081,17 +1085,16 @@ mod tests { #[tokio::test] async fn transfer_to_bridge_is_rejected_due_to_invalid_memo() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state_tx = StateDelta::new(snapshot.clone()); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let bridge_address = astria_address(&[99; 20]); let rollup_id = RollupId::from_unhashed_bytes(b"testchainid"); - state_tx + state_delta 
.put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state_tx + state_delta .put_bridge_account_ibc_asset(&bridge_address, sink_asset()) .unwrap(); @@ -1105,7 +1108,7 @@ mod tests { // FIXME(janis): assert that the failure is actually due to the malformed memo // and not becase of some other input. let _ = receive_tokens( - &mut state_tx, + &mut state_delta, &Packet { data: serde_json::to_vec(&packet_data).unwrap(), ..packet() @@ -1117,17 +1120,16 @@ mod tests { #[tokio::test] async fn transfer_to_bridge_account_is_rejected_due_to_not_permitted_token() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state_tx = StateDelta::new(snapshot.clone()); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let bridge_address = astria_address(&[99; 20]); let rollup_id = RollupId::from_unhashed_bytes(b"testchainid"); - state_tx + state_delta .put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state_tx + state_delta .put_bridge_account_ibc_asset(&bridge_address, sink_asset()) .unwrap(); @@ -1144,7 +1146,7 @@ mod tests { // FIXME(janis): assert that the failure is actually due to the not permitted asset // and not because of some other input. let _ = receive_tokens( - &mut state_tx, + &mut state_delta, &Packet { data: serde_json::to_vec(&packet_data).unwrap(), ..packet() @@ -1156,18 +1158,19 @@ mod tests { #[tokio::test] async fn refund_sequencer_account_with_source_zone_asset() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state_tx = StateDelta::new(snapshot.clone()); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state_tx.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); - state_tx + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); + state_delta .put_ibc_compat_prefix(ASTRIA_COMPAT_PREFIX.to_string()) .unwrap(); let recipient_address = astria_address(&[1; 20]); let amount = 100; - state_tx + state_delta .put_ibc_channel_balance(&packet().chan_on_a, &nria(), amount) .unwrap(); @@ -1180,7 +1183,7 @@ mod tests { }; refund_tokens( - &mut state_tx, + &mut state_delta, &Packet { data: serde_json::to_vec(&packet_data).unwrap(), ..packet() @@ -1189,12 +1192,12 @@ mod tests { .await .expect("valid ics20 refund to user account; recipient, memo, and asset ID are valid"); - let balance = state_tx + let balance = state_delta .get_account_balance(&recipient_address, &nria()) .await .expect("ics20 refund to user account should succeed"); assert_eq!(balance, amount); - let balance = state_tx + let balance = state_delta .get_ibc_channel_balance(&packet().chan_on_a, &nria()) .await .expect("ics20 refund to user account from escrow account should succeed"); @@ -1203,18 +1206,19 @@ mod tests { #[tokio::test] async fn refund_sequencer_account_with_sink_zone_asset() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state_tx = StateDelta::new(snapshot.clone()); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state_tx.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); - state_tx + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); + state_delta .put_ibc_compat_prefix(ASTRIA_COMPAT_PREFIX.to_string()) .unwrap(); let recipient_address = astria_address(&[1; 20]); 
let amount = 100; - state_tx + state_delta .put_ibc_channel_balance(&packet().chan_on_a, &sink_asset(), amount) .unwrap(); @@ -1227,7 +1231,7 @@ mod tests { }; refund_tokens( - &mut state_tx, + &mut state_delta, &Packet { data: serde_json::to_vec(&packet_data).unwrap(), ..packet() @@ -1236,12 +1240,12 @@ mod tests { .await .expect("valid ics20 refund to user account; recipient, memo, and asset ID are valid"); - let balance = state_tx + let balance = state_delta .get_account_balance(&recipient_address, &sink_asset()) .await .expect("ics20 refund to user account should succeed"); assert_eq!(balance, amount); - let balance = state_tx + let balance = state_delta .get_ibc_channel_balance(&packet().chan_on_a, &sink_asset()) .await .expect("ics20 refund to user account from escrow account should succeed"); @@ -1250,12 +1254,13 @@ mod tests { #[tokio::test] async fn refund_rollup_with_sink_zone_asset() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state_tx = StateDelta::new(snapshot.clone()); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state_tx.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); - state_tx + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); + state_delta .put_ibc_compat_prefix(ASTRIA_COMPAT_PREFIX.to_string()) .unwrap(); @@ -1263,21 +1268,21 @@ mod tests { let rollup_id = RollupId::from_unhashed_bytes(b"testchainid"); - state_tx.put_transaction_context(TransactionContext { + state_delta.put_transaction_context(TransactionContext { address_bytes: bridge_address.bytes(), transaction_id: TransactionId::new([0; 32]), position_in_transaction: 0, }); - state_tx + state_delta .put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state_tx + state_delta .put_bridge_account_ibc_asset(&bridge_address, sink_asset()) .unwrap(); let amount = 100; - state_tx + state_delta .put_ibc_channel_balance(&packet().chan_on_a, &sink_asset(), amount) .unwrap(); @@ -1298,7 +1303,7 @@ mod tests { .unwrap(), }; refund_tokens( - &mut state_tx, + &mut state_delta, &Packet { data: serde_json::to_vec(&packet_data).unwrap(), ..packet() @@ -1307,13 +1312,13 @@ mod tests { .await .expect("valid rollup withdrawal refund"); - let balance = state_tx + let balance = state_delta .get_account_balance(&bridge_address, &sink_asset()) .await .expect("rollup withdrawal refund should have updated funds in the bridge address"); assert_eq!(balance, amount); - let deposit = state_tx.get_cached_block_deposits(); + let deposit = state_delta.get_cached_block_deposits(); let expected_deposit = Deposit { bridge_address, @@ -1329,17 +1334,18 @@ mod tests { #[tokio::test] async fn refund_rollup_with_source_zone_asset() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state_tx = StateDelta::new(snapshot.clone()); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state_tx.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); - state_tx + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); + state_delta .put_ibc_compat_prefix(ASTRIA_COMPAT_PREFIX.to_string()) .unwrap(); let amount = 100; - state_tx + state_delta .put_ibc_channel_balance(&packet().chan_on_a, &nria(), amount) .unwrap(); @@ -1347,16 +1353,16 @@ mod tests { let destination_chain_address = "rollup-defined"; let rollup_id = 
RollupId::from_unhashed_bytes(b"testchainid"); - state_tx.put_transaction_context(TransactionContext { + state_delta.put_transaction_context(TransactionContext { address_bytes: bridge_address.bytes(), transaction_id: TransactionId::new([0; 32]), position_in_transaction: 0, }); - state_tx + state_delta .put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state_tx + state_delta .put_bridge_account_ibc_asset(&bridge_address, nria()) .unwrap(); @@ -1375,7 +1381,7 @@ mod tests { }; refund_tokens( - &mut state_tx, + &mut state_delta, &Packet { data: serde_json::to_vec(&packet_denom).unwrap(), ..packet() @@ -1384,13 +1390,13 @@ mod tests { .await .unwrap(); - let balance = state_tx + let balance = state_delta .get_account_balance(&bridge_address, &nria()) .await .expect("refunds of rollup withdrawals should be credited to the bridge account"); assert_eq!(balance, amount); - let deposits = state_tx.get_cached_block_deposits(); + let deposits = state_delta.get_cached_block_deposits(); let deposit = deposits .get(&rollup_id) @@ -1411,12 +1417,13 @@ mod tests { #[tokio::test] async fn refund_rollup_with_source_zone_asset_compat_prefix() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state_tx = StateDelta::new(snapshot.clone()); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state_tx.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); - state_tx + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); + state_delta .put_ibc_compat_prefix(ASTRIA_COMPAT_PREFIX.to_string()) .unwrap(); @@ -1425,16 +1432,16 @@ mod tests { let destination_chain_address = "rollup-defined-address".to_string(); let amount = 100; - state_tx + state_delta .put_ibc_channel_balance(&packet().chan_on_a, &nria(), amount) .unwrap(); let rollup_id = RollupId::from_unhashed_bytes(b"testchainid"); - state_tx + state_delta .put_bridge_account_rollup_id(&bridge_address, rollup_id) .unwrap(); - state_tx + state_delta .put_bridge_account_ibc_asset(&bridge_address, nria()) .unwrap(); @@ -1457,10 +1464,10 @@ mod tests { transaction_id: TransactionId::new([0; 32]), position_in_transaction: 0, }; - state_tx.put_transaction_context(transaction_context); + state_delta.put_transaction_context(transaction_context); refund_tokens( - &mut state_tx, + &mut state_delta, &Packet { data: serde_json::to_vec(&packet_data).unwrap(), ..packet() @@ -1469,13 +1476,13 @@ mod tests { .await .unwrap(); - let balance = state_tx + let balance = state_delta .get_account_balance(&bridge_address, &nria()) .await .expect("refunding a rollup should add the tokens to its bridge address"); assert_eq!(balance, amount); - let deposits = state_tx.get_cached_block_deposits(); + let deposits = state_delta.get_cached_block_deposits(); assert_eq!(deposits.len(), 1); let deposit = deposits.get(&rollup_id).unwrap().first().unwrap(); diff --git a/crates/astria-sequencer/src/ibc/state_ext.rs b/crates/astria-sequencer/src/ibc/state_ext.rs index 3307e5b2e8..76771f2f1e 100644 --- a/crates/astria-sequencer/src/ibc/state_ext.rs +++ b/crates/astria-sequencer/src/ibc/state_ext.rs @@ -163,16 +163,14 @@ impl<T: StateWrite> StateWriteExt for T {} #[cfg(test)] mod tests { - use cnidarium::StateDelta; - use super::*; use crate::{ - address::StateWriteExt, + address::StateWriteExt as _, benchmark_and_test_utils::{ astria_address, ASTRIA_PREFIX, }, - ibc::StateWriteExt as _, + storage::Storage, }; fn asset_0() -> asset::Denom { @@ 
-185,12 +183,11 @@ mod tests { #[tokio::test] async fn get_ibc_sudo_address_fails_if_not_set() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let state_delta = storage.new_delta_of_latest_snapshot(); // should fail if not set - let _ = state + let _ = state_delta .get_ibc_sudo_address() .await .expect_err("sudo address should be set"); @@ -198,19 +195,20 @@ mod tests { #[tokio::test] async fn put_ibc_sudo_address() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); // can write new let mut address = [42u8; 20]; - state + state_delta .put_ibc_sudo_address(address) .expect("writing sudo address should not fail"); assert_eq!( - state + state_delta .get_ibc_sudo_address() .await .expect("a sudo address was written and must exist inside the database"), @@ -220,11 +218,11 @@ mod tests { // can rewrite with new value address = [41u8; 20]; - state + state_delta .put_ibc_sudo_address(address) .expect("writing sudo address should not fail"); assert_eq!( - state + state_delta .get_ibc_sudo_address() .await .expect("sudo address was written and must exist inside the database"), @@ -235,16 +233,17 @@ mod tests { #[tokio::test] async fn is_ibc_relayer_ok_if_not_set() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); // unset address returns false let address = astria_address(&[42u8; 20]); assert!( - !state + !state_delta .is_ibc_relayer(address) .await .expect("calls to properly formatted addresses should not fail"), @@ -254,17 +253,18 @@ mod tests { #[tokio::test] async fn delete_ibc_relayer_address() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); // can write let address = astria_address(&[42u8; 20]); - state.put_ibc_relayer_address(&address).unwrap(); + state_delta.put_ibc_relayer_address(&address).unwrap(); assert!( - state + state_delta .is_ibc_relayer(address) .await .expect("a relayer address was written and must exist inside the database"), @@ -272,9 +272,9 @@ mod tests { ); // can delete - state.delete_ibc_relayer_address(&address); + state_delta.delete_ibc_relayer_address(&address); assert!( - !state + !state_delta .is_ibc_relayer(address) .await .expect("calls on unset addresses should not fail"), @@ -284,17 +284,18 @@ mod tests { #[tokio::test] async fn put_ibc_relayer_address() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + 
let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); // can write let address = astria_address(&[42u8; 20]); - state.put_ibc_relayer_address(&address).unwrap(); + state_delta.put_ibc_relayer_address(&address).unwrap(); assert!( - state + state_delta .is_ibc_relayer(address) .await .expect("a relayer address was written and must exist inside the database"), @@ -303,16 +304,16 @@ mod tests { // can write multiple let address_1 = astria_address(&[41u8; 20]); - state.put_ibc_relayer_address(&address_1).unwrap(); + state_delta.put_ibc_relayer_address(&address_1).unwrap(); assert!( - state + state_delta .is_ibc_relayer(address_1) .await .expect("a relayer address was written and must exist inside the database"), "additional stored relayer address could not be verified" ); assert!( - state + state_delta .is_ibc_relayer(address) .await .expect("a relayer address was written and must exist inside the database"), @@ -322,15 +323,14 @@ mod tests { #[tokio::test] async fn get_ibc_channel_balance_unset_ok() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let state_delta = storage.new_delta_of_latest_snapshot(); let channel = ChannelId::new(0u64); let asset = asset_0(); assert_eq!( - state + state_delta .get_ibc_channel_balance(&channel, &asset) .await .expect("retrieving asset balance for channel should not fail"), @@ -341,20 +341,19 @@ mod tests { #[tokio::test] async fn put_ibc_channel_balance_simple() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let channel = ChannelId::new(0u64); let asset = asset_0(); let mut amount = 10u128; // write initial - state + state_delta .put_ibc_channel_balance(&channel, &asset, amount) .expect("should be able to set balance for channel and asset pair"); assert_eq!( - state + state_delta .get_ibc_channel_balance(&channel, &asset) .await .expect("retrieving asset balance for channel should not fail"), @@ -364,11 +363,11 @@ mod tests { // can update amount = 20u128; - state + state_delta .put_ibc_channel_balance(&channel, &asset, amount) .expect("should be able to set balance for channel and asset pair"); assert_eq!( - state + state_delta .get_ibc_channel_balance(&channel, &asset) .await .expect("retrieving asset balance for channel should not fail"), @@ -379,9 +378,8 @@ mod tests { #[tokio::test] async fn put_ibc_channel_balance_multiple_assets() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let channel = ChannelId::new(0u64); let asset_0 = asset_0(); @@ -390,14 +388,14 @@ mod tests { let amount_1 = 20u128; // write both - state + state_delta .put_ibc_channel_balance(&channel, &asset_0, amount_0) .expect("should be able to set balance for channel and asset pair"); - state + state_delta .put_ibc_channel_balance(&channel, &asset_1, amount_1) .expect("should be able to set balance for channel and asset pair"); assert_eq!( - 
state + state_delta .get_ibc_channel_balance(&channel, &asset_0) .await .expect("retrieving asset balance for channel should not fail"), @@ -405,7 +403,7 @@ mod tests { "set balance for channel/asset pair not what was expected" ); assert_eq!( - state + state_delta .get_ibc_channel_balance(&channel, &asset_1) .await .expect("retrieving asset balance for channel should not fail"), @@ -416,9 +414,8 @@ mod tests { #[tokio::test] async fn put_ibc_channel_balance_multiple_channels() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let channel_0 = ChannelId::new(0u64); let channel_1 = ChannelId::new(1u64); @@ -427,14 +424,14 @@ mod tests { let amount_1 = 20u128; // write both - state + state_delta .put_ibc_channel_balance(&channel_0, &asset, amount_0) .expect("should be able to set balance for channel and asset pair"); - state + state_delta .put_ibc_channel_balance(&channel_1, &asset, amount_1) .expect("should be able to set balance for channel and asset pair"); assert_eq!( - state + state_delta .get_ibc_channel_balance(&channel_0, &asset) .await .expect("retrieving asset balance for channel should not fail"), @@ -442,7 +439,7 @@ mod tests { "set balance for channel/asset pair not what was expected" ); assert_eq!( - state + state_delta .get_ibc_channel_balance(&channel_1, &asset) .await .expect("retrieving asset balance for channel should not fail"), diff --git a/crates/astria-sequencer/src/mempool/mempool_state.rs b/crates/astria-sequencer/src/mempool/mempool_state.rs index e1676dcb02..a0c901fb5f 100644 --- a/crates/astria-sequencer/src/mempool/mempool_state.rs +++ b/crates/astria-sequencer/src/mempool/mempool_state.rs @@ -32,7 +32,6 @@ pub(crate) async fn get_account_balances<S: StateRead, T: AddressBytes>( #[cfg(test)] mod tests { use asset::Denom; - use cnidarium::StateDelta; use super::*; use crate::{ @@ -45,29 +44,29 @@ mod tests { astria_address, nria, }, + storage::Storage, }; #[tokio::test] async fn test_get_account_balances() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); // native account should work with ibc too - state.put_native_asset(nria()).unwrap(); + state_delta.put_native_asset(nria()).unwrap(); - let asset_0 = state.get_native_asset().await.unwrap().unwrap(); + let asset_0 = state_delta.get_native_asset().await.unwrap().unwrap(); let asset_1: Denom = "asset_0".parse().unwrap(); let asset_2: Denom = "asset_1".parse().unwrap(); // also need to add assets to the ibc state - state + state_delta .put_ibc_asset(asset_0.clone()) .expect("should be able to call other trait method on state object"); - state + state_delta .put_ibc_asset(asset_1.clone().unwrap_trace_prefixed()) .expect("should be able to call other trait method on state object"); - state + state_delta .put_ibc_asset(asset_2.clone().unwrap_trace_prefixed()) .expect("should be able to call other trait method on state object"); @@ -78,17 +77,17 @@ mod tests { let amount_expected_2 = 3u128; // add balances to the account - state + state_delta .put_account_balance(&address, &asset_0, amount_expected_0) .expect("putting an account balance should not fail"); - state + state_delta .put_account_balance(&address, 
&asset_1, amount_expected_1) .expect("putting an account balance should not fail"); - state + state_delta .put_account_balance(&address, &asset_2, amount_expected_2) .expect("putting an account balance should not fail"); - let balances = get_account_balances(state, &address).await.unwrap(); + let balances = get_account_balances(state_delta, &address).await.unwrap(); assert_eq!( balances.get(&asset_0.to_ibc_prefixed()).unwrap(), diff --git a/crates/astria-sequencer/src/metrics.rs b/crates/astria-sequencer/src/metrics.rs index a59f4d33bf..1ab749145c 100644 --- a/crates/astria-sequencer/src/metrics.rs +++ b/crates/astria-sequencer/src/metrics.rs @@ -11,6 +11,7 @@ use telemetry::{ }; const CHECK_TX_STAGE: &str = "stage"; +const CACHE_GET_OUTCOME: &str = "outcome"; pub struct Metrics { prepare_proposal_excluded_transactions_cometbft_space: Counter, @@ -40,6 +41,12 @@ pub struct Metrics { transactions_in_mempool_parked: Gauge, mempool_recosted: Counter, internal_logic_error: Counter, + verifiable_cache_hit: Counter, + verifiable_cache_miss: Counter, + verifiable_cache_item_total: Histogram, + non_verifiable_cache_hit: Counter, + non_verifiable_cache_miss: Counter, + non_verifiable_cache_item_total: Histogram, } impl Metrics { @@ -164,6 +171,30 @@ impl Metrics { pub(crate) fn increment_internal_logic_error(&self) { self.internal_logic_error.increment(1); } + + pub(crate) fn increment_verifiable_cache_hit(&self) { + self.verifiable_cache_hit.increment(1); + } + + pub(crate) fn increment_verifiable_cache_miss(&self) { + self.verifiable_cache_miss.increment(1); + } + + pub(crate) fn record_verifiable_cache_item_total(&self, total: usize) { + self.verifiable_cache_item_total.record(total); + } + + pub(crate) fn increment_non_verifiable_cache_hit(&self) { + self.non_verifiable_cache_hit.increment(1); + } + + pub(crate) fn increment_non_verifiable_cache_miss(&self) { + self.non_verifiable_cache_miss.increment(1); + } + + pub(crate) fn record_non_verifiable_cache_item_total(&self, total: usize) { + self.non_verifiable_cache_item_total.record(total); + } } impl telemetry::Metrics for Metrics { @@ -355,6 +386,36 @@ impl telemetry::Metrics for Metrics { )? .register()?; + let mut verifiable_cache_factory = builder.new_counter_factory( + VERIFIABLE_CACHE_GET_COUNT, + "The number of attempts to get data from the verifiable cache in storage", + )?; + let verifiable_cache_hit = verifiable_cache_factory + .register_with_labels(&[(CACHE_GET_OUTCOME, "hit".to_string())])?; + let verifiable_cache_miss = verifiable_cache_factory + .register_with_labels(&[(CACHE_GET_OUTCOME, "miss".to_string())])?; + let verifiable_cache_item_total = builder + .new_histogram_factory( + VERIFIABLE_CACHE_ITEM_TOTAL, + "The number of items in the verifiable cache in storage", + )? + .register()?; + + let mut non_verifiable_cache_factory = builder.new_counter_factory( + NON_VERIFIABLE_CACHE_GET_COUNT, + "The number of attempts to get data from the non-verifiable cache in storage", + )?; + let non_verifiable_cache_hit = non_verifiable_cache_factory + .register_with_labels(&[(CACHE_GET_OUTCOME, "hit".to_string())])?; + let non_verifiable_cache_miss = non_verifiable_cache_factory + .register_with_labels(&[(CACHE_GET_OUTCOME, "miss".to_string())])?; + let non_verifiable_cache_item_total = builder + .new_histogram_factory( + NON_VERIFIABLE_CACHE_ITEM_TOTAL, + "The number of items in the non-verifiable cache in storage", + )? 
+ .register()?; + Ok(Self { prepare_proposal_excluded_transactions_cometbft_space, prepare_proposal_excluded_transactions_sequencer_space, @@ -383,6 +444,12 @@ impl telemetry::Metrics for Metrics { transactions_in_mempool_parked, mempool_recosted, internal_logic_error, + verifiable_cache_hit, + verifiable_cache_miss, + verifiable_cache_item_total, + non_verifiable_cache_hit, + non_verifiable_cache_miss, + non_verifiable_cache_item_total, }) } } @@ -411,32 +478,16 @@ metric_names!(const METRICS_NAMES: TRANSACTIONS_IN_MEMPOOL_TOTAL, TRANSACTIONS_IN_MEMPOOL_PARKED, MEMPOOL_RECOSTED, - INTERNAL_LOGIC_ERROR + INTERNAL_LOGIC_ERROR, + VERIFIABLE_CACHE_GET_COUNT, + NON_VERIFIABLE_CACHE_GET_COUNT, + VERIFIABLE_CACHE_ITEM_TOTAL, + NON_VERIFIABLE_CACHE_ITEM_TOTAL, ); #[cfg(test)] mod tests { - use super::{ - ACTIONS_PER_TRANSACTION_IN_MEMPOOL, - CHECK_TX_DURATION_SECONDS, - CHECK_TX_REMOVED_ACCOUNT_BALANCE, - CHECK_TX_REMOVED_EXPIRED, - CHECK_TX_REMOVED_FAILED_EXECUTION, - CHECK_TX_REMOVED_FAILED_STATELESS, - CHECK_TX_REMOVED_TOO_LARGE, - INTERNAL_LOGIC_ERROR, - MEMPOOL_RECOSTED, - PREPARE_PROPOSAL_EXCLUDED_TRANSACTIONS, - PREPARE_PROPOSAL_EXCLUDED_TRANSACTIONS_COMETBFT_SPACE, - PREPARE_PROPOSAL_EXCLUDED_TRANSACTIONS_FAILED_EXECUTION, - PREPARE_PROPOSAL_EXCLUDED_TRANSACTIONS_SEQUENCER_SPACE, - PROCESS_PROPOSAL_SKIPPED_PROPOSAL, - PROPOSAL_DEPOSITS, - PROPOSAL_TRANSACTIONS, - TRANSACTIONS_IN_MEMPOOL_PARKED, - TRANSACTIONS_IN_MEMPOOL_TOTAL, - TRANSACTION_IN_MEMPOOL_SIZE_BYTES, - }; + use super::*; #[track_caller] fn assert_const(actual: &'static str, suffix: &str) { @@ -503,5 +554,15 @@ mod tests { ); assert_const(MEMPOOL_RECOSTED, "mempool_recosted"); assert_const(INTERNAL_LOGIC_ERROR, "internal_logic_error"); + assert_const(VERIFIABLE_CACHE_GET_COUNT, "verifiable_cache_get_count"); + assert_const( + NON_VERIFIABLE_CACHE_GET_COUNT, + "non_verifiable_cache_get_count", + ); + assert_const(VERIFIABLE_CACHE_ITEM_TOTAL, "verifiable_cache_item_total"); + assert_const( + NON_VERIFIABLE_CACHE_ITEM_TOTAL, + "non_verifiable_cache_item_total", + ); } } diff --git a/crates/astria-sequencer/src/sequencer.rs b/crates/astria-sequencer/src/sequencer.rs index c5b3411d1c..6dda9709a7 100644 --- a/crates/astria-sequencer/src/sequencer.rs +++ b/crates/astria-sequencer/src/sequencer.rs @@ -1,12 +1,9 @@ use astria_core::generated::astria::sequencerblock::v1::sequencer_service_server::SequencerServiceServer; -use astria_eyre::{ - anyhow_to_eyre, - eyre::{ - eyre, - OptionExt as _, - Result, - WrapErr as _, - }, +use astria_eyre::eyre::{ + eyre, + OptionExt as _, + Result, + WrapErr as _, }; use penumbra_tower_trace::{ trace::request_span, @@ -41,6 +38,7 @@ use crate::{ mempool::Mempool, metrics::Metrics, service, + storage::Storage, }; pub struct Sequencer; @@ -72,15 +70,15 @@ impl Sequencer { let substore_prefixes = vec![penumbra_ibc::IBC_SUBSTORE_PREFIX]; - let storage = cnidarium::Storage::load( + let storage = Storage::load( config.db_filepath.clone(), substore_prefixes .into_iter() - .map(std::string::ToString::to_string) + .map(ToString::to_string) .collect(), + metrics, ) .await - .map_err(anyhow_to_eyre) .wrap_err("failed to load storage backing chain state")?; let snapshot = storage.latest_snapshot(); @@ -151,12 +149,16 @@ impl Sequencer { .wrap_err("grpc server task failed")? .wrap_err("grpc server failed")?; server_handle.abort(); + // We don't care about the returned value - it's likely a `cancelled` error. + let _ = server_handle.await; + // Shut down storage. 
+ storage.release().await; Ok(()) } } fn start_grpc_server( - storage: &cnidarium::Storage, + storage: &Storage, mempool: Mempool, grpc_addr: std::net::SocketAddr, shutdown_rx: oneshot::Receiver<()>, @@ -170,7 +172,7 @@ fn start_grpc_server( use penumbra_tower_trace::remote_addr; use tower_http::cors::CorsLayer; - let ibc = penumbra_ibc::component::rpc::IbcQuery::<AstriaHost>::new(storage.clone()); + let ibc = penumbra_ibc::component::rpc::IbcQuery::<AstriaHost>::new(storage.inner()); let sequencer_api = SequencerServer::new(storage.clone(), mempool); let cors_layer: CorsLayer = CorsLayer::permissive(); diff --git a/crates/astria-sequencer/src/service/consensus.rs b/crates/astria-sequencer/src/service/consensus.rs index a0b6a4a5ae..2eaf804538 100644 --- a/crates/astria-sequencer/src/service/consensus.rs +++ b/crates/astria-sequencer/src/service/consensus.rs @@ -4,7 +4,6 @@ use astria_eyre::eyre::{ Result, WrapErr as _, }; -use cnidarium::Storage; use tendermint::v0_38::abci::{ request, response, @@ -20,7 +19,10 @@ use tracing::{ Instrument, }; -use crate::app::App; +use crate::{ + app::App, + storage::Storage, +}; pub(crate) struct Consensus { queue: mpsc::Receiver<Message<ConsensusRequest, ConsensusResponse, tower::BoxError>>, @@ -470,7 +472,7 @@ mod tests { .try_into() .unwrap(); - let storage = cnidarium::TempStorage::new().await.unwrap(); + let storage = Storage::new_temp().await; let snapshot = storage.latest_snapshot(); let metrics = Box::leak(Box::new(Metrics::noop_metrics(&()).unwrap())); let mempool = Mempool::new(metrics, 100); diff --git a/crates/astria-sequencer/src/service/info/abci_query_router.rs b/crates/astria-sequencer/src/service/info/abci_query_router.rs index 00a127772d..114466732a 100644 --- a/crates/astria-sequencer/src/service/info/abci_query_router.rs +++ b/crates/astria-sequencer/src/service/info/abci_query_router.rs @@ -38,7 +38,6 @@ use std::{ pin::Pin, }; -use cnidarium::Storage; use matchit::{ Match, MatchError, @@ -48,6 +47,8 @@ use tendermint::abci::{ response, }; +use crate::storage::Storage; + #[derive(Debug, thiserror::Error)] #[error("`{route}` is an invalid route")] pub(crate) struct InsertError { diff --git a/crates/astria-sequencer/src/service/info/mod.rs b/crates/astria-sequencer/src/service/info/mod.rs index e093514cae..a0d2577139 100644 --- a/crates/astria-sequencer/src/service/info/mod.rs +++ b/crates/astria-sequencer/src/service/info/mod.rs @@ -8,7 +8,6 @@ use std::{ use astria_core::protocol::abci::AbciErrorCode; use astria_eyre::eyre::WrapErr as _; -use cnidarium::Storage; use futures::{ Future, FutureExt, @@ -30,12 +29,11 @@ use tracing::{ Instrument as _, }; +use crate::storage::Storage; + mod abci_query_router; -use astria_eyre::{ - anyhow_to_eyre, - eyre::Result, -}; +use astria_eyre::eyre::Result; use crate::app::StateReadExt as _; @@ -100,7 +98,6 @@ impl Info { .latest_snapshot() .root_hash() .await - .map_err(anyhow_to_eyre) .wrap_err("failed to get app hash")?; let response = InfoResponse::Info(response::Info { @@ -145,7 +142,7 @@ impl Info { (handler, params) } }; - handler.call(self.storage.clone(), request, params).await + handler.call(self.storage, request, params).await } } @@ -193,10 +190,7 @@ mod tests { }, }, }; - use cnidarium::{ - StateDelta, - StateWrite, - }; + use cnidarium::StateDelta; use penumbra_ibc::IbcRelay; use prost::Message as _; @@ -214,6 +208,10 @@ mod tests { StateReadExt as _, StateWriteExt as _, }, + storage::{ + Snapshot, + Storage, + }, }; #[tokio::test] @@ -223,31 +221,30 @@ mod tests { 
protocol::account::v1::AssetBalance, }; - let storage = cnidarium::TempStorage::new() - .await - .expect("failed to create temp storage backing chain state"); + let storage = Storage::new_temp().await; + let height = 99; let version = storage.latest_version().wrapping_add(1); - let mut state = StateDelta::new(storage.latest_snapshot()); - state + let mut state_delta = storage.new_delta_of_latest_snapshot(); + state_delta .put_storage_version_by_height(height, version) .unwrap(); - state.put_base_prefix("astria".to_string()).unwrap(); - state.put_native_asset(nria()).unwrap(); - state.put_ibc_asset(nria()).unwrap(); + state_delta.put_base_prefix("astria".to_string()).unwrap(); + state_delta.put_native_asset(nria()).unwrap(); + state_delta.put_ibc_asset(nria()).unwrap(); - let address = state + let address = state_delta .try_base_prefixed(&hex::decode("a034c743bed8f26cb8ee7b8db2230fd8347ae131").unwrap()) .await .unwrap(); let balance = 1000; - state + state_delta .put_account_balance(&address, &nria(), balance) .unwrap(); - state.put_block_height(height).unwrap(); - storage.commit(state).await.unwrap(); + state_delta.put_block_height(height).unwrap(); + storage.commit(state_delta).await.unwrap(); let info_request = InfoRequest::Query(request::Query { path: format!("accounts/balance/{address}"), @@ -257,7 +254,7 @@ mod tests { }); let response = { - let storage = (*storage).clone(); + let storage = storage.clone(); let info_service = Info::new(storage).unwrap(); info_service .handle_info_request(info_request) @@ -288,14 +285,14 @@ mod tests { async fn handle_denom_query() { use astria_core::generated::astria::protocol::asset::v1 as raw; - let storage = cnidarium::TempStorage::new().await.unwrap(); - let mut state = StateDelta::new(storage.latest_snapshot()); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let denom: asset::TracePrefixed = "some/ibc/asset".parse().unwrap(); let height = 99; - state.put_block_height(height).unwrap(); - state.put_ibc_asset(denom.clone()).unwrap(); - storage.commit(state).await.unwrap(); + state_delta.put_block_height(height).unwrap(); + state_delta.put_ibc_asset(denom.clone()).unwrap(); + storage.commit(state_delta).await.unwrap(); let info_request = InfoRequest::Query(request::Query { path: format!( @@ -308,7 +305,7 @@ mod tests { }); let response = { - let storage = (*storage).clone(); + let storage = storage.clone(); let info_service = Info::new(storage).unwrap(); info_service .handle_info_request(info_request) @@ -332,8 +329,8 @@ mod tests { async fn handle_allowed_fee_assets_query() { use astria_core::generated::astria::protocol::asset::v1 as raw; - let storage = cnidarium::TempStorage::new().await.unwrap(); - let mut state = StateDelta::new(storage.latest_snapshot()); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let assets = vec![ "asset_0".parse::<asset::Denom>().unwrap(), @@ -343,17 +340,17 @@ mod tests { let height = 99; for asset in &assets { - state.put_allowed_fee_asset(asset).unwrap(); + state_delta.put_allowed_fee_asset(asset).unwrap(); assert!( - state + state_delta .is_allowed_fee_asset(asset) .await .expect("checking for allowed fee asset should not fail"), "fee asset was expected to be allowed" ); } - state.put_block_height(height).unwrap(); - storage.commit(state).await.unwrap(); + state_delta.put_block_height(height).unwrap(); + storage.commit(state_delta).await.unwrap(); let info_request = InfoRequest::Query(request::Query 
{ path: "asset/allowed_fee_assets".to_string(), @@ -363,7 +360,7 @@ mod tests { }); let response = { - let storage = (*storage).clone(); + let storage = storage.clone(); let info_service = Info::new(storage).unwrap(); info_service .handle_info_request(info_request) @@ -394,14 +391,14 @@ mod tests { #[tokio::test] async fn handle_fee_components() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let mut state = StateDelta::new(storage.latest_snapshot()); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); let height = 99; - state.put_block_height(height).unwrap(); - write_all_the_fees(&mut state); - storage.commit(state).await.unwrap(); + state_delta.put_block_height(height).unwrap(); + write_all_the_fees(&mut state_delta); + storage.commit(state_delta).await.unwrap(); let info_request = InfoRequest::Query(request::Query { path: "fees/components".to_string(), @@ -411,7 +408,7 @@ mod tests { }); let response = { - let storage = (*storage).clone(); + let storage = storage.clone(); let info_service = Info::new(storage).unwrap(); info_service .handle_info_request(info_request) @@ -491,47 +488,47 @@ mod tests { }) } - fn write_all_the_fees<S: StateWrite>(mut state: S) { - state + fn write_all_the_fees(state_delta: &mut StateDelta<Snapshot>) { + state_delta .put_fees(FeeComponents::<BridgeLock>::new(1, 1)) .unwrap(); - state + state_delta .put_fees(FeeComponents::<BridgeUnlock>::new(2, 2)) .unwrap(); - state + state_delta .put_fees(FeeComponents::<BridgeSudoChange>::new(3, 3)) .unwrap(); - state + state_delta .put_fees(FeeComponents::<FeeAssetChange>::new(4, 4)) .unwrap(); - state + state_delta .put_fees(FeeComponents::<FeeChange>::new(5, 5)) .unwrap(); - state + state_delta .put_fees(FeeComponents::<InitBridgeAccount>::new(6, 6)) .unwrap(); - state + state_delta .put_fees(FeeComponents::<IbcRelay>::new(7, 7)) .unwrap(); - state + state_delta .put_fees(FeeComponents::<IbcRelayerChange>::new(8, 8)) .unwrap(); - state + state_delta .put_fees(FeeComponents::<IbcSudoChange>::new(9, 9)) .unwrap(); - state + state_delta .put_fees(FeeComponents::<Ics20Withdrawal>::new(10, 10)) .unwrap(); - state + state_delta .put_fees(FeeComponents::<RollupDataSubmission>::new(11, 11)) .unwrap(); - state + state_delta .put_fees(FeeComponents::<SudoAddressChange>::new(12, 12)) .unwrap(); - state + state_delta .put_fees(FeeComponents::<Transfer>::new(13, 13)) .unwrap(); - state + state_delta .put_fees(FeeComponents::<ValidatorUpdate>::new(14, 14)) .unwrap(); } diff --git a/crates/astria-sequencer/src/service/mempool/mod.rs b/crates/astria-sequencer/src/service/mempool/mod.rs index 47bd1dfe91..a013275d7b 100644 --- a/crates/astria-sequencer/src/service/mempool/mod.rs +++ b/crates/astria-sequencer/src/service/mempool/mod.rs @@ -20,10 +20,7 @@ use astria_core::{ }; use astria_eyre::eyre::WrapErr as _; use bytes::Bytes; -use cnidarium::{ - StateRead, - Storage, -}; +use cnidarium::StateRead; use futures::{ Future, FutureExt, @@ -59,6 +56,7 @@ use crate::{ RemovalReason, }, metrics::Metrics, + storage::Storage, transaction, }; diff --git a/crates/astria-sequencer/src/service/mempool/tests.rs b/crates/astria-sequencer/src/service/mempool/tests.rs index 2baa7bda6b..5675f30e0e 100644 --- a/crates/astria-sequencer/src/service/mempool/tests.rs +++ b/crates/astria-sequencer/src/service/mempool/tests.rs @@ -21,12 +21,13 @@ use crate::{ Mempool, RemovalReason, }, + storage::Storage, }; #[tokio::test] async fn future_nonces_are_accepted() { // The mempool should 
allow future nonces. - let storage = cnidarium::TempStorage::new().await.unwrap(); + let storage = Storage::new_temp().await; let snapshot = storage.latest_snapshot(); let metrics = Box::leak(Box::new(Metrics::noop_metrics(&()).unwrap())); @@ -56,7 +57,7 @@ async fn future_nonces_are_accepted() { #[tokio::test] async fn rechecks_pass() { // The mempool should not fail rechecks of transactions. - let storage = cnidarium::TempStorage::new().await.unwrap(); + let storage = Storage::new_temp().await; let snapshot = storage.latest_snapshot(); let metrics = Box::leak(Box::new(Metrics::noop_metrics(&()).unwrap())); @@ -94,7 +95,7 @@ async fn can_reinsert_after_recheck_fail() { // The mempool should be able to re-insert a transaction after a recheck fails due to the // transaction being removed from the appside mempool. This is to allow users to re-insert // if they wish to do so. - let storage = cnidarium::TempStorage::new().await.unwrap(); + let storage = Storage::new_temp().await; let snapshot = storage.latest_snapshot(); let metrics = Box::leak(Box::new(Metrics::noop_metrics(&()).unwrap())); @@ -142,7 +143,7 @@ async fn recheck_adds_non_tracked_tx() { // The mempool should be able to insert a transaction on recheck if it isn't in the mempool. // This could happen in the case of a sequencer restart as the cometbft mempool persists but // the appside one does not. - let storage = cnidarium::TempStorage::new().await.unwrap(); + let storage = Storage::new_temp().await; let snapshot = storage.latest_snapshot(); let metrics = Box::leak(Box::new(Metrics::noop_metrics(&()).unwrap())); diff --git a/crates/astria-sequencer/src/storage/mod.rs b/crates/astria-sequencer/src/storage/mod.rs index a4a1b61c98..5f6e838205 100644 --- a/crates/astria-sequencer/src/storage/mod.rs +++ b/crates/astria-sequencer/src/storage/mod.rs @@ -1,4 +1,337 @@ +use std::{ + fmt::{ + self, + Debug, + Formatter, + }, + path::PathBuf, + sync::{ + Arc, + Mutex, + }, +}; + +use astria_eyre::{ + anyhow_to_eyre, + eyre, +}; +use cnidarium::{ + RootHash, + StagedWriteBatch, + StateDelta, +}; + +pub(crate) use self::{ + snapshot::Snapshot, + stored_value::StoredValue, +}; +use crate::Metrics; + pub(crate) mod keys; +mod snapshot; mod stored_value; -pub(crate) use stored_value::StoredValue; +#[derive(Clone)] +pub(crate) struct Storage { + inner: cnidarium::Storage, + latest_snapshot: Arc<Mutex<Snapshot>>, + metrics: &'static Metrics, + #[cfg(any(test, feature = "benchmark"))] + _temp_dir: Option<Arc<tempfile::TempDir>>, +} + +impl Storage { + pub(crate) async fn load( + path: PathBuf, + prefixes: Vec<String>, + metrics: &'static Metrics, + ) -> astria_eyre::Result<Self> { + let inner = cnidarium::Storage::load(path, prefixes) + .await + .map_err(anyhow_to_eyre)?; + let latest_snapshot = Arc::new(Mutex::new(Snapshot::new(inner.latest_snapshot(), metrics))); + Ok(Self { + inner, + latest_snapshot, + metrics, + #[cfg(any(test, feature = "benchmark"))] + _temp_dir: None, + }) + } + + #[cfg(any(test, feature = "benchmark"))] + pub(crate) async fn new_temp() -> Self { + use telemetry::Metrics as _; + + let temp_dir = tempfile::tempdir().unwrap_or_else(|error| { + panic!("failed to create temp dir when constructing storage instance: {error}") + }); + let db_path = temp_dir.path().join("storage.db"); + let inner = cnidarium::Storage::init(db_path.clone(), vec![]) + .await + .unwrap_or_else(|error| { + panic!( + "failed to initialize storage at `{}`: {error:#}", + db_path.display() + ) + }); + let metrics = 
Box::leak(Box::new(Metrics::noop_metrics(&()).unwrap())); + let latest_snapshot = Arc::new(Mutex::new(Snapshot::new(inner.latest_snapshot(), metrics))); + + Self { + inner, + latest_snapshot, + metrics, + _temp_dir: Some(Arc::new(temp_dir)), + } + } + + /// Returns the latest version (block height) of the tree recorded by `Storage`. + /// + /// If the tree is empty and has not been initialized, returns `u64::MAX`. + pub(crate) fn latest_version(&self) -> u64 { + self.inner.latest_version() + } + + /// Returns a new `Snapshot` on top of the latest version of the tree. + pub(crate) fn latest_snapshot(&self) -> Snapshot { + self.latest_snapshot.lock().unwrap().clone() + } + + /// Returns the `Snapshot` corresponding to the given version. + pub(crate) fn snapshot(&self, version: u64) -> Option<Snapshot> { + Some(Snapshot::new(self.inner.snapshot(version)?, self.metrics)) + } + + /// Returns a new `Delta` on top of the latest version of the tree. + pub(crate) fn new_delta_of_latest_snapshot(&self) -> StateDelta<Snapshot> { + self.latest_snapshot().new_delta() + } + + /// Returns a clone of the wrapped `cnidarium::Storage`. + pub(crate) fn inner(&self) -> cnidarium::Storage { + self.inner.clone() + } + + /// Prepares a commit for the provided `SnapshotDelta`, returning a `StagedWriteBatch`. + /// + /// The batch can be committed to the database using the [`Storage::commit_batch`] method. + pub(crate) async fn prepare_commit( + &self, + delta: StateDelta<Snapshot>, + ) -> eyre::Result<StagedWriteBatch> { + let (snapshot, changes) = delta.flatten(); + let cnidarium_snapshot = snapshot.into_inner(); + let mut cnidarium_delta = StateDelta::new(cnidarium_snapshot); + changes.apply_to(&mut cnidarium_delta); + self.inner + .prepare_commit(cnidarium_delta) + .await + .map_err(anyhow_to_eyre) + } + + /// Commits the provided `SnapshotDelta` to persistent storage as the latest version of the + /// chain state. + #[cfg(test)] + pub(crate) async fn commit(&self, delta: StateDelta<Snapshot>) -> eyre::Result<RootHash> { + let batch = self.prepare_commit(delta).await?; + self.commit_batch(batch) + } + + /// Commits the supplied `StagedWriteBatch` to persistent storage. + pub(crate) fn commit_batch(&self, batch: StagedWriteBatch) -> eyre::Result<RootHash> { + let root_hash = self.inner.commit_batch(batch).map_err(anyhow_to_eyre)?; + let mut ls = self.latest_snapshot.lock().unwrap(); + *ls = Snapshot::new(self.inner.latest_snapshot(), self.metrics); + Ok(root_hash) + } + + /// Shuts down the database and the dispatcher task, and waits for all resources to be + /// reclaimed. + /// + /// # Panics + /// + /// Panics if there is more than one clone remaining of the `cnidarium::Inner` storage `Arc`. + pub(crate) async fn release(self) { + self.inner.release().await; + } +} + +impl Debug for Storage { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +#[cfg(test)] +mod tests { + use cnidarium::{ + StateRead as _, + StateWrite as _, + }; + use telemetry::Metrics as _; + + use super::*; + + const V_KEY: &str = "verifiable key"; + const NV_KEY: &[u8] = b"non-verifiable key"; + const VALUES: [[u8; 1]; 4] = [[1], [2], [3], [4]]; + + #[test] + fn should_prepare_and_commit_batch() { + let temp_dir = tempfile::tempdir().unwrap(); + let db_path = temp_dir.path().join("storage_test"); + let metrics = Box::leak(Box::new(Metrics::noop_metrics(&()).unwrap())); + + // Run the tests on the first storage instance. 
+ // + // NOTE: `cnidarium::Storage::load` panics if we try to open it more than once from the same + // thread, even if the first instance is dropped. We use two separate tokio runtimes to + // avoid this. + tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap() + .block_on(async { + let storage = Storage::load(db_path.clone(), vec![], metrics) + .await + .unwrap(); + + // Check there's no previous snapshots available. + assert!(storage.snapshot(0).is_none()); + + // Write data to the verifiable and non-verifiable stores. + let mut state_delta = storage.new_delta_of_latest_snapshot(); + state_delta.put_raw(V_KEY.to_string(), VALUES[0].to_vec()); + state_delta.nonverifiable_put_raw(NV_KEY.to_vec(), VALUES[1].to_vec()); + + // Commit the data. + let batch = storage.prepare_commit(state_delta).await.unwrap(); + storage.commit_batch(batch).unwrap(); + + // Check the data is available in a new latest snapshot, and a snapshot at v0 + // (the only version currently available). + let snapshot_0 = storage.snapshot(0).unwrap(); + assert_eq!( + Some(VALUES[0].to_vec()), + snapshot_0.get_raw(V_KEY).await.unwrap() + ); + assert_eq!( + Some(VALUES[1].to_vec()), + snapshot_0.nonverifiable_get_raw(NV_KEY).await.unwrap() + ); + + let snapshot_latest = storage.latest_snapshot(); + assert_eq!( + Some(VALUES[0].to_vec()), + snapshot_latest.get_raw(V_KEY).await.unwrap() + ); + assert_eq!( + Some(VALUES[1].to_vec()), + snapshot_latest.nonverifiable_get_raw(NV_KEY).await.unwrap() + ); + + // Check there's no snapshot v1. + assert!(storage.snapshot(1).is_none()); + + // Shut down the original storage instance. + storage.release().await; + }); + + // Open a new storage instance using the same DB file and run follow-up tests. + tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap() + .block_on(async { + let storage = Storage::load(db_path.clone(), vec![], metrics) + .await + .unwrap(); + + // Check the data is available in snapshot v0 (the only snapshot available now). + let snapshot_0 = storage.snapshot(0).unwrap(); + assert_eq!( + Some(VALUES[0].to_vec()), + snapshot_0.get_raw(V_KEY).await.unwrap() + ); + assert_eq!( + Some(VALUES[1].to_vec()), + snapshot_0.nonverifiable_get_raw(NV_KEY).await.unwrap() + ); + + // Overwrite the values and commit these changes. + let mut state_delta = storage.new_delta_of_latest_snapshot(); + state_delta.put_raw(V_KEY.to_string(), VALUES[2].to_vec()); + state_delta.nonverifiable_put_raw(NV_KEY.to_vec(), VALUES[3].to_vec()); + let batch = storage.prepare_commit(state_delta).await.unwrap(); + storage.commit_batch(batch).unwrap(); + + // Check the data has the original values in snapshot v0, but the new values in + // the latest snapshot (v1). + let snapshot_0 = storage.snapshot(0).unwrap(); + assert_eq!( + Some(VALUES[0].to_vec()), + snapshot_0.get_raw(V_KEY).await.unwrap() + ); + assert_eq!( + Some(VALUES[1].to_vec()), + snapshot_0.nonverifiable_get_raw(NV_KEY).await.unwrap() + ); + + let snapshot_latest = storage.latest_snapshot(); + assert_eq!( + Some(VALUES[2].to_vec()), + snapshot_latest.get_raw(V_KEY).await.unwrap() + ); + assert_eq!( + Some(VALUES[3].to_vec()), + snapshot_latest.nonverifiable_get_raw(NV_KEY).await.unwrap() + ); + + // Check snapshot v1 exists, and there's no snapshot v2. 
+ assert!(storage.snapshot(1).is_some()); + assert!(storage.snapshot(2).is_none()); + }); + } + + #[tokio::test] + async fn should_not_commit_invalid_batch() { + let temp_dir = tempfile::tempdir().unwrap(); + let db_path = temp_dir.path().join("storage_test"); + let metrics = Box::leak(Box::new(Metrics::noop_metrics(&()).unwrap())); + let storage = Storage::load(db_path.clone(), vec![], metrics) + .await + .unwrap(); + + // Write and commit data twice. + let mut state_delta = storage.new_delta_of_latest_snapshot(); + state_delta.put_raw(V_KEY.to_string(), VALUES[0].to_vec()); + storage.commit(state_delta).await.unwrap(); + + state_delta = storage.new_delta_of_latest_snapshot(); + state_delta.nonverifiable_put_raw(NV_KEY.to_vec(), VALUES[1].to_vec()); + storage.commit(state_delta).await.unwrap(); + + // Assert we have two snapshot versions available. + assert!(storage.snapshot(0).is_some()); + assert!(storage.snapshot(1).is_some()); + assert!(storage.snapshot(2).is_none()); + + // Create a new state delta from snapshot v0 and try to commit it - should fail. + state_delta = storage.snapshot(0).unwrap().new_delta(); + match storage.prepare_commit(state_delta).await { + Ok(_) => panic!("should fail to prepare commit for an existing snapshot version"), + Err(error) => { + assert!(error.to_string().contains( + "trying to prepare a commit for a delta forked from version 0, but the latest \ + version is 1" + )); + } + } + + // Assert we still have two snapshot versions available. + assert!(storage.snapshot(0).is_some()); + assert!(storage.snapshot(1).is_some()); + assert!(storage.snapshot(2).is_none()); + } +} diff --git a/crates/astria-sequencer/src/storage/snapshot.rs b/crates/astria-sequencer/src/storage/snapshot.rs new file mode 100644 index 0000000000..5cb77a252f --- /dev/null +++ b/crates/astria-sequencer/src/storage/snapshot.rs @@ -0,0 +1,488 @@ +use std::{ + any::{ + Any, + TypeId, + }, + fmt::{ + self, + Debug, + Formatter, + }, + future::Future, + pin::Pin, + sync::Arc, + task::{ + Context, + Poll, + }, +}; + +use anyhow::Context as _; +use astria_eyre::anyhow_to_eyre; +use async_trait::async_trait; +use bytes::Bytes; +use cnidarium::{ + RootHash, + StateDelta, + StateRead, +}; +use futures::TryStreamExt; +use pin_project_lite::pin_project; +use quick_cache::sync::Cache as QuickCache; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; + +use crate::Metrics; + +/// An in-memory cache of objects that belong in the verifiable store. +/// +/// A `None` value represents an item not present in the on-disk storage. +type VerifiableCache = Arc<QuickCache<String, Option<Bytes>>>; +/// An in-memory cache of objects that belong in the non-verifiable store. +/// +/// A `None` value represents an item not present in the on-disk storage. 
+type NonVerifiableCache = Arc<QuickCache<Vec<u8>, Option<Bytes>>>; + +#[derive(Clone)] +pub(crate) struct Snapshot { + inner: cnidarium::Snapshot, + verifiable_cache: VerifiableCache, + non_verifiable_cache: NonVerifiableCache, + metrics: &'static Metrics, +} + +impl Snapshot { + pub(super) fn new(inner: cnidarium::Snapshot, metrics: &'static Metrics) -> Self { + Self { + inner, + verifiable_cache: Arc::new(QuickCache::new(10_000)), + non_verifiable_cache: Arc::new(QuickCache::new(1_000)), + metrics, + } + } + + pub(super) fn into_inner(self) -> cnidarium::Snapshot { + self.inner + } + + pub(crate) fn new_delta(&self) -> StateDelta<Snapshot> { + StateDelta::new(self.clone()) + } + + pub(crate) async fn root_hash(&self) -> astria_eyre::Result<RootHash> { + self.inner.root_hash().await.map_err(anyhow_to_eyre) + } +} + +impl Debug for Snapshot { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +#[async_trait] +impl StateRead for Snapshot { + type GetRawFut = SnapshotFuture; + type NonconsensusPrefixRawStream = ReceiverStream<anyhow::Result<(Vec<u8>, Vec<u8>)>>; + type NonconsensusRangeRawStream = ReceiverStream<anyhow::Result<(Vec<u8>, Vec<u8>)>>; + type PrefixKeysStream = ReceiverStream<anyhow::Result<String>>; + type PrefixRawStream = ReceiverStream<anyhow::Result<(String, Vec<u8>)>>; + + fn get_raw(&self, key: &str) -> Self::GetRawFut { + get_raw( + key.to_owned(), + self.inner.clone(), + self.verifiable_cache.clone(), + self.metrics, + ) + } + + fn nonverifiable_get_raw(&self, key: &[u8]) -> Self::GetRawFut { + non_verifiable_get_raw( + key.to_owned(), + self.inner.clone(), + self.non_verifiable_cache.clone(), + self.metrics, + ) + } + + fn object_get<T: Any + Send + Sync + Clone>(&self, _key: &str) -> Option<T> { + // No ephemeral object cache in read-only `Snapshot`. + None + } + + fn object_type(&self, _key: &str) -> Option<TypeId> { + // No ephemeral object cache in read-only `Snapshot`. + None + } + + fn prefix_raw(&self, prefix: &str) -> Self::PrefixRawStream { + let (tx_prefix_item, rx_prefix_query) = mpsc::channel(10); + let inner_snapshot = self.inner.clone(); + let cache = self.verifiable_cache.clone(); + let metrics = self.metrics; + tokio::spawn(inner_snapshot.prefix_keys(prefix).try_for_each(move |key| { + let inner_snapshot = inner_snapshot.clone(); + let cache = cache.clone(); + let tx_prefix_item = tx_prefix_item.clone(); + async move { + let value = get_raw(key.clone(), inner_snapshot, cache, metrics) + .await? + .with_context(|| "should never be `None` value for streamed key")?; + let permit = tx_prefix_item + .reserve() + .await + .with_context(|| "failed to reserve space on the sending channel")?; + permit.send(Ok((key, value))); + Ok(()) + } + })); + ReceiverStream::new(rx_prefix_query) + } + + fn prefix_keys(&self, prefix: &str) -> Self::PrefixKeysStream { + self.inner.prefix_keys(prefix) + } + + /// NOTE: The cache is unusable here. + fn nonverifiable_prefix_raw(&self, prefix: &[u8]) -> Self::NonconsensusPrefixRawStream { + self.inner.nonverifiable_prefix_raw(prefix) + } + + /// NOTE: The cache is unusable here. + fn nonverifiable_range_raw( + &self, + prefix: Option<&[u8]>, + range: impl std::ops::RangeBounds<Vec<u8>>, + ) -> anyhow::Result<Self::NonconsensusRangeRawStream> { + self.inner.nonverifiable_range_raw(prefix, range) + } +} + +pin_project! 
{ + pub struct SnapshotFuture { + #[pin] + inner: tokio::task::JoinHandle<anyhow::Result<Option<Vec<u8>>>> + } +} + +impl SnapshotFuture { + fn new<F>(future: F) -> Self + where + F: Future<Output = anyhow::Result<Option<Vec<u8>>>> + Send + 'static, + { + Self { + inner: tokio::task::spawn(future), + } + } +} + +impl Future for SnapshotFuture { + type Output = anyhow::Result<Option<Vec<u8>>>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let this = self.project(); + match this.inner.poll(cx) { + Poll::Ready(result) => { + Poll::Ready(result.expect("unrecoverable join error from tokio task")) + } + Poll::Pending => Poll::Pending, + } + } +} + +async fn get<S, K>(state: &S, key: K) -> anyhow::Result<Option<Vec<u8>>> +where + S: StateRead + ?Sized, + K: AsRef<str>, +{ + let key = key.as_ref(); + state + .get_raw(key) + .await + .with_context(|| format!("failed to get raw value under key `{key}`")) +} + +async fn non_verifiable_get<S, K>(state: &S, key: K) -> anyhow::Result<Option<Vec<u8>>> +where + S: StateRead, + K: AsRef<[u8]>, +{ + let key = key.as_ref(); + state.nonverifiable_get_raw(key).await.with_context(|| { + format!( + "failed to get nonverifiable raw value under key `{}`", + display_non_verifiable_key(key) + ) + }) +} + +fn get_raw( + key: String, + inner_snapshot: cnidarium::Snapshot, + cache: VerifiableCache, + metrics: &'static Metrics, +) -> SnapshotFuture { + SnapshotFuture::new(async move { + let maybe_value = match cache.get_value_or_guard_async(&key).await { + Ok(value) => { + metrics.increment_verifiable_cache_hit(); + value + } + Err(guard) => { + metrics.increment_verifiable_cache_miss(); + let value = get(&inner_snapshot, &key).await?.map(Bytes::from); + let _ = guard.insert(value.clone()); + value + } + }; + metrics.record_verifiable_cache_item_total(cache.len()); + Ok(maybe_value.map(Vec::from)) + }) +} + +fn non_verifiable_get_raw( + key: Vec<u8>, + inner_snapshot: cnidarium::Snapshot, + cache: NonVerifiableCache, + metrics: &'static Metrics, +) -> SnapshotFuture { + SnapshotFuture::new(async move { + let maybe_value = match cache.get_value_or_guard_async(&key).await { + Ok(value) => { + metrics.increment_non_verifiable_cache_hit(); + value + } + Err(guard) => { + metrics.increment_non_verifiable_cache_miss(); + let value = non_verifiable_get(&inner_snapshot, &key) + .await? + .map(Bytes::from); + let _ = guard.insert(value.clone()); + value + } + }; + metrics.record_non_verifiable_cache_item_total(cache.len()); + Ok(maybe_value.map(Vec::from)) + }) +} + +/// Provides a `String` version of the given key for display (logging) purposes, parsed from UTF-8 +/// if possible, falling back to base64 encoding. 
+fn display_non_verifiable_key(key: &[u8]) -> String { + String::from_utf8(key.to_vec()).unwrap_or_else(|_| telemetry::display::base64(key).to_string()) +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeMap; + + use cnidarium::StateWrite as _; + use tempfile::TempDir; + + use super::{ + super::Storage, + *, + }; + + const V_KEY: &str = "verifiable key"; + const NV_KEY: &[u8] = b"non-verifiable key"; + const VALUES: [[u8; 1]; 4] = [[1], [2], [3], [4]]; + + struct Fixture { + storage: Storage, + _temp_dir: TempDir, + } + + impl Fixture { + async fn new() -> Self { + let (metrics, _) = telemetry::metrics::ConfigBuilder::new() + .set_global_recorder(false) + .build(&()) + .unwrap(); + let metrics = Box::leak(Box::new(metrics)); + let temp_dir = tempfile::tempdir().unwrap(); + let db_path = temp_dir.path().join("storage_test"); + let storage = Storage::load(db_path.clone(), vec![], metrics) + .await + .unwrap(); + Self { + storage, + _temp_dir: temp_dir, + } + } + } + + #[tokio::test] + async fn get_raw_should_succeed() { + #[track_caller] + fn assert_in_cache(snapshot: &Snapshot, value: &[u8]) { + let Some(serialized_value) = snapshot.verifiable_cache.get(V_KEY).unwrap() else { + panic!("should have value in cache"); + }; + assert_eq!(value.to_vec(), serialized_value); + } + + let Fixture { + storage, + _temp_dir, + } = Fixture::new().await; + + // `get_raw` should return `None` for non-existent value. + let snapshot = storage.latest_snapshot(); + assert!(snapshot.get_raw(V_KEY).await.unwrap().is_none()); + + // Write and commit data. + let mut state_delta = storage.new_delta_of_latest_snapshot(); + state_delta.put_raw(V_KEY.to_string(), VALUES[0].to_vec()); + storage.commit(state_delta).await.unwrap(); + + // `get_raw` on the latest snapshot should return the correct value and cache it. + let snapshot = storage.latest_snapshot(); + assert!(snapshot.verifiable_cache.is_empty()); + assert_eq!( + Some(VALUES[0].to_vec()), + snapshot.get_raw(V_KEY).await.unwrap() + ); + assert_eq!(1, snapshot.verifiable_cache.len()); + assert_in_cache(&snapshot, &VALUES[0]); + + // Write and commit different data under the same key. + let mut state_delta = storage.new_delta_of_latest_snapshot(); + state_delta.put_raw(V_KEY.to_string(), VALUES[1].to_vec()); + storage.commit(state_delta).await.unwrap(); + + // `get_raw` on a v0 snapshot should return the original value, and on the latest snapshot + // should return the updated value. Both caches should be updated. + let snapshot = storage.snapshot(0).unwrap(); + assert!(snapshot.verifiable_cache.is_empty()); + assert_eq!( + Some(VALUES[0].to_vec()), + snapshot.get_raw(V_KEY).await.unwrap() + ); + assert_eq!(1, snapshot.verifiable_cache.len()); + assert_in_cache(&snapshot, &VALUES[0]); + + let snapshot = storage.latest_snapshot(); + assert!(snapshot.verifiable_cache.is_empty()); + assert_eq!( + Some(VALUES[1].to_vec()), + snapshot.get_raw(V_KEY).await.unwrap() + ); + assert_eq!(1, snapshot.verifiable_cache.len()); + assert_in_cache(&snapshot, &VALUES[1]); + + // Check a clone of the latest snapshot has clone of the populated cache. 
+ assert_eq!(1, storage.latest_snapshot().verifiable_cache.len()); + assert_in_cache(&storage.latest_snapshot(), &VALUES[1]); + } + + #[tokio::test] + async fn nonverifiable_get_raw_should_succeed() { + #[track_caller] + fn assert_in_cache(snapshot: &Snapshot, value: &[u8]) { + let Some(serialized_value) = snapshot.non_verifiable_cache.get(NV_KEY).unwrap() else { + panic!("should have value in cache"); + }; + assert_eq!(value.to_vec(), serialized_value); + } + + let Fixture { + storage, + _temp_dir, + } = Fixture::new().await; + + // `nonverifiable_get_raw` should return `None` for non-existent value. + let snapshot = storage.latest_snapshot(); + assert!( + snapshot + .nonverifiable_get_raw(NV_KEY) + .await + .unwrap() + .is_none() + ); + + // Write and commit data. + let mut state_delta = storage.new_delta_of_latest_snapshot(); + state_delta.nonverifiable_put_raw(NV_KEY.to_vec(), VALUES[0].to_vec()); + storage.commit(state_delta).await.unwrap(); + + // `nonverifiable_get_raw` on the latest snapshot should return the correct value and cache + // it. + let snapshot = storage.latest_snapshot(); + assert!(snapshot.non_verifiable_cache.is_empty()); + assert_eq!( + Some(VALUES[0].to_vec()), + snapshot.nonverifiable_get_raw(NV_KEY).await.unwrap() + ); + assert_eq!(1, snapshot.non_verifiable_cache.len()); + assert_in_cache(&snapshot, &VALUES[0]); + + // Write and commit different data under the same key. + let mut state_delta = storage.new_delta_of_latest_snapshot(); + state_delta.nonverifiable_put_raw(NV_KEY.to_vec(), VALUES[1].to_vec()); + storage.commit(state_delta).await.unwrap(); + + // `nonverifiable_get_raw` on a v0 snapshot should return the original value, and on the + // latest snapshot should return the updated value. Both caches should be updated. + let snapshot = storage.snapshot(0).unwrap(); + assert!(snapshot.non_verifiable_cache.is_empty()); + assert_eq!( + Some(VALUES[0].to_vec()), + snapshot.nonverifiable_get_raw(NV_KEY).await.unwrap() + ); + assert_eq!(1, snapshot.non_verifiable_cache.len()); + assert_in_cache(&snapshot, &VALUES[0]); + + let snapshot = storage.latest_snapshot(); + assert!(snapshot.non_verifiable_cache.is_empty()); + assert_eq!( + Some(VALUES[1].to_vec()), + snapshot.nonverifiable_get_raw(NV_KEY).await.unwrap() + ); + assert_eq!(1, snapshot.non_verifiable_cache.len()); + assert_in_cache(&snapshot, &VALUES[1]); + + // Check a clone of the latest snapshot has clone of the populated cache. + assert_eq!(1, storage.latest_snapshot().non_verifiable_cache.len()); + assert_in_cache(&storage.latest_snapshot(), &VALUES[1]); + } + + #[tokio::test] + async fn prefix_raw_should_succeed() { + let Fixture { + storage, + _temp_dir, + } = Fixture::new().await; + + // `prefix_raw` should return an empty stream for a non-existent prefix. + let snapshot = storage.latest_snapshot(); + let map: BTreeMap<_, _> = snapshot.prefix_raw(V_KEY).try_collect().await.unwrap(); + assert!(map.is_empty()); + + // Write and commit four entries under a common prefix. + let mut state_delta = storage.new_delta_of_latest_snapshot(); + let kv_iter = VALUES + .iter() + .enumerate() + .map(|(index, value)| (format!("common {index}"), value.to_vec())); + for (key, value) in kv_iter.clone() { + state_delta.put_raw(key, value); + } + storage.commit(state_delta).await.unwrap(); + + // Get a new snapshot, and populate its inner cache with two of the stored values by getting + // them. 
+ let snapshot = storage.latest_snapshot(); + assert!(snapshot.verifiable_cache.is_empty()); + assert!(snapshot.get_raw("common 0").await.unwrap().is_some()); + assert!(snapshot.get_raw("common 2").await.unwrap().is_some()); + assert_eq!(2, snapshot.verifiable_cache.len()); + + // `prefix_raw` should return all the key value pairs and populate the cache. + let actual: BTreeMap<_, _> = snapshot.prefix_raw("com").try_collect().await.unwrap(); + let expected: BTreeMap<_, _> = kv_iter.collect(); + assert_eq!(expected, actual); + assert_eq!(4, snapshot.verifiable_cache.len()); + } +} diff --git a/crates/astria-sequencer/src/transaction/checks.rs b/crates/astria-sequencer/src/transaction/checks.rs index bff86e838e..d1da3f8c64 100644 --- a/crates/astria-sequencer/src/transaction/checks.rs +++ b/crates/astria-sequencer/src/transaction/checks.rs @@ -148,7 +148,6 @@ mod tests { }, }; use bytes::Bytes; - use cnidarium::StateDelta; use super::*; use crate::{ @@ -167,36 +166,36 @@ mod tests { StateReadExt as _, StateWriteExt as _, }, + storage::Storage, test_utils::calculate_rollup_data_submission_fee_from_state, }; #[tokio::test] async fn check_balance_total_fees_transfers_ok() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state_tx = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state_tx.put_base_prefix("astria".to_string()).unwrap(); - state_tx.put_native_asset(nria()).unwrap(); - state_tx + state_delta.put_base_prefix("astria".to_string()).unwrap(); + state_delta.put_native_asset(nria()).unwrap(); + state_delta .put_fees(FeeComponents::<Transfer>::new(12, 0)) .unwrap(); - state_tx + state_delta .put_fees(FeeComponents::<RollupDataSubmission>::new(0, 1)) .unwrap(); - state_tx + state_delta .put_fees(FeeComponents::<Ics20Withdrawal>::new(1, 0)) .unwrap(); - state_tx + state_delta .put_fees(FeeComponents::<InitBridgeAccount>::new(12, 0)) .unwrap(); - state_tx + state_delta .put_fees(FeeComponents::<BridgeLock>::new(0, 1)) .unwrap(); - state_tx + state_delta .put_fees(FeeComponents::<BridgeUnlock>::new(0, 0)) .unwrap(); - state_tx + state_delta .put_fees(FeeComponents::<BridgeSudoChange>::new(24, 0)) .unwrap(); @@ -205,27 +204,27 @@ mod tests { let alice = get_alice_signing_key(); let amount = 100; let data = Bytes::from_static(&[0; 32]); - let transfer_fee = state_tx + let transfer_fee = state_delta .get_fees::<Transfer>() .await .expect("should not error fetching transfer fees") .expect("transfer fees should be stored") .base(); - state_tx + state_delta .increase_balance( - &state_tx + &state_delta .try_base_prefixed(&alice.address_bytes()) .await .unwrap(), &nria(), transfer_fee - + calculate_rollup_data_submission_fee_from_state(&data, &state_tx).await, + + calculate_rollup_data_submission_fee_from_state(&data, &state_delta).await, ) .await .unwrap(); - state_tx + state_delta .increase_balance( - &state_tx + &state_delta .try_base_prefixed(&alice.address_bytes()) .await .unwrap(), @@ -240,7 +239,10 @@ mod tests { asset: other_asset.clone(), amount, fee_asset: nria().into(), - to: state_tx.try_base_prefixed(&[0; ADDRESS_LEN]).await.unwrap(), + to: state_delta + .try_base_prefixed(&[0; ADDRESS_LEN]) + .await + .unwrap(), }), Action::RollupDataSubmission(RollupDataSubmission { rollup_id: RollupId::from_unhashed_bytes([0; 32]), @@ -256,38 +258,39 @@ mod tests { .unwrap(); let signed_tx = tx.sign(&alice); - 
check_balance_for_total_fees_and_transfers(&signed_tx, &state_tx) + check_balance_for_total_fees_and_transfers(&signed_tx, &state_delta) .await .expect("sufficient balance for all actions"); } #[tokio::test] async fn check_balance_total_fees_and_transfers_insufficient_other_asset_balance() { - let storage = cnidarium::TempStorage::new().await.unwrap(); - let snapshot = storage.latest_snapshot(); - let mut state_tx = StateDelta::new(snapshot); + let storage = Storage::new_temp().await; + let mut state_delta = storage.new_delta_of_latest_snapshot(); - state_tx.put_base_prefix(ASTRIA_PREFIX.to_string()).unwrap(); - state_tx.put_native_asset(nria()).unwrap(); - state_tx + state_delta + .put_base_prefix(ASTRIA_PREFIX.to_string()) + .unwrap(); + state_delta.put_native_asset(nria()).unwrap(); + state_delta .put_fees(FeeComponents::<Transfer>::new(12, 0)) .unwrap(); - state_tx + state_delta .put_fees(FeeComponents::<RollupDataSubmission>::new(0, 1)) .unwrap(); - state_tx + state_delta .put_fees(FeeComponents::<Ics20Withdrawal>::new(1, 0)) .unwrap(); - state_tx + state_delta .put_fees(FeeComponents::<InitBridgeAccount>::new(12, 0)) .unwrap(); - state_tx + state_delta .put_fees(FeeComponents::<BridgeLock>::new(0, 1)) .unwrap(); - state_tx + state_delta .put_fees(FeeComponents::<BridgeUnlock>::new(0, 0)) .unwrap(); - state_tx + state_delta .put_fees(FeeComponents::<BridgeSudoChange>::new(24, 0)) .unwrap(); @@ -296,21 +299,21 @@ mod tests { let alice = get_alice_signing_key(); let amount = 100; let data = Bytes::from_static(&[0; 32]); - let transfer_fee = state_tx + let transfer_fee = state_delta .get_fees::<Transfer>() .await .expect("should not error fetching transfer fees") .expect("transfer fees should be stored") .base(); - state_tx + state_delta .increase_balance( - &state_tx + &state_delta .try_base_prefixed(&alice.address_bytes()) .await .unwrap(), &nria(), transfer_fee - + calculate_rollup_data_submission_fee_from_state(&data, &state_tx).await, + + calculate_rollup_data_submission_fee_from_state(&data, &state_delta).await, ) .await .unwrap(); @@ -320,7 +323,10 @@ mod tests { asset: other_asset.clone(), amount, fee_asset: nria().into(), - to: state_tx.try_base_prefixed(&[0; ADDRESS_LEN]).await.unwrap(), + to: state_delta + .try_base_prefixed(&[0; ADDRESS_LEN]) + .await + .unwrap(), }), Action::RollupDataSubmission(RollupDataSubmission { rollup_id: RollupId::from_unhashed_bytes([0; 32]), @@ -336,7 +342,7 @@ mod tests { .unwrap(); let signed_tx = tx.sign(&alice); - let err = check_balance_for_total_fees_and_transfers(&signed_tx, &state_tx) + let err = check_balance_for_total_fees_and_transfers(&signed_tx, &state_delta) .await .err() .unwrap();