diff --git a/.cargo/audit.toml b/.cargo/audit.toml index bfe280fdd3..a24651942e 100644 --- a/.cargo/audit.toml +++ b/.cargo/audit.toml @@ -6,4 +6,10 @@ ignore = [ # `proc-macro-error` is Unmaintained. It is a transient dependency of borsh crates, so cannot # easily be replaced. "RUSTSEC-2024-0370", + # `instant` is Unmaintained. It is a transient dependency of `isahc`, `wiremock` and `ethers`, so + # cannot easily be replaced. + "RUSTSEC-2024-0384", + # `derivative` is Unmaintained. It is a transient dependency of many crates including several + # penumbra ones, so cannot easily be replaced. + "RUSTSEC-2024-0388", ] diff --git a/Cargo.lock b/Cargo.lock index 5d33c6200b..c09d7339fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -507,31 +507,28 @@ dependencies = [ "astria-eyre", "astria-sequencer-client", "astria-telemetry", - "astria-test-utils", - "async-trait", - "axum", + "base64 0.21.7", "bytes", "futures", "hex", + "http 0.2.12", + "http-body 0.4.6", "humantime", - "insta", + "hyper 0.14.30", "itertools 0.12.1", "pbjson-types", "pin-project-lite", "prost", "serde", "serde_json", - "sha2 0.10.8", - "tempfile", - "thiserror", "tokio", "tokio-stream", - "tokio-test", "tokio-util 0.7.12", "tonic 0.10.2", + "tower 0.5.1", + "tower-http", "tracing", "tryhard", - "wiremock", ] [[package]] @@ -717,7 +714,7 @@ dependencies = [ "tokio-stream", "tokio-util 0.7.12", "tonic 0.10.2", - "tower", + "tower 0.4.13", "tracing", "tryhard", "wiremock", @@ -868,7 +865,7 @@ dependencies = [ "thiserror", "tokio", "tonic 0.10.2", - "tower", + "tower 0.4.13", "tower-abci", "tower-actor", "tower-http", @@ -952,7 +949,7 @@ dependencies = [ "tokio-test", "tokio-util 0.7.12", "tonic 0.10.2", - "tower", + "tower 0.4.13", "tracing", "tryhard", "wiremock", @@ -1177,7 +1174,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper", "tokio", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", ] @@ -4779,7 +4776,7 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "tower", + "tower 
0.4.13", "tracing", "url", ] @@ -4816,7 +4813,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util 0.7.12", - "tower", + "tower 0.4.13", "tracing", ] @@ -5837,7 +5834,7 @@ dependencies = [ "tendermint-light-client-verifier", "time", "tonic 0.10.2", - "tower", + "tower 0.4.13", "tracing", ] @@ -6032,7 +6029,7 @@ dependencies = [ "tokio-stream", "tokio-util 0.7.12", "tonic 0.10.2", - "tower", + "tower 0.4.13", "tower-service", "tracing", ] @@ -6110,9 +6107,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -8021,6 +8018,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", + "tokio-util 0.7.12", ] [[package]] @@ -8171,7 +8169,7 @@ dependencies = [ "tokio", "tokio-rustls", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -8199,7 +8197,7 @@ dependencies = [ "prost", "tokio", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -8252,6 +8250,20 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-abci" version = "0.12.0" @@ -8267,7 +8279,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util 0.6.10", - "tower", + "tower 0.4.13", "tracing", ] @@ -8282,7 +8294,7 @@ dependencies = [ "thiserror", "tokio", "tokio-util 0.7.12", - "tower", + "tower 0.4.13", "tracing", ] @@ -8302,6 +8314,7 @@ dependencies = [ "pin-project-lite", "tower-layer", 
"tower-service", + "tracing", ] [[package]] diff --git a/charts/ibc-test.just b/charts/ibc-test.just index 0e330f8da1..6b8e472328 100644 --- a/charts/ibc-test.just +++ b/charts/ibc-test.just @@ -89,10 +89,7 @@ run-without-native tag=defaultTag: # Execute the transfer from Celestia to the Rollup just ibc-test _do-ibc-transfer {{defaultNamespace}} {{sequencer_sudo_address}} - # Add transfer/channel-0/utia as fee-asset - docker run --rm --network host $ASTRIA_CLI_IMAGE sequencer sudo fee-asset add --private-key {{sequencer_sudo_pkey}} --asset transfer/channel-0/utia --sequencer-url {{sequencer_rpc_url}} --sequencer.chain-id {{sequencer_chain_id}} # check that sequencer balance updated correctly - EXPECTED_BALANCE=$(echo "1 * {{transfer_amount}}" | bc) for i in {1..50} do @@ -110,6 +107,11 @@ run-without-native tag=defaultTag: exit 1 fi + # Add transfer/channel-0/utia as allowed fee-asset + docker run --rm --network host $ASTRIA_CLI_IMAGE sequencer sudo fee-asset add --private-key {{sequencer_sudo_pkey}} --asset transfer/channel-0/utia --sequencer-url {{sequencer_rpc_url}} --sequencer.chain-id {{sequencer_chain_id}} + + # TODO: query allowd fee asset verifying succefull addition + [no-cd] run tag=defaultTag: #!/usr/bin/env bash @@ -149,23 +151,6 @@ run tag=defaultTag: # Execute the transfer from Celstia to sequencer with compat address just ibc-test _do-ibc-transfer {{defaultNamespace}} {{compat_address}} - # check that celestia balance updated correctly - for i in {1..50} - do - current_celestia_balance=$(just ibc-test get-celestia-balance) - echo "check $i, balance: $current_celestia_balance, expected: $expected_celestia_balance" - if (( $expected_celestia_balance == $current_celestia_balance )); then - expected_celestia_balance_found="1" - break - else - sleep 1 - fi - done - if [[ -z $expected_celestia_balance_found ]]; then - echo "expected celestia balance was not found after withdraw; IBC transfer from Celestia to the Rollup failed" - exit 1 - fi - # check that 
sequencer balance updated correctly ASTRIA_CLI_IMAGE="{{cli_image}}{{ if tag != '' { replace(':#', '#', tag) } else { '' } }}" EXPECTED_BALANCE=$(echo "1 * {{transfer_amount}}" | bc) @@ -283,23 +268,6 @@ run-timeout tag=defaultTag: # Execute the transfer from Celstia to sequencer with compat address just ibc-test _do-ibc-transfer {{defaultNamespace}} {{compat_address}} - # check that celestia balance updated correctly - for i in {1..50} - do - current_celestia_balance=$(just ibc-test get-celestia-balance) - echo "check $i, balance: $current_celestia_balance, expected: $expected_celestia_balance" - if (( $expected_celestia_balance == $current_celestia_balance )); then - expected_celestia_balance_found="1" - break - else - sleep 1 - fi - done - if [[ -z $expected_celestia_balance_found ]]; then - echo "expected celestia balance was not found after withdraw; IBC transfer from Celestia to the Rollup failed" - exit 1 - fi - # check that sequencer balance updated correctly ASTRIA_CLI_IMAGE="{{cli_image}}{{ if tag != '' { replace(':#', '#', tag) } else { '' } }}" EXPECTED_BALANCE=$(echo "1 * {{transfer_amount}}" | bc) diff --git a/crates/astria-auctioneer/Cargo.toml b/crates/astria-auctioneer/Cargo.toml index 77e9b6ca58..4a70b7f5eb 100644 --- a/crates/astria-auctioneer/Cargo.toml +++ b/crates/astria-auctioneer/Cargo.toml @@ -2,15 +2,12 @@ name = "astria-auctioneer" version = "0.0.1" edition = "2021" -rust-version = "1.76" +rust-version = "1.81" license = "MIT OR Apache-2.0" readme = "README.md" repository = "https://github.com/astriaorg/astria" homepage = "https://astria.org" -[[bin]] -name = "astria-auctioneer" - [dependencies] astria-build-info = { path = "../astria-build-info", features = ["runtime"] } astria-core = { path = "../astria-core", features = ["serde", "client"] } @@ -21,20 +18,16 @@ telemetry = { package = "astria-telemetry", path = "../astria-telemetry", featur "display", ] } -async-trait = { workspace = true } -axum = { workspace = true } +base64 = { 
workspace = true } bytes = { workspace = true } futures = { workspace = true } hex = { workspace = true } humantime = { workspace = true } itertools = { workspace = true } pbjson-types = { workspace = true } -pin-project-lite = { workspace = true } prost = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } -sha2 = { workspace = true } -thiserror = { workspace = true } tokio = { workspace = true, features = [ "macros", "rt-multi-thread", @@ -46,20 +39,19 @@ tokio-util = { workspace = true, features = ["rt"] } tracing = { workspace = true, features = ["attributes"] } tryhard = { workspace = true } tonic = { workspace = true } -tokio-stream = { workspace = true, features = ["net"] } +tokio-stream = { workspace = true, features = ["sync"] } + +pin-project-lite = "0.2.15" +tower-http = { version = "0.4.4", features = ["map-response-body", "trace"] } +tower = { version = "0.5.1", features = ["util"] } +hyper.workspace = true +http = "0.2.11" +http-body = "0.4.5" [dev-dependencies] -astria-core = { path = "../astria-core", features = ["client"] } config = { package = "astria-config", path = "../astria-config", features = [ "tests", ] } -insta = { workspace = true, features = ["json"] } -tempfile = { workspace = true } -test_utils = { package = "astria-test-utils", path = "../astria-test-utils", features = [ - "geth", -] } -tokio-test = { workspace = true } -wiremock = { workspace = true } [build-dependencies] astria-build-info = { path = "../astria-build-info", features = ["build"] } diff --git a/crates/astria-auctioneer/src/auction/allocation_rule.rs b/crates/astria-auctioneer/src/auction/allocation_rule.rs deleted file mode 100644 index 4eab5a23d5..0000000000 --- a/crates/astria-auctioneer/src/auction/allocation_rule.rs +++ /dev/null @@ -1,32 +0,0 @@ -//! The allocation rule is the mechanism by which the auction processes incoming bids and determines -//! the winner. 
-use super::Bundle; - -pub(super) struct FirstPrice { - highest_bid: Option, -} - -impl FirstPrice { - pub(super) fn new() -> Self { - Self { - highest_bid: None, - } - } - - /// Submit a bundle with a bid. - /// - /// Returns `true` if the bid is accepted as the highest bid. - pub(crate) fn bid(&mut self, bundle: Bundle) -> bool { - if bundle.bid() > self.highest_bid.as_ref().map_or(0, Bundle::bid) { - self.highest_bid = Some(bundle); - true - } else { - false - } - } - - /// Returns the winner of the auction, if one exists. - pub(crate) fn winner(self) -> Option { - self.highest_bid - } -} diff --git a/crates/astria-auctioneer/src/auction/builder.rs b/crates/astria-auctioneer/src/auction/builder.rs deleted file mode 100644 index 9038fb76f9..0000000000 --- a/crates/astria-auctioneer/src/auction/builder.rs +++ /dev/null @@ -1,86 +0,0 @@ -use std::time::Duration; - -use astria_core::{ - generated::sequencerblock::v1::sequencer_service_client::SequencerServiceClient, - primitive::v1::{ - asset, - RollupId, - }, -}; -use tokio::sync::mpsc; -use tokio_util::sync::CancellationToken; - -use super::{ - Auction, - Handle, - Id, - SequencerKey, -}; -use crate::Metrics; - -pub(crate) struct Builder { - pub(crate) metrics: &'static Metrics, - pub(crate) shutdown_token: CancellationToken, - - /// The endpoint for the sequencer gRPC service used to get pending nonces - pub(crate) sequencer_grpc_client: SequencerServiceClient, - /// The endpoint for the sequencer ABCI service used to submit transactions - pub(crate) sequencer_abci_client: sequencer_client::HttpClient, - /// The amount of time to wait after a commit before closing the auction for bids and - /// submitting the resulting transaction - pub(crate) latency_margin: Duration, - /// The ID of the auction to be run - pub(crate) auction_id: Id, - /// The key used to sign sequencer transactions - pub(crate) sequencer_key: SequencerKey, - /// The denomination of the fee asset used in the sequencer transactions - pub(crate) 
fee_asset_denomination: asset::Denom, - /// The chain ID used for sequencer transactions - pub(crate) sequencer_chain_id: String, - /// The rollup ID used for `RollupDataSubmission` with the auction result - pub(crate) rollup_id: RollupId, -} - -impl Builder { - pub(crate) fn build(self) -> (Handle, Auction) { - let Self { - metrics, - shutdown_token, - sequencer_grpc_client, - sequencer_abci_client, - latency_margin, - auction_id, - fee_asset_denomination, - rollup_id, - sequencer_key, - sequencer_chain_id, - } = self; - - // TODO: get the capacities from config or something instead of using a magic number - let (commands_tx, commands_rx) = mpsc::channel(16); - let (new_bundles_tx, new_bundles_rx) = mpsc::channel(16); - - let auction = Auction { - metrics, - shutdown_token, - sequencer_grpc_client, - sequencer_abci_client, - commands_rx, - new_bundles_rx, - latency_margin, - id: auction_id, - sequencer_key, - fee_asset_denomination, - sequencer_chain_id, - rollup_id, - }; - - ( - Handle { - commands_tx, - new_bundles_tx, - }, - auction, - ) - } -} diff --git a/crates/astria-auctioneer/src/auction/manager.rs b/crates/astria-auctioneer/src/auction/manager.rs deleted file mode 100644 index e85f6f3f1f..0000000000 --- a/crates/astria-auctioneer/src/auction/manager.rs +++ /dev/null @@ -1,202 +0,0 @@ -/// The auction Manager is responsible for managing running auction futures and their -/// associated handles. 
-use std::collections::HashMap; - -use astria_core::{ - generated::sequencerblock::v1::sequencer_service_client::SequencerServiceClient, - primitive::v1::{ - asset, - RollupId, - }, -}; -use astria_eyre::eyre::{ - self, - OptionExt as _, - WrapErr as _, -}; -use tokio_util::{ - sync::CancellationToken, - task::JoinMap, -}; -use tonic::transport::Endpoint; -use tracing::{ - info, - instrument, -}; - -use super::{ - Bundle, - Handle, - Id, - SequencerKey, -}; -use crate::flatten_result; - -pub(crate) struct Builder { - pub(crate) metrics: &'static crate::Metrics, - pub(crate) shutdown_token: CancellationToken, - - /// The gRPC endpoint for the sequencer service used by auctions. - pub(crate) sequencer_grpc_endpoint: String, - /// The ABCI endpoint for the sequencer service used by auctions. - pub(crate) sequencer_abci_endpoint: String, - /// The amount of time to run the auction timer for. - pub(crate) latency_margin: std::time::Duration, - /// The private key used to sign sequencer transactions. 
- pub(crate) sequencer_private_key_path: String, - /// The prefix for the address used to sign sequencer transactions - pub(crate) sequencer_address_prefix: String, - /// The denomination of the fee asset used in the sequencer transactions - pub(crate) fee_asset_denomination: asset::Denom, - /// The chain ID for sequencer transactions - pub(crate) sequencer_chain_id: String, - /// The rollup ID for the `RollupDataSubmission`s with auction results - pub(crate) rollup_id: String, -} - -impl Builder { - pub(crate) fn build(self) -> eyre::Result { - let Self { - metrics, - shutdown_token, - sequencer_grpc_endpoint, - sequencer_abci_endpoint, - latency_margin, - fee_asset_denomination, - rollup_id, - sequencer_private_key_path, - sequencer_address_prefix, - sequencer_chain_id, - } = self; - - let sequencer_key = SequencerKey::builder() - .path(sequencer_private_key_path) - .prefix(sequencer_address_prefix) - .try_build() - .wrap_err("failed to load sequencer private key")?; - info!(address = %sequencer_key.address(), "loaded sequencer signer"); - - let sequencer_grpc_uri: tonic::transport::Uri = sequencer_grpc_endpoint - .parse() - .wrap_err("failed to parse sequencer grpc endpoint as URI")?; - let sequencer_grpc_client = - SequencerServiceClient::new(Endpoint::from(sequencer_grpc_uri).connect_lazy()); - - let sequencer_abci_client = - sequencer_client::HttpClient::new(sequencer_abci_endpoint.as_str()) - .wrap_err("failed constructing sequencer abci client")?; - - let rollup_id = RollupId::from_unhashed_bytes(&rollup_id); - - Ok(Manager { - metrics, - shutdown_token, - sequencer_grpc_client, - sequencer_abci_client, - latency_margin, - running_auctions: JoinMap::new(), - auction_handles: HashMap::new(), - sequencer_key, - fee_asset_denomination, - sequencer_chain_id, - rollup_id, - }) - } -} - -pub(crate) struct Manager { - metrics: &'static crate::Metrics, - shutdown_token: CancellationToken, - sequencer_grpc_client: SequencerServiceClient, - sequencer_abci_client: 
sequencer_client::HttpClient, - latency_margin: std::time::Duration, - running_auctions: JoinMap>, - auction_handles: HashMap, - sequencer_key: SequencerKey, - fee_asset_denomination: asset::Denom, - sequencer_chain_id: String, - rollup_id: RollupId, -} - -impl Manager { - #[instrument(skip(self))] - pub(crate) fn new_auction(&mut self, auction_id: Id) { - let (handle, auction) = super::Builder { - metrics: self.metrics, - shutdown_token: self.shutdown_token.child_token(), - sequencer_grpc_client: self.sequencer_grpc_client.clone(), - sequencer_abci_client: self.sequencer_abci_client.clone(), - latency_margin: self.latency_margin, - auction_id, - sequencer_key: self.sequencer_key.clone(), - fee_asset_denomination: self.fee_asset_denomination.clone(), - sequencer_chain_id: self.sequencer_chain_id.clone(), - rollup_id: self.rollup_id, - } - .build(); - - // spawn and save handle - self.running_auctions.spawn(auction_id, auction.run()); - self.auction_handles.insert(auction_id, handle); - } - - pub(crate) fn abort_auction(&mut self, auction_id: Id) -> eyre::Result<()> { - let handle = self - .auction_handles - .get_mut(&auction_id) - .ok_or_eyre("unable to get handle for the given auction")?; - - handle - .try_abort() - .wrap_err("failed to send command to abort auction")?; - Ok(()) - } - - #[instrument(skip(self))] - pub(crate) fn start_timer(&mut self, auction_id: Id) -> eyre::Result<()> { - let handle = self - .auction_handles - .get_mut(&auction_id) - .ok_or_eyre("unable to get handle for the given auction")?; - - handle - .start_timer() - .wrap_err("failed to send command to start timer to auction")?; - - Ok(()) - } - - #[instrument(skip(self))] - pub(crate) fn start_processing_bids(&mut self, auction_id: Id) -> eyre::Result<()> { - let handle = self - .auction_handles - .get_mut(&auction_id) - .ok_or_eyre("unable to get handle for the given auction")?; - - handle - .start_processing_bids() - .wrap_err("failed to send command to start processing bids")?; - Ok(()) 
- } - - pub(crate) fn try_send_bundle(&mut self, auction_id: Id, bundle: Bundle) -> eyre::Result<()> { - self.auction_handles - .get_mut(&auction_id) - .ok_or_eyre("unable to get handle for the given auction")? - .try_send_bundle(bundle) - .wrap_err("failed to add bundle to auction") - } - - pub(crate) async fn join_next(&mut self) -> Option<(Id, eyre::Result<()>)> { - if let Some((auction_id, result)) = self.running_auctions.join_next().await { - // TODO: get rid of this expect somehow - self.auction_handles - .remove(&auction_id) - .expect("handle should always exist for running auction"); - - Some((auction_id, flatten_result(result))) - } else { - None - } - } -} diff --git a/crates/astria-auctioneer/src/auction/mod.rs b/crates/astria-auctioneer/src/auction/mod.rs deleted file mode 100644 index 208a9c862e..0000000000 --- a/crates/astria-auctioneer/src/auction/mod.rs +++ /dev/null @@ -1,386 +0,0 @@ -//! The Auction is repsonsible for running an auction for a given block. An auction advances through -//! the following states, controlled via the `commands_rx` channel received: -//! 1. The auction is initialized but not yet started (i.e. no commands have been received). -//! 2. After receiving a `Command::StartProcessingBids`, the auction will start processing incoming -//! bundles from `new_bundles_rx`. -//! 3. After receiving a `Command::StartTimer`, the auction will set a timer for `latency_margin` -//! (denominated in milliseconds). -//! 4. Once the timer expires, the auction will choose a winner based on its `AllocationRule` and -//! submit it to the sequencer. -//! -//! ## Aborting an Auction -//! The auction may also be aborted at any point before the timer expires by receiving a -//! `Command::Abort`. This will cause the auction to return early without submitting a winner, -//! effectively discarding any bundles that were processed. -//! This is used for leveraging optimsitic execution, running an auction for block data that has -//! 
been proposed in the sequencer network's cometBFT but not yet finalized. -//! We assume that most proposals are adopted in cometBFT, allowing us to buy a few hundred -//! milliseconds before they are finalized. However, if multiple rounds of voting invalidate a -//! proposal, we can abort the auction and avoid submitting a potentially invalid bundle. In this -//! case, the auction will abort and a new one will be created for the newly processed proposal -//! (which will be received by the Optimistic Executor via the optimistic block stream). -//! -//! ## Auction Result -//! The auction result is a `Bundle` that is signed by the Auctioneer and submitted to the rollup -//! via the sequencer. The rollup defines a trusted Auctioneer address that it allows to submit -//! bundles, and thus must verify the Auctioneer's signature over this bundle. -//! -//! Since the sequencer does not include the transaction signer's metadata with the `RollupData` -//! events that it saves in its block data, the Auctioneer must include this metadata in its -//! `RollupDataSubmission`s. This is done by wrapping the winning `Bundle` object in an -//! `AuctionResult` object, which is then serialized into the `RollupDataSubmission`. -//! -//! ## Submission to Sequencer -//! The auction will submit the winning bundle to the sequencer via the `broadcast_tx_sync` ABCI(?) -//! endpoint. -//! In order to save time on fetching a nonce, the auction will fetch the next pending nonce as soon -//! as it received the signal to start the timer. This corresponds to the sequencer block being -//! committed, thus providing the latest pending nonce. 
- -mod builder; -use std::time::Duration; - -use allocation_rule::FirstPrice; -use astria_core::{ - generated::sequencerblock::v1::{ - sequencer_service_client::SequencerServiceClient, - GetPendingNonceRequest, - }, - primitive::v1::{ - asset, - RollupId, - }, - protocol::transaction::v1::Transaction, -}; -use astria_eyre::eyre::{ - self, - eyre, - Context, - OptionExt as _, -}; -pub(crate) use builder::Builder; -use sequencer_client::{ - tendermint_rpc::endpoint::broadcast::tx_sync, - Address, - SequencerClientExt, -}; -use telemetry::display::base64; -use tokio::{ - select, - sync::mpsc, -}; -use tokio_util::sync::CancellationToken; -use tracing::{ - debug, - error, - info, - instrument, - warn, - Instrument, -}; - -use crate::{ - bundle::Bundle, - sequencer_key::SequencerKey, - Metrics, -}; - -pub(crate) mod manager; - -#[derive(Hash, Eq, PartialEq, Clone, Copy, Debug)] -pub(crate) struct Id([u8; 32]); - -impl Id { - pub(crate) fn from_sequencer_block_hash(block_hash: [u8; 32]) -> Self { - Self(block_hash) - } -} - -impl AsRef<[u8]> for Id { - fn as_ref(&self) -> &[u8] { - &self.0 - } -} - -pub(crate) use manager::Manager; - -mod allocation_rule; - -enum Command { - StartProcessingBids, - StartTimer, - Abort, -} - -pub(crate) struct Handle { - commands_tx: mpsc::Sender, - new_bundles_tx: mpsc::Sender, -} - -impl Handle { - pub(crate) fn try_abort(&mut self) -> eyre::Result<()> { - self.commands_tx - .try_send(Command::Abort) - .wrap_err("unable to send abort command to auction")?; - - Ok(()) - } - - pub(crate) fn start_processing_bids(&mut self) -> eyre::Result<()> { - self.commands_tx - .try_send(Command::StartProcessingBids) - .wrap_err("unable to send command to start processing bids to auction")?; - Ok(()) - } - - pub(crate) fn start_timer(&mut self) -> eyre::Result<()> { - self.commands_tx - .try_send(Command::StartTimer) - .wrap_err("unable to send command to start time to auction")?; - - Ok(()) - } - - pub(crate) fn try_send_bundle(&mut self, bundle: 
Bundle) -> eyre::Result<()> { - self.new_bundles_tx - .try_send(bundle) - .wrap_err("bid channel full")?; - - Ok(()) - } -} - -pub(crate) struct Auction { - #[allow(dead_code)] - metrics: &'static Metrics, - shutdown_token: CancellationToken, - - /// The sequencer's gRPC client, used for fetching pending nonces - sequencer_grpc_client: SequencerServiceClient, - /// The sequencer's ABCI client, used for submitting transactions - sequencer_abci_client: sequencer_client::HttpClient, - /// Channel for receiving commands sent via the handle - commands_rx: mpsc::Receiver, - /// Channel for receiving new bundles - new_bundles_rx: mpsc::Receiver, - /// The time between receiving a block commitment - latency_margin: Duration, - /// The ID of the auction - id: Id, - /// The key used to sign transactions on the sequencer - sequencer_key: SequencerKey, - /// Fee asset for submitting transactions - fee_asset_denomination: asset::Denom, - /// The chain ID used for sequencer transactions - sequencer_chain_id: String, - /// Rollup ID to submit the auction result to - rollup_id: RollupId, -} - -impl Auction { - pub(crate) async fn run(mut self) -> eyre::Result<()> { - let mut latency_margin_timer = None; - // TODO: do we want to make this configurable to allow for more complex allocation rules? - let mut allocation_rule = FirstPrice::new(); - let mut auction_is_open = false; - - let mut nonce_fetch: Option>> = None; - - let auction_result = loop { - select! 
{ - biased; - - () = self.shutdown_token.cancelled() => break Err(eyre!("received shutdown signal")), - - // get the auction winner when the timer expires - _ = async { latency_margin_timer.as_mut().unwrap() }, if latency_margin_timer.is_some() => { - break Ok(allocation_rule.winner()); - } - - Some(cmd) = self.commands_rx.recv() => { - match cmd { - Command::Abort => { - // abort the auction early - break Err(eyre!("auction {id} received abort signal", id = base64(&self.id))); - }, - Command::StartProcessingBids => { - if auction_is_open { - break Err(eyre!("auction received signal to start processing bids twice")); - } - auction_is_open = true; - }, - Command::StartTimer => { - if !auction_is_open { - break Err(eyre!("auction received signal to start timer before signal to start processing bids")); - } - - // set the timer - latency_margin_timer = Some(tokio::time::sleep(self.latency_margin)); - - // we wait for commit because we want the pending nonce from the committed block - nonce_fetch = { - let client = self.sequencer_grpc_client.clone(); - let &address = self.sequencer_key.address(); - Some(tokio::task::spawn(async move { get_pending_nonce(client, address, self.metrics).await })) - }; - } - } - } - - Some(bundle) = self.new_bundles_rx.recv(), if auction_is_open => { - if allocation_rule.bid(bundle.clone()) { - info!( - auction.id = %base64(self.id), - bundle.bid = %bundle.bid(), - "received new highest bid" - ); - } else { - debug!( - auction.id = %base64(self.id), - bundle.bid = %bundle.bid(), - "received bid lower than current highest bid, discarding" - ); - } - } - - } - }; - - // TODO: separate the rest of this to a different object, e.g. AuctionResult? - // TODO: flatten this or get rid of the option somehow? - // await the nonce fetch result - let nonce = nonce_fetch - .expect( - "should have received commit and fetched pending nonce before exiting the auction \ - loop", - ) - .await - .wrap_err("get_pending_nonce task failed")? 
- .wrap_err("failed to fetch nonce")?; - - // serialize, sign and submit to the sequencer - let transaction_body = auction_result - .wrap_err("auction failed unexpectedly")? - .ok_or_eyre("auction ended with no winning bid")? - .into_transaction_body( - nonce, - self.rollup_id, - self.sequencer_key.clone(), - self.fee_asset_denomination.clone(), - self.sequencer_chain_id, - ); - - let transaction = transaction_body.sign(self.sequencer_key.signing_key()); - - let submission_result = select! { - biased; - - // TODO: should this be Ok(())? or Ok("received shutdown signal")? - () = self.shutdown_token.cancelled() => Err(eyre!("received shutdown signal during auction result submission")), - - result = submit_transaction(self.sequencer_abci_client.clone(), transaction, self.metrics) => { - // TODO: how to handle submission failure better? - match result { - Ok(resp) => { - // TODO: handle failed submission instead of just logging the result - info!(auction.id = %base64(self.id), auction.result = %resp.log, "auction result submitted to sequencer"); - Ok(()) - }, - Err(e) => { - error!(auction.id = %base64(self.id), err = %e, "failed to submit auction result to sequencer"); - Err(e).wrap_err("failed to submit auction result to sequencer") - }, - } - } - }; - submission_result - } -} - -#[instrument(skip_all, fields(%address, err))] -async fn get_pending_nonce( - client: SequencerServiceClient, - address: Address, - // TODO: emit metrics here - #[allow(unused_variables)] metrics: &'static Metrics, -) -> eyre::Result { - let span = tracing::Span::current(); - let retry_cfg = tryhard::RetryFutureConfig::new(1024) - .exponential_backoff(Duration::from_millis(100)) - .max_delay(Duration::from_secs(2)) - .on_retry( - move |attempt: u32, next_delay: Option, error: &tonic::Status| { - let wait_duration = next_delay - .map(humantime::format_duration) - .map(tracing::field::display); - warn!( - parent: &span, - attempt, - wait_duration, - error = error as &dyn std::error::Error, - 
"attempt to get pending nonce failed; retrying after backoff", - ); - futures::future::ready(()) - }, - ); - - let nonce = tryhard::retry_fn(|| { - let mut client = client.clone(); - - async move { - client - .get_pending_nonce(GetPendingNonceRequest { - address: Some(address.into_raw()), - }) - .await - } - }) - .with_config(retry_cfg) - .in_current_span() - .await - .wrap_err("failed to get pending nonce")? - .into_inner() - .inner; - - Ok(nonce) -} - -async fn submit_transaction( - client: sequencer_client::HttpClient, - transaction: Transaction, - // TODO: emit metrics here - #[allow(unused_variables)] metrics: &'static Metrics, -) -> eyre::Result { - let span = tracing::Span::current(); - let retry_cfg = tryhard::RetryFutureConfig::new(1024) - .exponential_backoff(Duration::from_millis(100)) - .max_delay(Duration::from_secs(2)) - .on_retry( - move |attempt: u32, - next_delay: Option, - error: &sequencer_client::extension_trait::Error| { - let wait_duration = next_delay - .map(humantime::format_duration) - .map(tracing::field::display); - warn!( - parent: &span, - attempt, - wait_duration, - error = error as &dyn std::error::Error, - "attempt to submit transaction failed; retrying after backoff", - ); - futures::future::ready(()) - }, - ); - - tryhard::retry_fn(|| { - let client = client.clone(); - let transaction = transaction.clone(); - - async move { client.submit_transaction_sync(transaction).await } - }) - .with_config(retry_cfg) - .in_current_span() - .await - .wrap_err("failed to submit transaction") -} diff --git a/crates/astria-auctioneer/src/auctioneer/inner.rs b/crates/astria-auctioneer/src/auctioneer/inner.rs deleted file mode 100644 index e2da31f057..0000000000 --- a/crates/astria-auctioneer/src/auctioneer/inner.rs +++ /dev/null @@ -1,162 +0,0 @@ -use std::time::Duration; - -use astria_eyre::eyre::{ - self, - WrapErr as _, -}; -use itertools::Itertools as _; -use tokio::{ - select, - time::timeout, -}; -use tokio_util::{ - sync::CancellationToken, 
- task::JoinMap, -}; -use tracing::{ - error, - info, - warn, -}; - -use crate::{ - auction, - flatten_result, - optimistic_executor, - Config, - Metrics, -}; - -pub(super) struct Auctioneer { - /// Used to signal the service to shutdown - shutdown_token: CancellationToken, - - /// The different long-running tasks that make up the Auctioneer - tasks: JoinMap<&'static str, eyre::Result<()>>, -} - -impl Auctioneer { - const OPTIMISTIC_EXECUTOR: &'static str = "optimistic_executor"; - const _BUNDLE_COLLECTOR: &'static str = "bundle_collector"; - - /// Creates an [`Auctioneer`] service from a [`Config`] and [`Metrics`]. - pub(super) fn new( - cfg: Config, - metrics: &'static Metrics, - shutdown_token: CancellationToken, - ) -> eyre::Result { - let Config { - sequencer_grpc_endpoint, - sequencer_abci_endpoint, - latency_margin_ms, - rollup_grpc_endpoint, - rollup_id, - sequencer_chain_id, - sequencer_private_key_path, - sequencer_address_prefix, - fee_asset_denomination, - .. - } = cfg; - - let mut tasks = JoinMap::new(); - - let auctions = auction::manager::Builder { - metrics, - shutdown_token: shutdown_token.clone(), - sequencer_grpc_endpoint: sequencer_grpc_endpoint.clone(), - sequencer_abci_endpoint, - latency_margin: Duration::from_millis(latency_margin_ms), - sequencer_private_key_path, - sequencer_address_prefix, - fee_asset_denomination, - sequencer_chain_id, - rollup_id: rollup_id.clone(), - } - .build() - .wrap_err("failed to initialize auction manager")?; - - let optimistic_executor = optimistic_executor::Builder { - metrics, - shutdown_token: shutdown_token.clone(), - sequencer_grpc_endpoint, - rollup_id, - rollup_grpc_endpoint, - auctions, - } - .build(); - - tasks.spawn(Self::OPTIMISTIC_EXECUTOR, async { - optimistic_executor - .startup() - .await - .wrap_err("optimistic executor startup failed")? 
- .run() - .await - }); - - Ok(Self { - shutdown_token, - tasks, - }) - } - - /// Runs the [`Auctioneer`] service until it received an exit signal, or one of the constituent - /// tasks either ends unexpectedly or returns an error. - pub(super) async fn run(mut self) -> eyre::Result<()> { - let reason = select! { - biased; - - () = self.shutdown_token.cancelled() => { - Ok("auctioneer received shutdown signal") - }, - - Some((name, res)) = self.tasks.join_next() => { - flatten_result(res) - .wrap_err_with(|| format!("task `{name}` failed")) - .map(|()| "task `{name}` exited unexpectedly") - } - }; - - match reason { - Ok(msg) => info!(%msg, "received shutdown signal"), - Err(err) => error!(%err, "shutting down due to error"), - } - - self.shutdown().await; - Ok(()) - } - - /// Initiates shutdown of the Auctioneer and waits for all the constituent tasks to shut down. - async fn shutdown(mut self) { - self.shutdown_token.cancel(); - - let shutdown_loop = async { - while let Some((name, res)) = self.tasks.join_next().await { - let message = "task shut down"; - match flatten_result(res) { - Ok(()) => { - info!(name, message); - } - Err(err) => { - error!(name, %err, message); - } - } - } - }; - - info!("signalling all tasks to shut down; waiting 25 seconds for exit"); - if timeout(Duration::from_secs(25), shutdown_loop) - .await - .is_err() - { - let tasks = self.tasks.keys().join(", "); - warn!( - tasks = format_args!("[{tasks}]"), - "aborting all tasks that have not yet shut down" - ); - } else { - info!("all tasks have shut down regularly"); - } - info!("shutting down"); - } -} diff --git a/crates/astria-auctioneer/src/auctioneer/inner/auction/allocation_rule.rs b/crates/astria-auctioneer/src/auctioneer/inner/auction/allocation_rule.rs new file mode 100644 index 0000000000..0cc3475dbe --- /dev/null +++ b/crates/astria-auctioneer/src/auctioneer/inner/auction/allocation_rule.rs @@ -0,0 +1,50 @@ +//! 
The allocation rule is the mechanism by which the auction processes incoming bids and determines +//! the winner. +use std::sync::Arc; + +use tracing::{ + info, + instrument, +}; + +use super::Bundle; + +pub(super) struct FirstPrice { + highest_bid: Option>, +} + +impl FirstPrice { + pub(super) fn new() -> Self { + Self { + highest_bid: None, + } + } + + /// Submit a bundle with a bid. + /// + /// Returns `true` if the bid is accepted as the highest bid. + // TODO: identify the incumbant and candidate by their hash? + #[instrument(skip_all, fields( + current_winner.bid = self.highest_bid.as_ref().map(|bundle| bundle.bid()), + candidate.bid = candidate.bid(), + ))] + pub(super) fn bid(&mut self, candidate: &Arc) { + let winner = if let Some(current) = self.highest_bid.as_mut() { + if candidate.bid() > current.bid() { + *current = candidate.clone(); + "candidate" + } else { + "incumbant" + } + } else { + self.highest_bid = Some(candidate.clone()); + "candidate" + }; + info!("highest bidder is {winner}"); + } + + /// Returns the winner of the auction, if one exists. + pub(super) fn winner(self) -> Option> { + self.highest_bid + } +} diff --git a/crates/astria-auctioneer/src/auctioneer/inner/auction/factory.rs b/crates/astria-auctioneer/src/auctioneer/inner/auction/factory.rs new file mode 100644 index 0000000000..c85e67ca8c --- /dev/null +++ b/crates/astria-auctioneer/src/auctioneer/inner/auction/factory.rs @@ -0,0 +1,78 @@ +/// The auction Manager is responsible for managing running auction futures and their +/// associated handles. 
+use astria_core::{ + primitive::v1::{ + RollupId, + asset, + }, + sequencerblock::v1::block::FilteredSequencerBlock, +}; +use tokio::sync::{ + mpsc, + oneshot, +}; +use tokio_util::sync::CancellationToken; + +use super::{ + Auction, + PendingNonceSubscriber, + SequencerKey, + Worker, +}; + +pub(in crate::auctioneer::inner) struct Factory { + #[allow(dead_code)] + pub(in crate::auctioneer::inner) metrics: &'static crate::Metrics, + pub(in crate::auctioneer::inner) sequencer_abci_client: sequencer_client::HttpClient, + pub(in crate::auctioneer::inner) latency_margin: std::time::Duration, + pub(in crate::auctioneer::inner) sequencer_key: SequencerKey, + pub(in crate::auctioneer::inner) fee_asset_denomination: asset::Denom, + pub(in crate::auctioneer::inner) sequencer_chain_id: String, + pub(in crate::auctioneer::inner) rollup_id: RollupId, + pub(in crate::auctioneer::inner) pending_nonce: PendingNonceSubscriber, + pub(in crate::auctioneer::inner) cancellation_token: CancellationToken, +} + +impl Factory { + pub(in crate::auctioneer::inner) fn start_new( + &self, + block: &FilteredSequencerBlock, + ) -> Auction { + let id = super::Id::from_sequencer_block_hash(block.block_hash()); + let block_hash = *block.block_hash(); + let height = block.height().into(); + + // TODO: get the capacities from config or something instead of using a magic number + let (start_bids_tx, start_bids_rx) = oneshot::channel(); + let (start_timer_tx, start_timer_rx) = oneshot::channel(); + let (bundles_tx, bundles_rx) = mpsc::unbounded_channel(); + + let cancellation_token = self.cancellation_token.child_token(); + let auction = Worker { + sequencer_abci_client: self.sequencer_abci_client.clone(), + start_bids: Some(start_bids_rx), + start_timer: Some(start_timer_rx), + bundles: bundles_rx, + latency_margin: self.latency_margin, + id, + sequencer_key: self.sequencer_key.clone(), + fee_asset_denomination: self.fee_asset_denomination.clone(), + sequencer_chain_id: 
self.sequencer_chain_id.clone(), + rollup_id: self.rollup_id, + pending_nonce: self.pending_nonce.clone(), + cancellation_token: cancellation_token.clone(), + }; + + Auction { + id, + block_hash, + height, + hash_of_executed_block_on_rollup: None, + start_bids: Some(start_bids_tx), + start_timer: Some(start_timer_tx), + bundles: bundles_tx, + cancellation_token, + worker: tokio::task::spawn(auction.run()), + } + } +} diff --git a/crates/astria-auctioneer/src/auctioneer/inner/auction/mod.rs b/crates/astria-auctioneer/src/auctioneer/inner/auction/mod.rs new file mode 100644 index 0000000000..a06a10e2c8 --- /dev/null +++ b/crates/astria-auctioneer/src/auctioneer/inner/auction/mod.rs @@ -0,0 +1,250 @@ +//! The Auction is repsonsible for running an auction for a given block. An auction advances through +//! the following states, controlled via the `commands_rx` channel received: +//! 1. The auction is initialized but not yet started (i.e. no commands have been received). +//! 2. After receiving a `Command::StartProcessingBids`, the auction will start processing incoming +//! bundles from `new_bundles_rx`. +//! 3. After receiving a `Command::StartTimer`, the auction will set a timer for `latency_margin` +//! (denominated in milliseconds). +//! 4. Once the timer expires, the auction will choose a winner based on its `AllocationRule` and +//! submit it to the sequencer. +//! +//! ## Aborting an Auction +//! The auction may also be aborted at any point before the timer expires. +//! This will cause the auction to return early without submitting a winner, +//! effectively discarding any bundles that were processed. +//! This is used for leveraging optimsitic execution, running an auction for block data that has +//! been proposed in the sequencer network's cometBFT but not yet finalized. +//! We assume that most proposals are adopted in cometBFT, allowing us to buy a few hundred +//! milliseconds before they are finalized. 
However, if multiple rounds of voting invalidate a +//! proposal, we can abort the auction and avoid submitting a potentially invalid bundle. In this +//! case, the auction will abort and a new one will be created for the newly processed proposal +//! (which will be received by the Optimistic Executor via the optimistic block stream). +//! +//! ## Auction Result +//! The auction result is a `Bundle` that is signed by the Auctioneer and submitted to the rollup +//! via the sequencer. The rollup defines a trusted Auctioneer address that it allows to submit +//! bundles, and thus must verify the Auctioneer's signature over this bundle. +//! +//! Since the sequencer does not include the transaction signer's metadata with the `RollupData` +//! events that it saves in its block data, the Auctioneer must include this metadata in its +//! `RollupDataSubmission`s. This is done by wrapping the winning `Bundle` object in an +//! `AuctionResult` object, which is then serialized into the `RollupDataSubmission`. +//! +//! ## Submission to Sequencer +//! The auction will submit the winning bundle to the sequencer via the `broadcast_tx_sync` ABCI(?) +//! endpoint. +//! In order to save time on fetching a nonce, the auction will fetch the next pending nonce as soon +//! as it received the signal to start the timer. This corresponds to the sequencer block being +//! committed, thus providing the latest pending nonce. 
+ +use std::{ + fmt::Display, + sync::Arc, +}; + +use astria_core::{ + self, + sequencerblock::v1::block::BlockHash, +}; +use astria_eyre::eyre::{ + self, + Context, + bail, + ensure, + eyre, +}; +use futures::{ + Future, + FutureExt as _, +}; +use sequencer_client::tendermint_rpc::endpoint::broadcast::tx_sync; +use telemetry::display::base64; +use tokio::{ + sync::{ + mpsc, + oneshot, + }, + task::JoinHandle, +}; +use tokio_util::sync::CancellationToken; +use tracing::instrument; + +use super::PendingNonceSubscriber; +use crate::{ + block::Commitment, + bundle::Bundle, + sequencer_key::SequencerKey, +}; + +pub(super) mod factory; +pub(super) use factory::Factory; +mod allocation_rule; +mod worker; +use worker::Worker; + +#[derive(Hash, Eq, PartialEq, Clone, Copy, Debug)] +pub(super) struct Id([u8; 32]); + +impl Id { + pub(super) fn from_sequencer_block_hash(block_hash: &BlockHash) -> Self { + Self(block_hash.get()) + } +} + +impl std::fmt::Display for Id { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + use base64::{ + display::Base64Display, + engine::general_purpose::STANDARD, + }; + Base64Display::new(self.0.as_ref(), &STANDARD).fmt(f) + } +} + +pub(super) struct Auction { + id: Id, + block_hash: BlockHash, + height: u64, + hash_of_executed_block_on_rollup: Option<[u8; 32]>, + start_bids: Option>, + start_timer: Option>, + bundles: mpsc::UnboundedSender>, + cancellation_token: CancellationToken, + worker: JoinHandle>, +} + +impl Auction { + pub(super) fn abort(&self) { + self.worker.abort(); + } + + pub(super) fn cancel(&self) { + self.cancellation_token.cancel(); + } + + pub(in crate::auctioneer::inner) fn id(&self) -> &Id { + &self.id + } + + // TODO: identify the commitment in span fields + #[instrument(skip_all, fields(id = %self.id), err)] + pub(super) fn start_timer(&mut self, commitment: Commitment) -> eyre::Result<()> { + ensure!( + &self.block_hash == commitment.block_hash() && self.height == commitment.height(), + "commitment 
does not match auction; auction.block_hash = `{}`, auction.height = `{}`, \ + commitment.block_hash = `{}`, commitment.height = `{}`", + self.block_hash, + self.height, + commitment.block_hash(), + commitment.height(), + ); + if let Some(start_timer) = self.start_timer.take() { + start_timer + .send(()) + .map_err(|()| eyre!("the auction worker's start timer channel was already dropped")) + } else { + Err(eyre!( + "a previous commitment already triggered the start timer of the auction" + )) + } + } + + // TODO: identify the executed block in the span fields + #[instrument(skip_all, fields(id = %self.id), err)] + pub(in crate::auctioneer::inner) fn start_bids( + &mut self, + block: crate::block::Executed, + ) -> eyre::Result<()> { + ensure!( + &self.block_hash == block.sequencer_block_hash(), + "executed block does not match auction; auction.block_hash = `{}`, \ + executed.block_hash = `{}`", + &self.block_hash, + block.sequencer_block_hash(), + ); + + if let Some(start_bids) = self.start_bids.take() { + start_bids.send(()).map_err(|()| { + eyre!("the auction worker's start bids channel was already dropped") + })?; + } else { + bail!("a previous executed block already triggered the auction to start bids"); + } + + let prev_block = self + .hash_of_executed_block_on_rollup + .replace(block.rollup_block_hash()); + debug_assert!(prev_block.is_none()); + + Ok(()) + } + + // TODO: Use a refinement type for the parente rollup block hash + #[instrument(skip_all, fields( + id = %self.id, + bundle.sequencer_block_hash = %bundle.base_sequencer_block_hash(), + bundle.parent_roll_block_hash = %base64(bundle.parent_rollup_block_hash()), + + ), err)] + pub(in crate::auctioneer::inner) fn forward_bundle_to_auction( + &mut self, + bundle: Arc, + ) -> eyre::Result<()> { + // TODO: emit some more information about auctoin ID, expected vs actual parent block hash, + // tacked block hash, provided block hash, etc. 
+ let Some(block_hash_of_executed) = self.hash_of_executed_block_on_rollup else { + eyre::bail!( + "received a new bundle but the current auction has not yet + received an execute block from the rollup; dropping the bundle" + ); + }; + ensure!( + &self.block_hash == bundle.base_sequencer_block_hash() + && block_hash_of_executed == bundle.parent_rollup_block_hash(), + "bundle does not match auction; auction.sequenecer_block_hash = `{}`, \ + auction.parent_block_hash = `{}`, bundle. = `{}`, bundle.height = `{}`", + self.block_hash, + base64(block_hash_of_executed), + bundle.base_sequencer_block_hash(), + base64(bundle.parent_rollup_block_hash()), + ); + self.bundles + .send(bundle) + .wrap_err("failed to submit bundle to auction; the bundle is lost") + } +} + +impl Future for Auction { + type Output = (Id, Result<(), tokio::task::JoinError>); + + fn poll( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + let res = std::task::ready!(self.worker.poll_unpin(cx)); + std::task::Poll::Ready((self.id, res.map(|_| ()))) + } +} + +pub(super) enum Summary { + CancelledDuringAuction, + NoBids, + Submitted(tx_sync::Response), +} + +impl Display for Summary { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Summary::CancelledDuringAuction => { + f.write_str("received cancellation signal during auction loop") + } + Summary::NoBids => f.write_str("auction finished without bids"), + Summary::Submitted(rsp) => write!( + f, + "auction winner submitted; Sequencer responed with ABCI code `{}`, log `{}`", + rsp.code.value(), + rsp.log, + ), + } + } +} diff --git a/crates/astria-auctioneer/src/auctioneer/inner/auction/worker.rs b/crates/astria-auctioneer/src/auctioneer/inner/auction/worker.rs new file mode 100644 index 0000000000..2b58b1830e --- /dev/null +++ b/crates/astria-auctioneer/src/auctioneer/inner/auction/worker.rs @@ -0,0 +1,226 @@ +//! 
The Auction is repsonsible for running an auction for a given block. An auction advances through +//! the following states, controlled via the `commands_rx` channel received: +//! 1. The auction is initialized but not yet started (i.e. no commands have been received). +//! 2. After receiving a `Command::StartProcessingBids`, the auction will start processing incoming +//! bundles from `new_bundles_rx`. +//! 3. After receiving a `Command::StartTimer`, the auction will set a timer for `latency_margin` +//! (denominated in milliseconds). +//! 4. Once the timer expires, the auction will choose a winner based on its `AllocationRule` and +//! submit it to the sequencer. +//! +//! ## Aborting an Auction +//! The auction may also be aborted at any point before the timer expires. +//! This will cause the auction to return early without submitting a winner, +//! effectively discarding any bundles that were processed. +//! This is used for leveraging optimsitic execution, running an auction for block data that has +//! been proposed in the sequencer network's cometBFT but not yet finalized. +//! We assume that most proposals are adopted in cometBFT, allowing us to buy a few hundred +//! milliseconds before they are finalized. However, if multiple rounds of voting invalidate a +//! proposal, we can abort the auction and avoid submitting a potentially invalid bundle. In this +//! case, the auction will abort and a new one will be created for the newly processed proposal +//! (which will be received by the Optimistic Executor via the optimistic block stream). +//! +//! ## Auction Result +//! The auction result is a `Bundle` that is signed by the Auctioneer and submitted to the rollup +//! via the sequencer. The rollup defines a trusted Auctioneer address that it allows to submit +//! bundles, and thus must verify the Auctioneer's signature over this bundle. +//! +//! Since the sequencer does not include the transaction signer's metadata with the `RollupData` +//! 
events that it saves in its block data, the Auctioneer must include this metadata in its +//! `RollupDataSubmission`s. This is done by wrapping the winning `Bundle` object in an +//! `AuctionResult` object, which is then serialized into the `RollupDataSubmission`. +//! +//! ## Submission to Sequencer +//! The auction will submit the winning bundle to the sequencer via the `broadcast_tx_sync` ABCI(?) +//! endpoint. +//! In order to save time on fetching a nonce, the auction will fetch the next pending nonce as soon +//! as it received the signal to start the timer. This corresponds to the sequencer block being +//! committed, thus providing the latest pending nonce. + +use std::{ + sync::Arc, + time::Duration, +}; + +use astria_core::primitive::v1::{ + RollupId, + asset, +}; +use astria_eyre::eyre::{ + self, + Context, + bail, +}; +use sequencer_client::SequencerClientExt; +use tokio::{ + select, + sync::oneshot, +}; +use tokio_util::sync::CancellationToken; +use tracing::{ + Level, + info, + instrument, +}; + +use super::{ + PendingNonceSubscriber, + Summary, + allocation_rule::FirstPrice, +}; +use crate::{ + bundle::Bundle, + sequencer_key::SequencerKey, +}; + +pub(super) struct Worker { + /// The sequencer's ABCI client, used for submitting transactions + pub(super) sequencer_abci_client: sequencer_client::HttpClient, + pub(super) start_bids: Option>, + pub(super) start_timer: Option>, + /// Channel for receiving new bundles + pub(super) bundles: tokio::sync::mpsc::UnboundedReceiver>, + /// The time between receiving a block commitment + pub(super) latency_margin: Duration, + /// The ID of the auction + pub(super) id: super::Id, + /// The key used to sign transactions on the sequencer + pub(super) sequencer_key: SequencerKey, + /// Fee asset for submitting transactions + pub(super) fee_asset_denomination: asset::Denom, + /// The chain ID used for sequencer transactions + pub(super) sequencer_chain_id: String, + /// Rollup ID to submit the auction result to + 
pub(super) rollup_id: RollupId, + pub(super) pending_nonce: PendingNonceSubscriber, + pub(super) cancellation_token: CancellationToken, +} + +impl Worker { + // FIXME: consider using Valuable for the return case. + // See this discussion: https://github.com/tokio-rs/tracing/discussions/1906 + #[instrument( + skip_all, + fields(id = %self.id), + err(level = Level::WARN, Display), + ret(Display), + )] + pub(super) async fn run(mut self) -> eyre::Result { + let Some(auction_result) = self + .cancellation_token + .clone() + .run_until_cancelled(self.run_auction_loop()) + .await + else { + return Ok(Summary::CancelledDuringAuction); + }; + let Some(winner) = auction_result.wrap_err("auction failed while waiting for bids")? else { + return Ok(Summary::NoBids); + }; + + // TODO: report the pending nonce that we ended up using. + let transaction = Arc::unwrap_or_clone(winner) + .into_transaction_body( + self.pending_nonce.get(), + self.rollup_id, + &self.sequencer_key, + self.fee_asset_denomination.clone(), + self.sequencer_chain_id, + ) + .sign(self.sequencer_key.signing_key()); + + // NOTE: Submit fire-and-forget style. If the submission didn't make it in time, + // it's likey lost. + // TODO: We can consider providing a very tight retry mechanism. Maybe resubmit once + // if the response didn't take too long? But it's probably a bad idea to even try. + // Can we detect if a submission failed due to a bad nonce? In this case, we could + // immediately ("optimistically") submit with the most recent pending nonce (if the + // publisher updated it in the meantime) or just nonce + 1 (if it didn't yet update)? 
+ + let submission_fut = { + let client = self.sequencer_abci_client.clone(); + tokio::time::timeout(Duration::from_secs(30), async move { + client + .submit_transaction_sync(transaction) + .await + .wrap_err("submission request failed") + }) + }; + tokio::pin!(submission_fut); + + loop { + select!( + () = self.cancellation_token.clone().cancelled_owned(), if !self.cancellation_token.is_cancelled() => { + info!( + "received cancellation token while waiting for Sequencer to respond to \ + transaction submission; still waiting for submission until timeout" + ); + } + + res = &mut submission_fut => { + break match res + .wrap_err("submission of auction winner timed out before receiving a response from Sequencer") + { + Ok(Ok(rsp)) => Ok(Summary::Submitted(rsp)), + Err(err) | Ok(Err(err)) => Err(err), + } + } + ); + } + } + + async fn run_auction_loop(&mut self) -> eyre::Result>> { + let mut latency_margin_timer = None; + // TODO: do we want to make this configurable to allow for more complex allocation rules? + let mut allocation_rule = FirstPrice::new(); + let mut auction_is_open = false; + + loop { + select! 
{ + biased; + + _ = async { latency_margin_timer.as_mut().unwrap() }, if latency_margin_timer.is_some() => { + info!("timer is up; bids left unprocessed: {}", self.bundles.len()); + break Ok(allocation_rule.winner()); + } + + Ok(()) = async { self.start_bids.as_mut().unwrap().await }, if self.start_bids.is_some() => { + let mut channel = self + .start_bids + .take() + .expect("inside an arm that that checks start_bids == Some"); + channel.close(); + // TODO: if the timer is already running, report how much time is left for the bids + auction_is_open = true; + } + + Ok(()) = async { self.start_timer.as_mut().unwrap().await }, if self.start_timer.is_some() => { + let mut channel = self + .start_timer + .take() + .expect("inside an arm that checks start_timer == Some"); + channel.close(); + if !auction_is_open { + info!( + "received signal to start the auction timer before signal to start \ + processing bids; that's ok but eats into the time allotment of the \ + auction" + ); + } + + // TODO: Emit an event to report start and endpoint of the auction. + latency_margin_timer = Some(tokio::time::sleep(self.latency_margin)); + } + + // TODO: this is an unbounded channel. Can we process multiple bids at a time? 
+ Some(bundle) = self.bundles.recv(), if auction_is_open => { + allocation_rule.bid(&bundle); + } + + else => { + bail!("all channels are closed; the auction cannot continue") + } + } + } + } +} diff --git a/crates/astria-auctioneer/src/auctioneer/inner/mod.rs b/crates/astria-auctioneer/src/auctioneer/inner/mod.rs new file mode 100644 index 0000000000..1871048904 --- /dev/null +++ b/crates/astria-auctioneer/src/auctioneer/inner/mod.rs @@ -0,0 +1,434 @@ +use std::{ + sync::Arc, + time::Duration, +}; + +use astria_core::{ + primitive::v1::{ + Address, + RollupId, + }, + sequencerblock::v1::block::FilteredSequencerBlock, +}; +use astria_eyre::eyre::{ + self, + OptionExt as _, + WrapErr as _, + bail, +}; +use futures::{ + Future, + StreamExt as _, + stream::FuturesUnordered, +}; +use tokio::{ + select, + task::JoinHandle, +}; +use tokio_util::sync::CancellationToken; +use tracing::{ + Level, + Span, + error, + field, + info, + info_span, + instrument, + warn, +}; + +use crate::{ + Config, + Metrics, + rollup_channel::{ + BundleStream, + ExecuteOptimisticBlockStream, + }, + sequencer_channel::{ + BlockCommitmentStream, + OptimisticBlockStream, + SequencerChannel, + }, + sequencer_key::SequencerKey, +}; + +mod auction; + +/// The implementation of the auctioneer business logic. +pub(super) struct Inner { + auction_factory: auction::Factory, + block_commitments: BlockCommitmentStream, + bundles: BundleStream, + cancelled_auctions: FuturesUnordered, + executed_blocks: ExecuteOptimisticBlockStream, + running_auction: Option, + optimistic_blocks: OptimisticBlockStream, + pending_nonce: PendingNoncePublisher, + rollup_id: RollupId, + shutdown_token: CancellationToken, +} + +impl Inner { + /// Creates an [`Auctioneer`] service from a [`Config`] and [`Metrics`]. 
+ pub(super) fn new( + config: Config, + metrics: &'static Metrics, + shutdown_token: CancellationToken, + ) -> eyre::Result { + let Config { + sequencer_grpc_endpoint, + sequencer_abci_endpoint, + latency_margin_ms, + rollup_grpc_endpoint, + rollup_id, + sequencer_chain_id, + sequencer_private_key_path, + sequencer_address_prefix, + fee_asset_denomination, + .. + } = config; + + let rollup_id = RollupId::from_unhashed_bytes(rollup_id); + let rollup_channel = crate::rollup_channel::open(&rollup_grpc_endpoint)?; + let sequencer_channel = crate::sequencer_channel::open(&sequencer_grpc_endpoint)?; + + let sequencer_key = SequencerKey::builder() + .path(sequencer_private_key_path) + .prefix(sequencer_address_prefix) + .try_build() + .wrap_err("failed to load sequencer private key")?; + info!(address = %sequencer_key.address(), "loaded sequencer signer"); + + let pending_nonce = + PendingNoncePublisher::new(sequencer_channel.clone(), *sequencer_key.address()); + + let sequencer_abci_client = + sequencer_client::HttpClient::new(sequencer_abci_endpoint.as_str()) + .wrap_err("failed constructing sequencer abci client")?; + + // TODO: Rearchitect this thing + let auction_factory = auction::Factory { + metrics, + sequencer_abci_client, + latency_margin: Duration::from_millis(latency_margin_ms), + sequencer_key: sequencer_key.clone(), + fee_asset_denomination, + sequencer_chain_id, + rollup_id, + pending_nonce: pending_nonce.subscribe(), + cancellation_token: shutdown_token.child_token(), + }; + + Ok(Self { + auction_factory, + block_commitments: sequencer_channel.open_get_block_commitment_stream(), + bundles: rollup_channel.open_bundle_stream(), + cancelled_auctions: FuturesUnordered::new(), + executed_blocks: rollup_channel.open_execute_optimistic_block_stream(), + optimistic_blocks: sequencer_channel.open_get_optimistic_block_stream(rollup_id), + rollup_id, + running_auction: None, + shutdown_token, + pending_nonce, + }) + } + + /// Runs the [`Auctioneer`] service until it 
received an exit signal, or one of the constituent + /// tasks either ends unexpectedly or returns an error. + pub(super) async fn run(mut self) -> eyre::Result<()> { + let reason: eyre::Result<&str> = { + // This is a long running loop. Errors are emitted inside the handlers. + loop { + select! { + biased; + + () = self.shutdown_token.clone().cancelled_owned() => { + break Ok("received shutdown signal"); + }, + + res = self.handle_event() => { + if let Err(err) = res { + break Err(err); + } + } + } + } + }; + + self.shutdown(reason).await + } + + async fn handle_event(&mut self) -> eyre::Result<()> { + select!( + res = self.optimistic_blocks.next() => { + let res = res.ok_or_eyre("optimistic block stream closed")?; + let _ = self.handle_optimistic_block(res); + }, + + res = self.block_commitments.next() => { + let res = res.ok_or_eyre("block commitment stream closed")?; + let _ = self.handle_block_commitment(res); + }, + + res = self.executed_blocks.next() => { + let res = res.ok_or_eyre("executed block stream closed")?; + let _ = self.handle_executed_block(res); + } + + (id, res) = async { self.running_auction.as_mut().unwrap().await }, if self.running_auction.is_some() => { + let _ = self.handle_completed_auction(id, res); + } + + Some(res) = self.bundles.next() => { + let _ = self.handle_bundle(res); + } + + res = &mut self.pending_nonce => { + match res { + Ok(()) => bail!("endless pending nonce publisher task exicted unexpectedly"), + Err(err) => return Err(err).wrap_err("pending nonce publisher task panicked"), + } + } + + Some((id, res)) = self.cancelled_auctions.next() => { + let _ = self.handle_cancelled_auction(id, res); + } + ); + Ok(()) + } + + /// Handles the result of an auction running to completion. + /// + /// This method only exists to ensure that panicking auctions receive an event. + /// It is assumed that auctions that ran to completion (returnin a success or failure) + /// will emit an event in their own span. 
+ #[instrument(skip_all, fields(%auction_id), err)] + fn handle_completed_auction( + &mut self, + auction_id: auction::Id, + res: Result<(), tokio::task::JoinError>, + ) -> Result<(), tokio::task::JoinError> { + self.running_auction.take(); + res + } + + /// Handles the result of cancelled auctions. + /// + /// This method only exists to ensure that panicking auctions receive an event. + /// It is assumed that auctions that ran to completion (returnin a success or failure) + /// will emit an event in their own span. + #[instrument(skip_all, fields(%auction_id), err(level = Level::INFO))] + fn handle_cancelled_auction( + &self, + auction_id: auction::Id, + res: Result<(), tokio::task::JoinError>, + ) -> Result<(), tokio::task::JoinError> { + res + } + + #[instrument(skip_all, fields(block_hash = field::Empty), err)] + fn handle_optimistic_block( + &mut self, + optimistic_block: eyre::Result, + ) -> eyre::Result<()> { + let optimistic_block = + optimistic_block.wrap_err("encountered problem receiving optimistic block message")?; + + Span::current().record("block_hash", field::display(optimistic_block.block_hash())); + + let new_auction = self.auction_factory.start_new(&optimistic_block); + info!(auction_id = %new_auction.id(), "started new auction"); + + if let Some(old_auction) = self.running_auction.replace(new_auction) { + old_auction.cancel(); + info!(auction_id = %old_auction.id(), "cancelled running auction"); + self.cancelled_auctions.push(old_auction); + } + + // TODO: do conversion && sending in one operation + let base_block = crate::block::Optimistic::new(optimistic_block) + .try_into_base_block(self.rollup_id) + // FIXME: give this their proper wire names + .wrap_err("failed to create BaseBlock from FilteredSequencerBlock")?; + self.executed_blocks + .try_send(base_block) + .wrap_err("failed to forward block to execution stream")?; + + Ok(()) + } + + #[instrument(skip_all, fields(block_hash = field::Empty), err)] + fn handle_block_commitment( + &mut self, 
+ commitment: eyre::Result, + ) -> eyre::Result<()> { + let block_commitment = commitment.wrap_err("failed to receive block commitment")?; + Span::current().record("block_hash", field::display(block_commitment.block_hash())); + + if let Some(running_auction) = &mut self.running_auction { + running_auction + .start_timer(block_commitment) + .wrap_err("failed to start timer")?; + info!(auction_id = %running_auction.id(), "started auction timer"); + } else { + info!( + "received a block commitment but did not start auction timer because no auction \ + was running" + ); + } + + Ok(()) + } + + #[instrument(skip_all, fields(block_hash = field::Empty), err)] + fn handle_executed_block( + &mut self, + executed_block: eyre::Result, + ) -> eyre::Result<()> { + let executed_block = executed_block.wrap_err("failed to receive executed block")?; + Span::current().record( + "block_hash", + field::display(executed_block.sequencer_block_hash()), + ); + if let Some(running_auction) = &mut self.running_auction { + running_auction + .start_bids(executed_block) + .wrap_err("failed to start processing bids")?; + info!( + auction_id = %running_auction.id(), + "set auction to start processing bids based on executed block", + ); + } else { + info!( + "received an executed block but did not set auction to start processing bids \ + because no auction was running" + ); + } + Ok(()) + } + + #[instrument(skip_all, fields(block_hash = field::Empty), err)] + fn handle_bundle(&mut self, bundle: eyre::Result) -> eyre::Result<()> { + let bundle = Arc::new(bundle.wrap_err("received problematic bundle")?); + Span::current().record( + "block_hash", + field::display(bundle.base_sequencer_block_hash()), + ); + if let Some(running_auction) = &mut self.running_auction { + running_auction + .forward_bundle_to_auction(bundle) + .wrap_err("failed to forward bundle to auction")?; + info!( + auction_id = %running_auction.id(), + "forwarded bundle to auction" + ); + } else { + info!( + "received a bundle but did 
not forward it to the auction because no auction was \ + running", + ); + } + Ok(()) + } + + #[instrument(skip_all)] + async fn shutdown(mut self, reason: eyre::Result<&'static str>) -> eyre::Result<()> { + const WAIT_BEFORE_ABORT: Duration = Duration::from_secs(25); + + // Necessary if we got here because of another reason than receiving an external + // shutdown signal. + self.shutdown_token.cancel(); + + let message = format!( + "waiting {} for all constituent tasks to shutdown before aborting", + humantime::format_duration(WAIT_BEFORE_ABORT), + ); + match &reason { + Ok(reason) => info!(%reason, message), + Err(reason) => error!(%reason, message), + }; + if let Some(running_auction) = self.running_auction.take() { + running_auction.abort(); + } + reason.map(|_| ()) + } +} + +#[derive(Clone, Debug)] +struct PendingNonceSubscriber { + inner: tokio::sync::watch::Receiver, +} + +impl PendingNonceSubscriber { + fn get(&self) -> u32 { + *self.inner.borrow() + } +} + +/// Fetches the latest pending nonce for a given address every 500ms. +// TODO: should this provide some kind of feedback mechanism from the +// auction submission? Automatic incrementing for example? Notificatoin +// that the nonce was actually bad? +struct PendingNoncePublisher { + sender: tokio::sync::watch::Sender, + task: JoinHandle<()>, +} + +impl PendingNoncePublisher { + fn subscribe(&self) -> PendingNonceSubscriber { + PendingNonceSubscriber { + inner: self.sender.subscribe(), + } + } + + fn new(channel: SequencerChannel, address: Address) -> Self { + use tokio::time::{ + MissedTickBehavior, + timeout_at, + }; + // TODO: make this configurable. Right now they assume a Sequencer block time of 2s, + // so this is fetching nonce up to 4 times a block. 
+ const FETCH_INTERVAL: Duration = Duration::from_millis(500); + const FETCH_TIMEOUT: Duration = FETCH_INTERVAL.saturating_mul(2); + let (tx, _) = tokio::sync::watch::channel(0); + Self { + sender: tx.clone(), + task: tokio::task::spawn(async move { + let mut interval = tokio::time::interval(FETCH_INTERVAL); + interval.set_missed_tick_behavior(MissedTickBehavior::Delay); + let mut fetch = None; + loop { + select!( + instant = interval.tick(), if fetch.is_none() => { + fetch = Some(Box::pin( + timeout_at(instant + FETCH_TIMEOUT, channel.get_pending_nonce(address)))); + } + res = async { fetch.as_mut().unwrap().await }, if fetch.is_some() => { + fetch.take(); + let span = info_span!("fetch pending nonce"); + match res.map_err(eyre::Report::new) { + Ok(Ok(nonce)) => { + info!(nonce, %address, "received new pending from sequencer"); + tx.send_replace(nonce); + } + Ok(Err(error)) | Err(error) => span.in_scope(|| warn!(%error, "failed fetching pending nonce")), + } + } + ); + } + }), + } + } +} + +impl Future for PendingNoncePublisher { + type Output = Result<(), tokio::task::JoinError>; + + fn poll( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + use futures::FutureExt as _; + self.task.poll_unpin(cx) + } +} diff --git a/crates/astria-auctioneer/src/auctioneer/mod.rs b/crates/astria-auctioneer/src/auctioneer/mod.rs index cce3a43586..9e443d4d6b 100644 --- a/crates/astria-auctioneer/src/auctioneer/mod.rs +++ b/crates/astria-auctioneer/src/auctioneer/mod.rs @@ -1,40 +1,37 @@ -use std::future::Future; +use std::{ + future::Future, + task::Poll, +}; use astria_eyre::eyre::{ self, }; -use pin_project_lite::pin_project; -use tokio::task::{ - JoinError, - JoinHandle, -}; +use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use tracing::instrument; use crate::{ + flatten_join_result, Config, Metrics, }; mod inner; -pin_project! { - /// Handle to the [`Auctioneer`] service, returned by [`Auctioneer::spawn`]. 
- pub struct Auctioneer { - shutdown_token: CancellationToken, - task: Option>>, - } +/// The [`Auctioneer`] service returned by [`Auctioneer::spawn`]. +pub struct Auctioneer { + shutdown_token: CancellationToken, + task: Option>>, } impl Auctioneer { - /// Creates an [`Auctioneer`] service and runs it, returning a handle to the taks and shutdown - /// token. + /// Spawns the [`Auctioneer`] service. /// /// # Errors /// Returns an error if the Auctioneer cannot be initialized. pub fn spawn(cfg: Config, metrics: &'static Metrics) -> eyre::Result { let shutdown_token = CancellationToken::new(); - let inner = inner::Auctioneer::new(cfg, metrics, shutdown_token.child_token())?; + let inner = inner::Inner::new(cfg, metrics, shutdown_token.child_token())?; let task = tokio::spawn(inner.run()); Ok(Self { @@ -43,37 +40,38 @@ impl Auctioneer { }) } - /// Initiates shutdown of the Auctioneer and returns its result. + /// Shuts down Auctioneer, in turn waiting for its components to shut down. /// /// # Errors - /// Returns an error if the Auctioneer exited with an error. + /// Returns an error if an error occured during shutdown. /// /// # Panics - /// Panics if shutdown is called twice. + /// Panics if called twice. 
#[instrument(skip_all, err)] - pub async fn shutdown(&mut self) -> Result, JoinError> { + pub async fn shutdown(&mut self) -> eyre::Result<()> { self.shutdown_token.cancel(); - self.task - .take() - .expect("shutdown must not be called twice") - .await + flatten_join_result( + self.task + .take() + .expect("shutdown must not be called twice") + .await, + ) } } impl Future for Auctioneer { - type Output = Result, tokio::task::JoinError>; + type Output = eyre::Result<()>; fn poll( - self: std::pin::Pin<&mut Self>, + mut self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, - ) -> std::task::Poll { + ) -> Poll { use futures::future::FutureExt as _; - let this = self.project(); - let task = this + let task = self .task .as_mut() - .expect("the Auctioneer handle must not be polled after shutdown"); - task.poll_unpin(cx) + .expect("auctioneer must not be polled after shutdown"); + task.poll_unpin(cx).map(flatten_join_result) } } diff --git a/crates/astria-auctioneer/src/block/commitment_stream.rs b/crates/astria-auctioneer/src/block/commitment_stream.rs deleted file mode 100644 index aa603ab074..0000000000 --- a/crates/astria-auctioneer/src/block/commitment_stream.rs +++ /dev/null @@ -1,64 +0,0 @@ -use std::pin::Pin; - -use astria_core::generated::sequencerblock::optimisticblock::v1alpha1::GetBlockCommitmentStreamResponse; -use astria_eyre::eyre::{ - self, - Context, - OptionExt as _, -}; -use futures::{ - Stream, - StreamExt as _, -}; -use pin_project_lite::pin_project; -use telemetry::display::base64; -use tracing::debug; - -use super::Commitment; -use crate::optimistic_block_client::OptimisticBlockClient; - -pin_project! { - /// A stream for receiving committed blocks from the sequencer. 
- pub(crate) struct BlockCommitmentStream { - #[pin] - client: tonic::Streaming, - } -} - -impl BlockCommitmentStream { - pub(crate) async fn connect(mut sequencer_client: OptimisticBlockClient) -> eyre::Result { - let commitment_stream_client = sequencer_client - .get_block_commitment_stream() - .await - .wrap_err("failed to stream block commitments")?; - - Ok(Self { - client: commitment_stream_client, - }) - } -} - -impl Stream for BlockCommitmentStream { - type Item = eyre::Result; - - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - let Some(res) = futures::ready!(self.client.poll_next_unpin(cx)) else { - return std::task::Poll::Ready(None); - }; - - let raw = res - .wrap_err("received gRPC error")? - .commitment - .ok_or_eyre("response did not contain block commitment")?; - - let commitment = - Commitment::try_from_raw(&raw).wrap_err("failed to parse raw to BlockCommitment")?; - - debug!(block_commitment.sequencer_block_hash = %base64(&commitment.sequencer_block_hash()), "received block commitment"); - - std::task::Poll::Ready(Some(Ok(commitment))) - } -} diff --git a/crates/astria-auctioneer/src/block/executed_stream.rs b/crates/astria-auctioneer/src/block/executed_stream.rs deleted file mode 100644 index bea5db6ed4..0000000000 --- a/crates/astria-auctioneer/src/block/executed_stream.rs +++ /dev/null @@ -1,134 +0,0 @@ -use std::pin::Pin; - -use astria_core::{ - generated::bundle::v1alpha1::{ - ExecuteOptimisticBlockStreamRequest, - ExecuteOptimisticBlockStreamResponse, - }, - primitive::v1::RollupId, -}; -use astria_eyre::eyre::{ - self, - WrapErr as _, -}; -use futures::{ - Stream, - StreamExt, -}; -use pin_project_lite::pin_project; -use telemetry::display::base64; -use tokio::sync::mpsc; -use tokio_stream::wrappers::ReceiverStream; -use tracing::{ - debug, - error, -}; - -use super::{ - Executed, - Optimistic, -}; -use crate::optimistic_execution_client::OptimisticExecutionClient; - -pub(crate) struct 
Handle { - blocks_to_execute_tx: mpsc::Sender, -} - -impl Handle { - pub(crate) fn try_send_block_to_execute(&mut self, block: Optimistic) -> eyre::Result<()> { - self.blocks_to_execute_tx - .try_send(block) - .wrap_err("failed to send block to execute")?; - - Ok(()) - } -} - -pin_project! { - /// A stream for receiving optimistic execution results from the rollup node. - pub(crate) struct ExecutedBlockStream { - #[pin] - client: tonic::Streaming, - } -} - -impl ExecutedBlockStream { - pub(crate) async fn connect( - rollup_id: RollupId, - rollup_grpc_endpoint: String, - ) -> eyre::Result<(Handle, ExecutedBlockStream)> { - let mut optimistic_execution_client = OptimisticExecutionClient::new(&rollup_grpc_endpoint) - .wrap_err("failed to initialize optimistic execution client")?; - let (executed_stream_client, blocks_to_execute_tx) = optimistic_execution_client - .execute_optimistic_block_stream(rollup_id) - .await - .wrap_err("failed to stream executed blocks")?; - - Ok(( - Handle { - blocks_to_execute_tx, - }, - Self { - client: executed_stream_client, - }, - )) - } -} - -impl Stream for ExecutedBlockStream { - type Item = eyre::Result; - - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context, - ) -> std::task::Poll> { - let Some(res) = futures::ready!(self.client.poll_next_unpin(cx)) else { - return std::task::Poll::Ready(None); - }; - - let raw = res.wrap_err("received gRPC Error")?; - - let executed_block = - Executed::try_from_raw(raw).wrap_err("failed to parse raw to Executed")?; - - debug!( - executed_block.rollup_block_hash = %base64(executed_block.rollup_block_hash()), - executed_block.sequencer_block_hash = %base64(executed_block.sequencer_block_hash()), - "received block execution result" - ); - - std::task::Poll::Ready(Some(Ok(executed_block))) - } -} - -pub(crate) fn make_execution_requests_stream( - rollup_id: RollupId, -) -> ( - mpsc::Sender, - impl tonic::IntoStreamingRequest, -) { - // TODO: should this capacity be a config instead 
of a magic number? OPTIMISTIC_REORG_BUFFER? - // TODO: add a metric so we can see if that becomes a problem - let (blocks_to_execute_tx, blocks_to_execute_rx) = mpsc::channel(16); - let blocks_to_execute_stream_rx = ReceiverStream::new(blocks_to_execute_rx); - - let requests = blocks_to_execute_stream_rx.filter_map(move |block: Optimistic| async move { - let base_block = block - .try_into_base_block(rollup_id) - .wrap_err("failed to create BaseBlock from FilteredSequencerBlock"); - - // skip blocks which fail to decode the transactions? - match base_block { - Ok(base_block) => Some(ExecuteOptimisticBlockStreamRequest { - base_block: Some(base_block), - }), - Err(e) => { - error!(error = %e, "skipping execution of invalid block"); - - None - } - } - }); - - (blocks_to_execute_tx, requests) -} diff --git a/crates/astria-auctioneer/src/block/mod.rs b/crates/astria-auctioneer/src/block/mod.rs index 3d087241de..850c6045d0 100644 --- a/crates/astria-auctioneer/src/block/mod.rs +++ b/crates/astria-auctioneer/src/block/mod.rs @@ -1,39 +1,30 @@ use astria_core::{ + Protobuf, execution, - generated::{ + generated::astria::{ bundle::v1alpha1 as raw_bundle, sequencerblock::{ - optimisticblock::v1alpha1 as raw_optimistic_block, - v1::{ - self as raw_sequencer_block, - }, + optimistic::v1alpha1 as raw_optimistic_block, + v1 as raw_sequencer_block, }, }, primitive::v1::RollupId, sequencerblock::v1::{ + RollupTransactions, block::{ + BlockHash, FilteredSequencerBlock, FilteredSequencerBlockParts, }, - RollupTransactions, }, - Protobuf, }; use astria_eyre::eyre::{ self, - ensure, - eyre, Context, + eyre, }; use bytes::Bytes; use prost::Message as _; -use telemetry::display::base64; - -use crate::bundle::Bundle; - -pub(crate) mod commitment_stream; -pub(crate) mod executed_stream; -pub(crate) mod optimistic_stream; /// Converts a [`tendermint::Time`] to a [`prost_types::Timestamp`]. 
fn convert_tendermint_time_to_protobuf_timestamp( @@ -56,12 +47,10 @@ pub(crate) struct Optimistic { } impl Optimistic { - pub(crate) fn try_from_raw( - raw: raw_sequencer_block::FilteredSequencerBlock, - ) -> eyre::Result { - Ok(Self { - filtered_sequencer_block: FilteredSequencerBlock::try_from_raw(raw)?, - }) + pub(crate) fn new(filtered_sequencer_block: FilteredSequencerBlock) -> Self { + Self { + filtered_sequencer_block, + } } /// Converts this [`Optimistic`] into a [`BaseBlock`] for the given `rollup_id`. @@ -99,19 +88,11 @@ impl Optimistic { let timestamp = Some(convert_tendermint_time_to_protobuf_timestamp(header.time())); Ok(raw_bundle::BaseBlock { - sequencer_block_hash: Bytes::copy_from_slice(&block_hash), + sequencer_block_hash: Bytes::copy_from_slice(&*block_hash), transactions, timestamp, }) } - - pub(crate) fn sequencer_block_hash(&self) -> [u8; 32] { - *self.filtered_sequencer_block.block_hash() - } - - pub(crate) fn sequencer_height(&self) -> u64 { - self.filtered_sequencer_block.height().into() - } } #[derive(Debug, Clone)] @@ -119,7 +100,7 @@ pub(crate) struct Executed { /// The rollup block metadata that resulted from executing the optimistic block. block: execution::v1::Block, /// The hash of the sequencer block that was executed optimistically. - sequencer_block_hash: [u8; 32], + sequencer_block_hash: BlockHash, } impl Executed { @@ -144,16 +125,8 @@ impl Executed { }) } - pub(crate) fn sequencer_block_hash(&self) -> [u8; 32] { - self.sequencer_block_hash - } - - pub(crate) fn parent_rollup_block_hash(&self) -> [u8; 32] { - self.block - .parent_block_hash() - .as_ref() - .try_into() - .expect("rollup block hash must be 32 bytes") + pub(crate) fn sequencer_block_hash(&self) -> &BlockHash { + &self.sequencer_block_hash } pub(crate) fn rollup_block_hash(&self) -> [u8; 32] { @@ -166,11 +139,13 @@ impl Executed { } #[derive(Debug, Clone)] +// FIXME: This is called a `Commitment` but is produced from a `SequencerBlockCommit`. 
+// This is very confusing. pub(crate) struct Commitment { /// The height of the sequencer block that was committed. - sequencer_height: u64, + height: u64, /// The hash of the sequencer block that was committed. - sequnecer_block_hash: [u8; 32], + block_hash: BlockHash, } impl Commitment { @@ -178,8 +153,8 @@ impl Commitment { raw: &raw_optimistic_block::SequencerBlockCommit, ) -> eyre::Result { Ok(Self { - sequencer_height: raw.height, - sequnecer_block_hash: raw + height: raw.height, + block_hash: raw .block_hash .as_ref() .try_into() @@ -187,88 +162,12 @@ impl Commitment { }) } - pub(crate) fn sequencer_block_hash(&self) -> [u8; 32] { - self.sequnecer_block_hash + pub(crate) fn block_hash(&self) -> &BlockHash { + &self.block_hash } - pub(crate) fn sequencer_height(&self) -> u64 { - self.sequencer_height - } -} - -pub(crate) struct Current { - optimistic: Optimistic, - executed: Option, - commitment: Option, -} - -impl Current { - /// Creates a new `Current` with the given `optimistic_block`. - pub(crate) fn with_optimistic(optimistic_block: Optimistic) -> Self { - Self { - optimistic: optimistic_block, - executed: None, - commitment: None, - } - } - - /// Updates the `Current` with the given `executed_block`. - /// This will fail if the `executed_block` does not match the `optimistic_block`'s sequencer - /// block hash. - pub(crate) fn execute(&mut self, executed_block: Executed) -> eyre::Result<()> { - if executed_block.sequencer_block_hash() != self.optimistic.sequencer_block_hash() { - return Err(eyre!("block hash mismatch")); - } - - self.executed = Some(executed_block); - Ok(()) - } - - /// Updates the `Current` with the given `block_commitment`. - /// This will fail if the `block_commitment` does not match the `optimistic_block`'s sequencer - /// block hash. 
- pub(crate) fn commitment(&mut self, block_commitment: Commitment) -> eyre::Result<()> { - if block_commitment.sequencer_block_hash() != self.optimistic.sequencer_block_hash() { - return Err(eyre!("block hash mismatch")); - } - if block_commitment.sequencer_height() != self.optimistic.sequencer_height() { - return Err(eyre!("block height mismatch")); - } - - self.commitment = Some(block_commitment); - Ok(()) - } - - pub(crate) fn sequencer_block_hash(&self) -> [u8; 32] { - self.optimistic.sequencer_block_hash() - } - - pub(crate) fn parent_rollup_block_hash(&self) -> Option<[u8; 32]> { - self.executed - .as_ref() - .map(Executed::parent_rollup_block_hash) - } - - /// Ensures that the given `bundle` is valid for the current block state. - pub(crate) fn ensure_bundle_is_valid(&self, bundle: &Bundle) -> eyre::Result<()> { - ensure!( - bundle.base_sequencer_block_hash() == self.sequencer_block_hash(), - "incoming bundle's sequencer block hash {bundle_hash} does not match current \ - sequencer block hash {current_hash}", - bundle_hash = base64(bundle.base_sequencer_block_hash()), - current_hash = base64(self.sequencer_block_hash()) - ); - - if let Some(rollup_parent_block_hash) = self.parent_rollup_block_hash() { - ensure!( - bundle.parent_rollup_block_hash() == rollup_parent_block_hash, - "bundle's rollup parent block hash {bundle_hash} does not match current rollup \ - parent block hash {current_hash}", - bundle_hash = base64(bundle.parent_rollup_block_hash()), - current_hash = base64(rollup_parent_block_hash) - ); - } - - Ok(()) + /// The height of the sequencer block that was committed. 
+ pub(crate) fn height(&self) -> u64 { + self.height } } diff --git a/crates/astria-auctioneer/src/block/optimistic_stream.rs b/crates/astria-auctioneer/src/block/optimistic_stream.rs deleted file mode 100644 index 76c384dd21..0000000000 --- a/crates/astria-auctioneer/src/block/optimistic_stream.rs +++ /dev/null @@ -1,96 +0,0 @@ -use std::pin::Pin; - -use astria_core::{ - generated::sequencerblock::optimisticblock::v1alpha1::GetOptimisticBlockStreamResponse, - primitive::v1::RollupId, -}; -use astria_eyre::eyre::{ - self, - Context, - OptionExt as _, -}; -use futures::{ - Stream, - StreamExt as _, -}; -use pin_project_lite::pin_project; -use telemetry::display::base64; -use tracing::debug; - -use super::{ - executed_stream, - Optimistic, -}; -use crate::optimistic_block_client::OptimisticBlockClient; - -pin_project! { - /// A stream for receiving optimistic blocks from the sequencer. - /// Blocks received optimistically will be checked for validity and a clone will - /// be sent to the rollup's optimistic execution serivce before returning them - /// for further processing. - /// - /// ## Backpressure - /// While blocks are forwarded using an `mpsc` channel, we only receive incoming - /// optimistic blocks from the sequencer when CometBFT proposals are processed. - /// Multiple optimsitic blocks will be received in a short amount of time only in - /// the event that CometBFT receives multiple proposals within a single block's slot. - /// We assume this is relatively rare and that under normal operations a block will - /// be sent optimistically once per slot (~2 seconds). 
- pub(crate) struct OptimisticBlockStream { - #[pin] - client: tonic::Streaming, - #[pin] - execution_handle: executed_stream::Handle, - } -} - -impl OptimisticBlockStream { - pub(crate) async fn connect( - rollup_id: RollupId, - mut sequencer_client: OptimisticBlockClient, - execution_handle: executed_stream::Handle, - ) -> eyre::Result { - let optimistic_stream_client = sequencer_client - .get_optimistic_block_stream(rollup_id) - .await - .wrap_err("failed to stream optimistic blocks")?; - - Ok(OptimisticBlockStream { - client: optimistic_stream_client, - execution_handle, - }) - } -} - -impl Stream for OptimisticBlockStream { - type Item = eyre::Result; - - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context, - ) -> std::task::Poll> { - let Some(res) = futures::ready!(self.client.poll_next_unpin(cx)) else { - return std::task::Poll::Ready(None); - }; - - let raw = res - .wrap_err("received gRPC error")? - .block - .ok_or_eyre("response did not contain filtered sequencer block")?; - - let optimistic_block = - Optimistic::try_from_raw(raw).wrap_err("failed to parse raw to Optimistic")?; - - debug!( - optimistic_block.sequencer_block_hash = %base64(optimistic_block.sequencer_block_hash()), - "received optimistic block from sequencer" - ); - - // forward the optimistic block to the rollup's optimistic execution server - self.execution_handle - .try_send_block_to_execute(optimistic_block.clone()) - .wrap_err("failed to send optimistic block for execution")?; - - std::task::Poll::Ready(Some(Ok(optimistic_block))) - } -} diff --git a/crates/astria-auctioneer/src/bundle/client.rs b/crates/astria-auctioneer/src/bundle/client.rs deleted file mode 100644 index 2240b3aae5..0000000000 --- a/crates/astria-auctioneer/src/bundle/client.rs +++ /dev/null @@ -1,139 +0,0 @@ -use std::{ - pin::Pin, - time::Duration, -}; - -use astria_core::generated::bundle::v1alpha1::{ - bundle_service_client::BundleServiceClient, - GetBundleStreamRequest, - 
GetBundleStreamResponse, -}; -use astria_eyre::eyre::{ - self, - OptionExt, - WrapErr as _, -}; -use axum::http::Uri; -use futures::{ - Stream, - StreamExt, -}; -use tonic::transport::Endpoint; -use tracing::{ - instrument, - warn, - Instrument, - Span, -}; -use tryhard::backoff_strategies::ExponentialBackoff; - -use super::Bundle; - -pub(crate) struct BundleClient { - inner: BundleServiceClient, - uri: Uri, -} - -impl BundleClient { - pub(crate) fn new(rollup_uri: &str) -> eyre::Result { - let uri = rollup_uri - .parse::() - .wrap_err("failed to parse rollup_grpc_endpoint")?; - let endpoint = Endpoint::from(uri.clone()); - let client = BundleServiceClient::new(endpoint.connect_lazy()); - - Ok(Self { - inner: client, - uri, - }) - } - - #[instrument(skip_all, fields(uri = %self.uri))] - pub(crate) async fn get_bundle_stream( - &mut self, - ) -> eyre::Result> { - let span = tracing::Span::current(); - let retry_cfg = make_retry_cfg("get bundle stream".into(), span); - let client = self.inner.clone(); - - let stream = tryhard::retry_fn(|| { - let mut client = client.clone(); - async move { client.get_bundle_stream(GetBundleStreamRequest {}).await } - }) - .with_config(retry_cfg) - .in_current_span() - .await - .wrap_err("failed to get bundle stream")? 
- .into_inner(); - - Ok(stream) - } -} - -fn make_retry_cfg( - msg: String, - span: Span, -) -> tryhard::RetryFutureConfig< - ExponentialBackoff, - impl Fn(u32, Option, &tonic::Status) -> futures::future::Ready<()>, -> { - tryhard::RetryFutureConfig::new(1024) - .exponential_backoff(Duration::from_millis(100)) - .max_delay(Duration::from_secs(2)) - .on_retry( - move |attempt: u32, next_delay: Option, error: &tonic::Status| { - let wait_duration = next_delay - .map(humantime::format_duration) - .map(tracing::field::display); - warn!( - parent: &span, - attempt, - wait_duration, - error = error as &dyn std::error::Error, - "attempt to {msg} failed; retrying after backoff", - ); - futures::future::ready(()) - }, - ) -} - -pub(crate) struct BundleStream { - client: Pin>>, -} - -impl BundleStream { - pub(crate) async fn connect(rollup_grpc_endpoint: String) -> eyre::Result { - let mut client = BundleClient::new(&rollup_grpc_endpoint) - .wrap_err("failed to initialize bundle service client")?; - let stream = client - .get_bundle_stream() - .await - .wrap_err("failed to get bundle stream")?; - - Ok(Self { - client: Box::pin(stream), - }) - } -} - -impl Stream for BundleStream { - type Item = eyre::Result; - - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - let Some(res) = futures::ready!(self.client.poll_next_unpin(cx)) else { - return std::task::Poll::Ready(None); - }; - - let raw = res - .wrap_err("received gRPC error")? 
- .bundle - .ok_or_eyre("bundle stream response did not contain bundle")?; - - let bundle = Bundle::try_from_raw(raw).wrap_err("failed to parse raw Bundle")?; - - std::task::Poll::Ready(Some(Ok(bundle))) - } -} diff --git a/crates/astria-auctioneer/src/bundle/mod.rs b/crates/astria-auctioneer/src/bundle/mod.rs index cc77285350..ace2c5cf78 100644 --- a/crates/astria-auctioneer/src/bundle/mod.rs +++ b/crates/astria-auctioneer/src/bundle/mod.rs @@ -3,9 +3,7 @@ use astria_core::{ Signature, VerificationKey, }, - generated::bundle::v1alpha1::{ - self as raw, - }, + generated::astria::bundle::v1alpha1 as raw, primitive::v1::{ asset, RollupId, @@ -14,19 +12,17 @@ use astria_core::{ action::RollupDataSubmission, TransactionBody, }, + sequencerblock::v1::block::BlockHash, }; use astria_eyre::eyre::{ self, WrapErr as _, }; use bytes::Bytes; -pub(crate) use client::BundleStream; use prost::Message as _; use crate::sequencer_key::SequencerKey; -mod client; - // TODO: this should probably be moved to astria_core::bundle? #[derive(Debug, Clone)] pub(crate) struct Bundle { @@ -39,11 +35,11 @@ pub(crate) struct Bundle { prev_rollup_block_hash: [u8; 32], /// The hash of the sequencer block used to derive the rollup block that this bundle is based /// on. 
- base_sequencer_block_hash: [u8; 32], + base_sequencer_block_hash: BlockHash, } impl Bundle { - fn try_from_raw(raw: raw::Bundle) -> eyre::Result { + pub(crate) fn try_from_raw(raw: raw::Bundle) -> eyre::Result { let raw::Bundle { fee, transactions, @@ -68,7 +64,7 @@ impl Bundle { raw::Bundle { fee: self.fee, transactions: self.transactions, - base_sequencer_block_hash: Bytes::copy_from_slice(&self.base_sequencer_block_hash), + base_sequencer_block_hash: Bytes::copy_from_slice(&*self.base_sequencer_block_hash), prev_rollup_block_hash: Bytes::copy_from_slice(&self.prev_rollup_block_hash), } } @@ -77,7 +73,7 @@ impl Bundle { self, nonce: u32, rollup_id: RollupId, - sequencer_key: SequencerKey, + sequencer_key: &SequencerKey, fee_asset: asset::Denom, chain_id: String, ) -> TransactionBody { @@ -107,8 +103,8 @@ impl Bundle { self.prev_rollup_block_hash } - pub(crate) fn base_sequencer_block_hash(&self) -> [u8; 32] { - self.base_sequencer_block_hash + pub(crate) fn base_sequencer_block_hash(&self) -> &BlockHash { + &self.base_sequencer_block_hash } } @@ -120,7 +116,7 @@ pub(crate) struct Allocation { } impl Allocation { - fn new(bundle: Bundle, sequencer_key: SequencerKey) -> Self { + fn new(bundle: Bundle, sequencer_key: &SequencerKey) -> Self { let bundle_data = bundle.clone().into_raw().encode_to_vec(); let signature = sequencer_key.signing_key().sign(&bundle_data); let verification_key = sequencer_key.signing_key().verification_key(); diff --git a/crates/astria-auctioneer/src/lib.rs b/crates/astria-auctioneer/src/lib.rs index b50e8078b7..064b50ff7d 100644 --- a/crates/astria-auctioneer/src/lib.rs +++ b/crates/astria-auctioneer/src/lib.rs @@ -1,16 +1,15 @@ //! 
TODO: Add a description -mod auction; mod auctioneer; mod block; mod build_info; mod bundle; pub mod config; pub(crate) mod metrics; -mod optimistic_block_client; -mod optimistic_execution_client; -mod optimistic_executor; +mod rollup_channel; +mod sequencer_channel; mod sequencer_key; +mod streaming_utils; use astria_eyre::{ eyre, @@ -23,7 +22,7 @@ pub use metrics::Metrics; pub use telemetry; use tokio::task::JoinError; -fn flatten_result(res: Result, JoinError>) -> eyre::Result { +fn flatten_join_result(res: Result, JoinError>) -> eyre::Result { match res { Ok(Ok(val)) => Ok(val), Ok(Err(err)) => Err(err).wrap_err("task returned with error"), diff --git a/crates/astria-auctioneer/src/main.rs b/crates/astria-auctioneer/src/main.rs index c6d9ea0db0..326cf7b22f 100644 --- a/crates/astria-auctioneer/src/main.rs +++ b/crates/astria-auctioneer/src/main.rs @@ -5,7 +5,11 @@ use astria_auctioneer::{ Config, BUILD_INFO, }; -use astria_eyre::eyre::WrapErr as _; +use astria_eyre::eyre::{ + self, + eyre, + WrapErr as _, +}; use tokio::{ select, signal::unix::{ @@ -16,6 +20,7 @@ use tokio::{ use tracing::{ error, info, + instrument, warn, }; @@ -25,8 +30,17 @@ async fn main() -> ExitCode { eprintln!("{}", telemetry::display::json(&BUILD_INFO)); - let cfg: Config = config::get().expect("failed to read configuration"); - eprintln!("{}", telemetry::display::json(&cfg),); + let cfg: Config = match config::get() { + Err(err) => { + eprintln!("failed to read configuration:\n{err:?}"); + return ExitCode::FAILURE; + } + Ok(cfg) => cfg, + }; + eprintln!( + "starting with configuration:\n{}", + telemetry::display::json(&cfg), + ); let mut telemetry_conf = telemetry::configure() .set_no_otel(cfg.no_otel) @@ -66,22 +80,32 @@ async fn main() -> ExitCode { let mut sigterm = signal(SignalKind::terminate()) .expect("setting a SIGTERM listener should always work on Unix"); - select! 
{ - _ = sigterm.recv() => { - info!("received SIGTERM; shutting down"); - if let Err(error) = auctioneer.shutdown().await { - warn!(%error, "encountered an error while shutting down"); - } - info!("auctioneer stopped"); - ExitCode::SUCCESS + let exit_reason = select! { + _ = sigterm.recv() => Ok("received shutdown signal"), + res = &mut auctioneer => { + res.and_then(|()| Err(eyre!("auctioneer task exited unexpectedly"))) } + }; - res = &mut auctioneer => { - error!( - error = res.err().map(tracing::field::display), - "auctioneer task exited unexpectedly" - ); + shutdown(exit_reason, auctioneer).await +} + +#[instrument(skip_all)] +async fn shutdown(reason: eyre::Result<&'static str>, mut service: Auctioneer) -> ExitCode { + let message = "shutting down"; + let exit_code = match reason { + Ok(reason) => { + info!(reason, message); + if let Err(error) = service.shutdown().await { + warn!(%error, "encountered errors during shutdown"); + }; + ExitCode::SUCCESS + } + Err(reason) => { + error!(%reason, message); ExitCode::FAILURE } - } + }; + info!("shutdown target reached"); + exit_code } diff --git a/crates/astria-auctioneer/src/optimistic_block_client.rs b/crates/astria-auctioneer/src/optimistic_block_client.rs deleted file mode 100644 index 7a2cacc96d..0000000000 --- a/crates/astria-auctioneer/src/optimistic_block_client.rs +++ /dev/null @@ -1,134 +0,0 @@ -use std::time::Duration; - -use astria_core::{ - generated::sequencerblock::optimisticblock::v1alpha1::{ - optimistic_block_service_client::OptimisticBlockServiceClient, - GetBlockCommitmentStreamRequest, - GetBlockCommitmentStreamResponse, - GetOptimisticBlockStreamRequest, - GetOptimisticBlockStreamResponse, - }, - primitive::v1::RollupId, -}; -use astria_eyre::eyre::{ - self, - WrapErr as _, -}; -use tonic::transport::{ - Channel, - Endpoint, - Uri, -}; -use tracing::{ - instrument, - warn, - Instrument as _, - Span, -}; -use tryhard::backoff_strategies::ExponentialBackoff; - -/// Wraps the gRPC client for the 
Sequencer service that wraps client calls with `tryhard`. -#[derive(Debug, Clone)] -pub(crate) struct OptimisticBlockClient { - inner: OptimisticBlockServiceClient, - uri: Uri, -} - -impl OptimisticBlockClient { - pub(crate) fn new(sequencer_uri: &str) -> eyre::Result { - let uri = sequencer_uri - .parse::() - .wrap_err("failed parsing provided string as Uri")?; - - // TODO: use a UDS socket instead - let endpoint = Endpoint::from(uri.clone()); - let inner = OptimisticBlockServiceClient::new(endpoint.connect_lazy()); - Ok(Self { - inner, - uri, - }) - } - - // TODO: this should probably be separated from the tryhard logic and put in an extension trait - #[instrument(skip_all, fields( - uri = %self.uri, - %rollup_id, - err, - ))] - pub(super) async fn get_optimistic_block_stream( - &mut self, - rollup_id: RollupId, - ) -> eyre::Result> { - let span = tracing::Span::current(); - let retry_cfg = make_retry_cfg("stream optimistic blocks".into(), span); - let client = self.inner.clone(); - - let stream = tryhard::retry_fn(|| { - let mut client = client.clone(); - let req = GetOptimisticBlockStreamRequest { - rollup_id: Some(rollup_id.into_raw()), - }; - async move { client.get_optimistic_block_stream(req).await } - }) - .with_config(retry_cfg) - .in_current_span() - .await - .wrap_err("failed to initialize optimistic block stream")? 
- .into_inner(); - - Ok(stream) - } - - // TODO: this should probably be separated from the tryhard logic and put in an extension trait - #[instrument(skip_all, fields( - uri = %self.uri, - err, - ))] - pub(super) async fn get_block_commitment_stream( - &mut self, - ) -> eyre::Result> { - let span = tracing::Span::current(); - let retry_cfg = make_retry_cfg("stream block commitments".into(), span); - let client = self.inner.clone(); - - let stream = tryhard::retry_fn(|| { - let mut client = client.clone(); - let req = GetBlockCommitmentStreamRequest {}; - async move { client.get_block_commitment_stream(req).await } - }) - .with_config(retry_cfg) - .in_current_span() - .await - .wrap_err("failed to initialize block commitment stream")? - .into_inner(); - - Ok(stream) - } -} - -fn make_retry_cfg( - msg: String, - span: Span, -) -> tryhard::RetryFutureConfig< - ExponentialBackoff, - impl Fn(u32, Option, &tonic::Status) -> futures::future::Ready<()>, -> { - tryhard::RetryFutureConfig::new(1024) - .exponential_backoff(Duration::from_millis(100)) - .max_delay(Duration::from_secs(2)) - .on_retry( - move |attempt: u32, next_delay: Option, error: &tonic::Status| { - let wait_duration = next_delay - .map(humantime::format_duration) - .map(tracing::field::display); - warn!( - parent: &span, - attempt, - wait_duration, - error = error as &dyn std::error::Error, - "attempt to {msg} failed; retrying after backoff", - ); - futures::future::ready(()) - }, - ) -} diff --git a/crates/astria-auctioneer/src/optimistic_execution_client.rs b/crates/astria-auctioneer/src/optimistic_execution_client.rs deleted file mode 100644 index d836a41451..0000000000 --- a/crates/astria-auctioneer/src/optimistic_execution_client.rs +++ /dev/null @@ -1,114 +0,0 @@ -use std::time::Duration; - -use astria_core::{ - generated::bundle::v1alpha1::{ - optimistic_execution_service_client::OptimisticExecutionServiceClient, - ExecuteOptimisticBlockStreamResponse, - }, - primitive::v1::RollupId, -}; -use 
astria_eyre::eyre::{ - self, - Context, -}; -use tokio::sync::mpsc; -use tonic::transport::{ - Channel, - Endpoint, - Uri, -}; -use tracing::{ - instrument, - warn, - Instrument, - Span, -}; -use tryhard::backoff_strategies::ExponentialBackoff; - -use crate::block::{ - self, - executed_stream::make_execution_requests_stream, -}; - -pub(crate) struct OptimisticExecutionClient { - inner: OptimisticExecutionServiceClient, - uri: Uri, -} - -impl OptimisticExecutionClient { - pub(crate) fn new(rollup_uri: &str) -> eyre::Result { - let uri = rollup_uri - .parse::() - .wrap_err("failed parsing optimistic execution uri")?; - - // TODO: use UDS socket - let endpoint = Endpoint::from(uri.clone()); - let inner = OptimisticExecutionServiceClient::new(endpoint.connect_lazy()); - - Ok(Self { - inner, - uri, - }) - } - - #[instrument(skip_all, fields( - uri = %self.uri, - %rollup_id, - err, - ))] - pub(crate) async fn execute_optimistic_block_stream( - &mut self, - rollup_id: RollupId, - ) -> eyre::Result<( - tonic::Streaming, - mpsc::Sender, - )> { - let span = tracing::Span::current(); - let retry_cfg = make_retry_cfg("execute optimistic blocks".into(), span); - let client = self.inner.clone(); - - let (stream, opt_tx) = tryhard::retry_fn(|| { - let mut client = client.clone(); - - let (blocks_to_execute_tx, requests) = make_execution_requests_stream(rollup_id); - - async move { - let stream = client.execute_optimistic_block_stream(requests).await?; - Ok((stream, blocks_to_execute_tx)) - } - }) - .with_config(retry_cfg) - .in_current_span() - .await - .wrap_err("failed to initialize optimistic execution stream")?; - - Ok((stream.into_inner(), opt_tx)) - } -} - -fn make_retry_cfg( - msg: String, - span: Span, -) -> tryhard::RetryFutureConfig< - ExponentialBackoff, - impl Fn(u32, Option, &tonic::Status) -> futures::future::Ready<()>, -> { - tryhard::RetryFutureConfig::new(1024) - .exponential_backoff(Duration::from_millis(100)) - .max_delay(Duration::from_secs(2)) - .on_retry( - 
move |attempt: u32, next_delay: Option, error: &tonic::Status| { - let wait_duration = next_delay - .map(humantime::format_duration) - .map(tracing::field::display); - warn!( - parent: &span, - attempt, - wait_duration, - error = error as &dyn std::error::Error, - "attempt to {msg} failed; retrying after backoff", - ); - futures::future::ready(()) - }, - ) -} diff --git a/crates/astria-auctioneer/src/optimistic_executor/builder.rs b/crates/astria-auctioneer/src/optimistic_executor/builder.rs deleted file mode 100644 index 8dce746aa0..0000000000 --- a/crates/astria-auctioneer/src/optimistic_executor/builder.rs +++ /dev/null @@ -1,47 +0,0 @@ -use astria_core::primitive::v1::RollupId; -use tokio_util::sync::CancellationToken; - -use super::Startup; -use crate::{ - auction, - Metrics, -}; - -pub(crate) struct Builder { - pub(crate) metrics: &'static Metrics, - pub(crate) shutdown_token: CancellationToken, - /// The endpoint for the sequencer gRPC service used for the optimistic block stream - pub(crate) sequencer_grpc_endpoint: String, - /// The file path for the private key used to sign sequencer transactions with the auction - /// results - /// The rollup ID for the filtered optimistic block stream - pub(crate) rollup_id: String, - /// The endpoint for the rollup's optimistic execution gRPC service - pub(crate) rollup_grpc_endpoint: String, - /// Manager for ongoing auctions - pub(crate) auctions: auction::Manager, -} - -impl Builder { - pub(crate) fn build(self) -> Startup { - let Self { - metrics, - shutdown_token, - sequencer_grpc_endpoint, - rollup_id, - rollup_grpc_endpoint, - auctions, - } = self; - - let rollup_id = RollupId::from_unhashed_bytes(&rollup_id); - - Startup { - metrics, - shutdown_token, - sequencer_grpc_endpoint, - rollup_id, - rollup_grpc_endpoint, - auctions, - } - } -} diff --git a/crates/astria-auctioneer/src/optimistic_executor/mod.rs b/crates/astria-auctioneer/src/optimistic_executor/mod.rs deleted file mode 100644 index 
8eeed8e3d4..0000000000 --- a/crates/astria-auctioneer/src/optimistic_executor/mod.rs +++ /dev/null @@ -1,307 +0,0 @@ -//! The Optimistic Executor is the component responsible for maintaining the current block -//! state based on the optimistic block stream, the block commitment stream, and the executed -//! block stream. The Optimistic Executior uses its current block state for running an auction -//! per block. Incoming bundles are fed to the current auction after checking them against the -//! current block state. -//! -//! ## Block Lifecycle -//! The Optimistic Executor tracks its current block using the `block:Current` struct, at a high -//! level: -//! 1. Blocks are received optimistically from the sequencer via the optimistic block stream, which -//! also forwards them to the rollup node for execution. -//! 2. Execution results are received from the rollup node via the executed block stream. -//! 3. Commitments are received from the sequencer via the block commitment stream. -//! -//! ## Auction Lifecycle -//! The current block state is used for running an auction per block. Auctions are managed by the -//! `auction::Manager` struct, and the Optimistic Executor advances their state based on its current -//! block state: -//! 1. An auction is created when a new block is received optimistically. -//! 2. The auction will begin processing bids when the executed block is received. -//! 3. The auction's timer is started when a block commitment is received. -//! 4. Bundles are fed to the current auction after checking them against the current block state. -//! -//! ### Bundles and Backpressure -//! Bundles are fed to the current auction via an mpsc channel. Since the auction will only start -//! processing bids after the executed block is received, the channel is used to buffer bundles -//! that are received before the executed block. -//! If too many bundles are received from the rollup node before the block is executed -//! 
optimistically on the rollup node, the channel will fill up and newly received bundles will be -//! dropped until the auction begins processing bundles. -//! We assume this is highly unlikely, as the rollup node's should filter the bundles it streams -//! by its optimistic head block hash. - -use astria_core::primitive::v1::RollupId; -use astria_eyre::eyre::{ - self, - eyre, - OptionExt, - WrapErr as _, -}; -use telemetry::display::base64; -use tokio::select; -use tokio_stream::StreamExt as _; -use tokio_util::sync::CancellationToken; -use tracing::{ - error, - info, - instrument, - warn, -}; - -use crate::{ - auction, - block::{ - self, - commitment_stream::BlockCommitmentStream, - executed_stream::ExecutedBlockStream, - optimistic_stream::OptimisticBlockStream, - }, - bundle::{ - Bundle, - BundleStream, - }, - optimistic_block_client::OptimisticBlockClient, -}; - -mod builder; -pub(crate) use builder::Builder; - -macro_rules! break_for_closed_stream { - ($stream_res:expr, $msg:expr) => { - match $stream_res { - Some(val) => val, - None => break Err(eyre!($msg)), - } - }; -} - -pub(crate) struct Startup { - #[allow(dead_code)] - metrics: &'static crate::Metrics, - shutdown_token: CancellationToken, - sequencer_grpc_endpoint: String, - rollup_id: RollupId, - rollup_grpc_endpoint: String, - auctions: auction::Manager, -} - -impl Startup { - pub(crate) async fn startup(self) -> eyre::Result { - let Self { - metrics, - shutdown_token, - sequencer_grpc_endpoint, - rollup_id, - rollup_grpc_endpoint, - auctions, - } = self; - - let (execution_stream_handle, executed_blocks) = - ExecutedBlockStream::connect(rollup_id, rollup_grpc_endpoint.clone()) - .await - .wrap_err("failed to initialize executed block stream")?; - - let sequencer_client = OptimisticBlockClient::new(&sequencer_grpc_endpoint) - .wrap_err("failed to initialize sequencer grpc client")?; - let mut optimistic_blocks = OptimisticBlockStream::connect( - rollup_id, - sequencer_client.clone(), - 
execution_stream_handle, - ) - .await - .wrap_err("failed to initialize optimsitic block stream")?; - - let block_commitments = BlockCommitmentStream::connect(sequencer_client) - .await - .wrap_err("failed to initialize block commitment stream")?; - - let bundle_stream = BundleStream::connect(rollup_grpc_endpoint) - .await - .wrap_err("failed to initialize bundle stream")?; - - let optimistic_block = optimistic_blocks - .next() - .await - .ok_or_eyre("optimistic stream closed during startup?")? - .wrap_err("failed to get optimistic block during startup")?; - let current_block = block::Current::with_optimistic(optimistic_block); - - Ok(Running { - metrics, - shutdown_token, - optimistic_blocks, - block_commitments, - executed_blocks, - bundle_stream, - auctions, - current_block, - }) - } -} - -pub(crate) struct Running { - // TODO: add metrics - #[allow(dead_code)] - metrics: &'static crate::Metrics, - shutdown_token: CancellationToken, - optimistic_blocks: OptimisticBlockStream, - block_commitments: BlockCommitmentStream, - executed_blocks: ExecutedBlockStream, - bundle_stream: BundleStream, - auctions: auction::Manager, - current_block: block::Current, -} - -impl Running { - pub(crate) async fn run(mut self) -> eyre::Result<()> { - let reason: eyre::Result<&str> = { - // This is a long running loop. Errors are emitted inside the handlers. - loop { - select! 
{ - biased; - () = self.shutdown_token.cancelled() => { - break Ok("received shutdown signal"); - }, - - Some((id, res)) = self.auctions.join_next() => { - res.wrap_err_with(|| format!("auction failed for block {}", base64(id)))?; - }, - - res = self.optimistic_blocks.next() => { - let res = break_for_closed_stream!(res, "optimistic block stream closed"); - - let _ = self.handle_optimistic_block(res); - }, - - res = self.block_commitments.next() => { - let res = break_for_closed_stream!(res, "block commitment stream closed"); - - let _ = self.handle_block_commitment(res); - - }, - - res = self.executed_blocks.next() => { - let res = break_for_closed_stream!(res, "executed block stream closed"); - - let _ = self.handle_executed_block(res); - } - - Some(res) = self.bundle_stream.next() => { - let bundle = res.wrap_err("failed to get bundle")?; - - let _ = self.handle_bundle(bundle); - } - } - } - }; - - match reason { - Ok(msg) => info!(%msg, "shutting down"), - Err(err) => error!(%err, "shutting down due to error"), - }; - - Ok(()) - } - - #[instrument(skip(self), fields(auction.old_id = %base64(self.current_block.sequencer_block_hash())), err)] - fn handle_optimistic_block( - &mut self, - optimistic_block: eyre::Result, - ) -> eyre::Result<()> { - let optimistic_block = optimistic_block.wrap_err("failed to receive optimistic block")?; - - let old_auction_id = - auction::Id::from_sequencer_block_hash(self.current_block.sequencer_block_hash()); - self.auctions - .abort_auction(old_auction_id) - .wrap_err("failed to abort auction")?; - - info!( - optimistic_block.sequencer_block_hash = %base64(optimistic_block.sequencer_block_hash()), - "received optimistic block, aborting old auction and starting new auction" - ); - - self.current_block = block::Current::with_optimistic(optimistic_block.clone()); - let new_auction_id = - auction::Id::from_sequencer_block_hash(self.current_block.sequencer_block_hash()); - self.auctions.new_auction(new_auction_id); - - Ok(()) - } - - 
#[instrument(skip_all, fields(auction.id = %base64(self.current_block.sequencer_block_hash())), err)] - fn handle_block_commitment( - &mut self, - block_commitment: eyre::Result, - ) -> eyre::Result<()> { - let block_commitment = block_commitment.wrap_err("failed to receive block commitment")?; - - if let Err(e) = self.current_block.commitment(block_commitment.clone()) { - warn!( - current_block.sequencer_block_hash = %base64(self.current_block.sequencer_block_hash()), - block_commitment.sequencer_block_hash = %base64(block_commitment.sequencer_block_hash()), - "received block commitment for the wrong block" - ); - return Err(e).wrap_err("failed to handle block commitment"); - } - - let auction_id = - auction::Id::from_sequencer_block_hash(self.current_block.sequencer_block_hash()); - - self.auctions - .start_timer(auction_id) - .wrap_err("failed to start timer")?; - - Ok(()) - } - - #[instrument(skip_all, fields(auction.id = %base64(self.current_block.sequencer_block_hash())))] - fn handle_executed_block( - &mut self, - executed_block: eyre::Result, - ) -> eyre::Result<()> { - let executed_block = executed_block.wrap_err("failed to receive executed block")?; - - if let Err(e) = self.current_block.execute(executed_block.clone()) { - warn!( - // TODO: nicer display for the current block - current_block.sequencer_block_hash = %base64(self.current_block.sequencer_block_hash()), - executed_block.sequencer_block_hash = %base64(executed_block.sequencer_block_hash()), - executed_block.rollup_block_hash = %base64(executed_block.rollup_block_hash()), - "received optimistic execution result for wrong sequencer block" - ); - return Err(e).wrap_err("failed to handle executed block"); - } - - let auction_id = - auction::Id::from_sequencer_block_hash(self.current_block.sequencer_block_hash()); - - self.auctions - .start_processing_bids(auction_id) - .wrap_err("failed to start processing bids")?; - - Ok(()) - } - - #[instrument(skip_all, fields(auction.id = 
%base64(self.current_block.sequencer_block_hash())))] - fn handle_bundle(&mut self, bundle: Bundle) -> eyre::Result<()> { - if let Err(e) = self.current_block.ensure_bundle_is_valid(&bundle) { - warn!( - curent_block.sequencer_block_hash = %base64(self.current_block.sequencer_block_hash()), - bundle.sequencer_block_hash = %base64(bundle.base_sequencer_block_hash()), - bundle.parent_rollup_block_hash = %base64(bundle.parent_rollup_block_hash()), - "incoming bundle does not match current block, ignoring" - ); - return Err(e).wrap_err("failed to handle bundle"); - } - - let auction_id = - auction::Id::from_sequencer_block_hash(self.current_block.sequencer_block_hash()); - self.auctions - .try_send_bundle(auction_id, bundle) - .wrap_err("failed to submit bundle to auction")?; - - Ok(()) - } -} diff --git a/crates/astria-auctioneer/src/rollup_channel.rs b/crates/astria-auctioneer/src/rollup_channel.rs new file mode 100644 index 0000000000..e7a1b7fa3d --- /dev/null +++ b/crates/astria-auctioneer/src/rollup_channel.rs @@ -0,0 +1,223 @@ +use std::{ + pin::Pin, + task::{ + ready, + Context, + Poll, + }, +}; + +use astria_core::generated::astria::bundle::v1alpha1::{ + bundle_service_client::BundleServiceClient, + BaseBlock, + ExecuteOptimisticBlockStreamResponse, + GetBundleStreamResponse, +}; +use astria_eyre::eyre::{ + self, + eyre, + WrapErr as _, +}; +use futures::{ + stream::BoxStream, + Stream, + StreamExt, +}; +use prost::Name as _; +use tokio::sync::broadcast; +use tokio_stream::wrappers::BroadcastStream; +use tracing::{ + info_span, + warn, + Instrument as _, +}; + +use crate::{ + bundle::Bundle, + streaming_utils::{ + make_instrumented_channel, + restarting_stream, + InstrumentedChannel, + }, +}; + +pub(crate) fn open(endpoint: &str) -> eyre::Result { + RollupChannel::create(endpoint) + .wrap_err_with(|| format!("failed to create a gRPC channel to rollup at `{endpoint}`")) +} + +#[derive(Clone)] +pub(crate) struct RollupChannel { + inner: InstrumentedChannel, +} + 
+impl RollupChannel { + fn create(uri: &str) -> eyre::Result { + Ok(Self { + inner: make_instrumented_channel(uri)?, + }) + } + + pub(crate) fn open_bundle_stream(&self) -> BundleStream { + use astria_core::generated::astria::bundle::v1alpha1::GetBundleStreamRequest; + let chan = self.inner.clone(); + let inner = restarting_stream(move || { + let chan = chan.clone(); + async move { + let inner = BundleServiceClient::new(chan) + .get_bundle_stream(GetBundleStreamRequest {}) + .await + .wrap_err("failed to open bundle stream") + .inspect_err(|error| warn!(%error))? + .into_inner(); + Ok(InnerBundleStream { + inner, + }) + } + .instrument(info_span!("request bundle stream")) + }) + .boxed(); + BundleStream { + inner, + } + } + + pub(crate) fn open_execute_optimistic_block_stream(&self) -> ExecuteOptimisticBlockStream { + use astria_core::generated::astria::bundle::v1alpha1::{ + optimistic_execution_service_client::OptimisticExecutionServiceClient, + ExecuteOptimisticBlockStreamRequest, + }; + + // NOTE: this implementation uses a broadcast channel instead of an mpsc because + // one can get new readers by using Sender::subscribe. This is important for the + // restart mechanism. The mpsc channel (or rather the tokio stream ReceiverStream wrapper) + // would need something ugly like a Arc>, but where + // we'd need to also implement Stream for some wrapper around it.... It's a mess. + let (to_server, _) = broadcast::channel(16); + let chan = self.inner.clone(); + let to_server_2 = to_server.clone(); + let incoming = restarting_stream(move || { + let chan = chan.clone(); + let out_stream = BroadcastStream::new(to_server_2.subscribe()) + // TODO: emit some kind of event when the stream actually starts + // lagging behind instead of quietly discarding the issue. 
+ .filter_map(|maybe_lagged| std::future::ready(maybe_lagged.ok())) + .map(|base_block| ExecuteOptimisticBlockStreamRequest { + base_block: Some(base_block), + }); + + async move { + let inner = OptimisticExecutionServiceClient::new(chan) + .execute_optimistic_block_stream(out_stream) + .await + .wrap_err("failed to open execute optimistic block stream") + .inspect_err(|error| warn!(%error))? + .into_inner(); + Ok(InnerExecuteOptimisticBlockStream { + inner, + }) + } + .instrument(info_span!("request execute optimistic block stream")) + }) + .boxed(); + + ExecuteOptimisticBlockStream { + incoming, + outgoing: to_server, + } + } +} + +pub(crate) struct BundleStream { + inner: BoxStream<'static, eyre::Result>, +} + +impl Stream for BundleStream { + type Item = eyre::Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_next_unpin(cx) + } +} + +struct InnerBundleStream { + inner: tonic::Streaming, +} + +impl Stream for InnerBundleStream { + type Item = eyre::Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let Some(res) = ready!(self.inner.poll_next_unpin(cx)) else { + return Poll::Ready(None); + }; + + let raw = res + .wrap_err("error while receiving streamed message from server")? + .bundle + .ok_or_else(|| { + eyre!( + "message field not set: `{}.bundle`", + GetBundleStreamResponse::full_name() + ) + })?; + + let bundle = Bundle::try_from_raw(raw).wrap_err_with(|| { + format!( + "failed to validate received message `{}`", + astria_core::generated::astria::bundle::v1alpha1::Bundle::full_name() + ) + })?; + + Poll::Ready(Some(Ok(bundle))) + } +} + +pub(crate) struct ExecuteOptimisticBlockStream { + incoming: BoxStream<'static, eyre::Result>, + outgoing: broadcast::Sender, +} + +impl ExecuteOptimisticBlockStream { + /// Immediately sends `base_block` to the connected server. Fails if + /// the channel is full. + // NOTE: just leak the tokio error for now. 
It's crate private anyway + // and we'd just end up wrapping the same variants. + pub(crate) fn try_send( + &mut self, + base_block: BaseBlock, + ) -> Result<(), broadcast::error::SendError> { + self.outgoing.send(base_block).map(|_| ()) + } +} + +impl Stream for ExecuteOptimisticBlockStream { + type Item = eyre::Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.incoming.poll_next_unpin(cx) + } +} + +pub(crate) struct InnerExecuteOptimisticBlockStream { + inner: tonic::Streaming, +} + +impl Stream for InnerExecuteOptimisticBlockStream { + type Item = eyre::Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let Some(message) = ready!(self.inner.poll_next_unpin(cx)) else { + return Poll::Ready(None); + }; + + let message = message.wrap_err("failed receiving message over stream")?; + let executed_block = crate::block::Executed::try_from_raw(message).wrap_err_with(|| { + format!( + "failed to validate `{}`", + astria_core::generated::astria::bundle::v1alpha1::ExecuteOptimisticBlockStreamResponse::full_name(), + ) + })?; + Poll::Ready(Some(Ok(executed_block))) + } +} diff --git a/crates/astria-auctioneer/src/sequencer_channel.rs b/crates/astria-auctioneer/src/sequencer_channel.rs new file mode 100644 index 0000000000..c212d1f27a --- /dev/null +++ b/crates/astria-auctioneer/src/sequencer_channel.rs @@ -0,0 +1,243 @@ +use std::{ + pin::Pin, + task::{ + ready, + Context, + Poll, + }, +}; + +use astria_core::{ + generated::astria::sequencerblock::optimistic::v1alpha1::{ + GetBlockCommitmentStreamRequest, + GetBlockCommitmentStreamResponse, + GetOptimisticBlockStreamRequest, + GetOptimisticBlockStreamResponse, + }, + primitive::v1::{ + Address, + RollupId, + }, + sequencerblock::v1::block::FilteredSequencerBlock, + Protobuf as _, +}; +use astria_eyre::eyre::{ + self, + eyre, + WrapErr as _, +}; +use futures::{ + stream::BoxStream, + Future, + Stream, + StreamExt as _, +}; +use prost::Name; +use 
tracing::{ + info_span, + warn, + Instrument as _, +}; + +use crate::{ + block::Commitment, + streaming_utils::{ + restarting_stream, + InstrumentedChannel, + }, +}; + +pub(crate) fn open(endpoint: &str) -> eyre::Result { + SequencerChannel::create(endpoint) + .wrap_err_with(|| format!("failed to create a gRPC channel to Sequencer at `{endpoint}`")) +} + +#[derive(Clone)] +pub(crate) struct SequencerChannel { + inner: InstrumentedChannel, +} + +impl SequencerChannel { + fn create(uri: &str) -> eyre::Result { + Ok(Self { + inner: crate::streaming_utils::make_instrumented_channel(uri)?, + }) + } + + pub(crate) fn get_pending_nonce( + &self, + address: Address, + ) -> impl Future> { + use astria_core::generated::astria::sequencerblock::v1::{ + sequencer_service_client::SequencerServiceClient, + GetPendingNonceRequest, + }; + + let mut client = SequencerServiceClient::new(self.inner.clone()); + async move { + let nonce = client + .get_pending_nonce(GetPendingNonceRequest { + address: Some(address.into_raw()), + }) + .await + .wrap_err("failed to fetch most recent pending nonce")? + .into_inner() + .inner; + Ok(nonce) + } + } + + pub(crate) fn open_get_block_commitment_stream(&self) -> BlockCommitmentStream { + use astria_core::generated::astria::sequencerblock::optimistic::v1alpha1:: + optimistic_block_service_client::OptimisticBlockServiceClient; + let chan = self.inner.clone(); + let inner = restarting_stream(move || { + let chan = chan.clone(); + async move { + let inner = OptimisticBlockServiceClient::new(chan) + .get_block_commitment_stream(GetBlockCommitmentStreamRequest {}) + .await + .wrap_err("failed to open block commitment stream") + .inspect_err(|error| warn!(%error))? 
+ .into_inner(); + Ok(InnerBlockCommitmentStream { + inner, + }) + } + .instrument(info_span!("request block commitment stream")) + }) + .boxed(); + BlockCommitmentStream { + inner, + } + } + + pub(crate) fn open_get_optimistic_block_stream( + &self, + rollup_id: RollupId, + ) -> OptimisticBlockStream { + use astria_core::generated::astria::sequencerblock::optimistic::v1alpha1::{ + optimistic_block_service_client::OptimisticBlockServiceClient, + GetOptimisticBlockStreamRequest, + }; + + let chan = self.inner.clone(); + let inner = restarting_stream(move || { + let chan = chan.clone(); + async move { + let mut client = OptimisticBlockServiceClient::new(chan); + let inner = client + .get_optimistic_block_stream(GetOptimisticBlockStreamRequest { + rollup_id: Some(rollup_id.into_raw()), + }) + .await + .wrap_err("failed to open optimistic block stream") + .inspect_err(|error| warn!(%error))? + .into_inner(); + Ok(InnerOptimisticBlockStream { + inner, + }) + } + .instrument(info_span!("request optimistic block stream")) + }) + .boxed(); + OptimisticBlockStream { + inner, + } + } +} + +/// A stream for receiving committed blocks from the sequencer. +pub(crate) struct BlockCommitmentStream { + inner: BoxStream<'static, eyre::Result>, +} + +impl Stream for BlockCommitmentStream { + type Item = eyre::Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_next_unpin(cx) + } +} + +struct InnerBlockCommitmentStream { + inner: tonic::Streaming, +} + +impl Stream for InnerBlockCommitmentStream { + type Item = eyre::Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + use astria_core::generated::astria::sequencerblock::optimistic::v1alpha1 as raw; + + let Some(res) = std::task::ready!(self.inner.poll_next_unpin(cx)) else { + return Poll::Ready(None); + }; + + let raw = res + .wrap_err("failed receiving message over stream")? 
+ .commitment + .ok_or_else(|| { + eyre!( + "expected field `{}.commitment` was not set", + GetBlockCommitmentStreamResponse::full_name() + ) + })?; + + let commitment = Commitment::try_from_raw(&raw).wrap_err_with(|| { + format!( + "failed to validate message `{}` received from server", + raw::SequencerBlockCommit::full_name() + ) + })?; + + Poll::Ready(Some(Ok(commitment))) + } +} + +pub(crate) struct OptimisticBlockStream { + inner: BoxStream<'static, eyre::Result>, +} + +impl Stream for OptimisticBlockStream { + type Item = eyre::Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_next_unpin(cx) + } +} + +struct InnerOptimisticBlockStream { + inner: tonic::Streaming, +} + +impl Stream for InnerOptimisticBlockStream { + type Item = eyre::Result; + + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut Context, + ) -> std::task::Poll> { + let Some(item) = ready!(self.inner.poll_next_unpin(cx)) else { + return Poll::Ready(None); + }; + let raw = item + .wrap_err("failed receiving message over stream")? 
+ .block + .ok_or_else(|| { + eyre!( + "expected field `{}.block` was not set", + GetOptimisticBlockStreamRequest::full_name() + ) + })?; + + let optimistic_block = FilteredSequencerBlock::try_from_raw(raw).wrap_err_with(|| { + format!( + "failed to validate `{}`", + FilteredSequencerBlock::full_name() + ) + })?; + + std::task::Poll::Ready(Some(Ok(optimistic_block))) + } +} diff --git a/crates/astria-auctioneer/src/streaming_utils.rs b/crates/astria-auctioneer/src/streaming_utils.rs new file mode 100644 index 0000000000..514ff7458b --- /dev/null +++ b/crates/astria-auctioneer/src/streaming_utils.rs @@ -0,0 +1,147 @@ +use std::{ + marker::PhantomData, + pin::Pin, + task::{ + ready, + Poll, + }, + time::Duration, +}; + +use astria_eyre::eyre::{ + self, + WrapErr as _, +}; +use bytes::Bytes; +use futures::{ + Future, + FutureExt as _, + Stream, + StreamExt as _, +}; +use http::{ + Request, + Response, +}; +use http_body::combinators::UnsyncBoxBody; +use pin_project_lite::pin_project; +use tonic::{ + transport::Channel, + Status, +}; +use tower::{ + util::BoxCloneService, + ServiceBuilder, +}; +use tower_http::{ + map_response_body::MapResponseBodyLayer, + trace::{ + DefaultMakeSpan, + TraceLayer, + }, +}; + +pub(crate) type InstrumentedChannel = BoxCloneService< + Request>, + Response>, + tonic::transport::Error, +>; + +pub(crate) fn make_instrumented_channel(uri: &str) -> eyre::Result { + let channel = Channel::from_shared(uri.to_string()) + .wrap_err("failed to create a channel to the provided uri")? 
+ .timeout(Duration::from_secs(2)) + .connect_timeout(Duration::from_secs(5)) + .connect_lazy(); + + let channel = ServiceBuilder::new() + .layer(MapResponseBodyLayer::new(UnsyncBoxBody::new)) + .layer( + TraceLayer::new_for_grpc().make_span_with(DefaultMakeSpan::new().include_headers(true)), + ) + .service(channel); + + Ok(InstrumentedChannel::new(channel)) +} + +pub(crate) fn restarting_stream(f: F) -> RestartingStream +where + F: Fn() -> Fut, + Fut: Future>, + S: Stream>, +{ + let opening_stream = Some(f()); + RestartingStream { + f, + opening_stream, + running_stream: None, + _phantom_data: PhantomData, + } +} + +// TODO: Adds logs. +// +// Specifically explain why Fut returns Option, and how to return +// an error to the user (tracing). +pin_project! { + pub(crate) struct RestartingStream { + f: F, + #[pin] + opening_stream: Option, + #[pin] + running_stream: Option, + _phantom_data: PhantomData>, + } +} + +impl Stream for RestartingStream +where + F: Fn() -> Fut, + Fut: Future>, + S: Stream>, +{ + type Item = S::Item; + + fn poll_next(self: Pin<&mut Self>, cx: &mut std::task::Context) -> Poll> { + let mut this = self.project(); + + if this.opening_stream.is_some() { + debug_assert!(this.running_stream.is_none()); + + let open_output = ready!( + this.opening_stream + .as_mut() + .as_pin_mut() + .expect("inside a branch that checks opening_stream == Some") + .poll_unpin(cx) + ); + + // The future has completed, unset it so it will not be polled again. 
+ Pin::set(&mut this.opening_stream, None); + match open_output { + Ok(stream) => Pin::set(&mut this.running_stream, Some(stream)), + Err(err) => return Poll::Ready(Some(Err(err))), + } + } + + if this.running_stream.is_some() { + debug_assert!(this.opening_stream.is_none()); + + if let Some(item) = ready!( + this.running_stream + .as_mut() + .as_pin_mut() + .expect("inside a branch that checks running_stream == Some") + .poll_next_unpin(cx) + ) { + return Poll::Ready(Some(item)); + } + + Pin::set(&mut this.running_stream, None); + Pin::set(&mut this.opening_stream, Some((*this.f)())); + return Poll::Pending; + } + + Poll::Ready(None) + } +} diff --git a/crates/astria-bridge-withdrawer/src/bridge_withdrawer/mod.rs b/crates/astria-bridge-withdrawer/src/bridge_withdrawer/mod.rs index b1aa923f72..aeb0f1a721 100644 --- a/crates/astria-bridge-withdrawer/src/bridge_withdrawer/mod.rs +++ b/crates/astria-bridge-withdrawer/src/bridge_withdrawer/mod.rs @@ -4,7 +4,7 @@ use std::{ time::Duration, }; -use astria_core::generated::sequencerblock::v1::sequencer_service_client::SequencerServiceClient; +use astria_core::generated::astria::sequencerblock::v1::sequencer_service_client::SequencerServiceClient; use astria_eyre::eyre::{ self, WrapErr as _, diff --git a/crates/astria-bridge-withdrawer/src/bridge_withdrawer/startup.rs b/crates/astria-bridge-withdrawer/src/bridge_withdrawer/startup.rs index 033b53f0a2..c80c170cab 100644 --- a/crates/astria-bridge-withdrawer/src/bridge_withdrawer/startup.rs +++ b/crates/astria-bridge-withdrawer/src/bridge_withdrawer/startup.rs @@ -4,7 +4,7 @@ use std::{ }; use astria_core::{ - generated::sequencerblock::v1::sequencer_service_client::{ + generated::astria::sequencerblock::v1::sequencer_service_client::{ self, SequencerServiceClient, }, @@ -270,20 +270,21 @@ impl Startup { the sequencer logic." 
); - let proto_tx = astria_core::generated::protocol::transaction::v1::Transaction::decode( - &*last_transaction.tx, - ) - .wrap_err_with(|| { - format!( + let proto_tx = + astria_core::generated::astria::protocol::transaction::v1::Transaction::decode( + &*last_transaction.tx, + ) + .wrap_err_with(|| { + format!( "failed to decode data in Sequencer CometBFT transaction as `{}`", - astria_core::generated::protocol::transaction::v1::Transaction::full_name(), + astria_core::generated::astria::protocol::transaction::v1::Transaction::full_name(), ) - })?; + })?; let tx = Transaction::try_from_raw(proto_tx).wrap_err_with(|| { format!( "failed to verify {}", - astria_core::generated::protocol::transaction::v1::Transaction::full_name() + astria_core::generated::astria::protocol::transaction::v1::Transaction::full_name() ) })?; diff --git a/crates/astria-bridge-withdrawer/src/bridge_withdrawer/submitter/builder.rs b/crates/astria-bridge-withdrawer/src/bridge_withdrawer/submitter/builder.rs index 7b5593a0aa..62308d1572 100644 --- a/crates/astria-bridge-withdrawer/src/bridge_withdrawer/submitter/builder.rs +++ b/crates/astria-bridge-withdrawer/src/bridge_withdrawer/submitter/builder.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use astria_core::generated::sequencerblock::v1::sequencer_service_client::SequencerServiceClient; +use astria_core::generated::astria::sequencerblock::v1::sequencer_service_client::SequencerServiceClient; use astria_eyre::eyre::{ self, Context as _, diff --git a/crates/astria-bridge-withdrawer/src/bridge_withdrawer/submitter/mod.rs b/crates/astria-bridge-withdrawer/src/bridge_withdrawer/submitter/mod.rs index 0bd72886a5..a7c195c862 100644 --- a/crates/astria-bridge-withdrawer/src/bridge_withdrawer/submitter/mod.rs +++ b/crates/astria-bridge-withdrawer/src/bridge_withdrawer/submitter/mod.rs @@ -4,7 +4,7 @@ use std::{ }; use astria_core::{ - generated::sequencerblock::v1::{ + generated::astria::sequencerblock::v1::{ sequencer_service_client::{ self, 
SequencerServiceClient, diff --git a/crates/astria-bridge-withdrawer/tests/blackbox/helpers/mock_cometbft.rs b/crates/astria-bridge-withdrawer/tests/blackbox/helpers/mock_cometbft.rs index c3f23d53fc..f0e4d0cef4 100644 --- a/crates/astria-bridge-withdrawer/tests/blackbox/helpers/mock_cometbft.rs +++ b/crates/astria-bridge-withdrawer/tests/blackbox/helpers/mock_cometbft.rs @@ -332,7 +332,7 @@ fn prepare_broadcast_tx_sync_response(response: tx_sync::Response) -> Mock { /// Convert a wiremock request to an astria transaction pub fn tx_from_request(request: &wiremock::Request) -> Transaction { - use astria_core::generated::protocol::transaction::v1::Transaction as RawTransaction; + use astria_core::generated::astria::protocol::transaction::v1::Transaction as RawTransaction; use prost::Message as _; let wrapped_tx_sync_req: tendermint_rpc::request::Wrapper = diff --git a/crates/astria-bridge-withdrawer/tests/blackbox/helpers/mock_sequencer.rs b/crates/astria-bridge-withdrawer/tests/blackbox/helpers/mock_sequencer.rs index 66dc282e31..9a2971b790 100644 --- a/crates/astria-bridge-withdrawer/tests/blackbox/helpers/mock_sequencer.rs +++ b/crates/astria-bridge-withdrawer/tests/blackbox/helpers/mock_sequencer.rs @@ -5,7 +5,7 @@ use std::{ use astria_core::{ self, - generated::sequencerblock::v1::{ + generated::astria::sequencerblock::v1::{ sequencer_service_server::{ SequencerService, SequencerServiceServer, diff --git a/crates/astria-cli/CHANGELOG.md b/crates/astria-cli/CHANGELOG.md index 09bc9674b2..738181bef5 100644 --- a/crates/astria-cli/CHANGELOG.md +++ b/crates/astria-cli/CHANGELOG.md @@ -9,6 +9,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Added + +- Add `fee-assets` subcommand to `sequencer` CLI [#1816](https://github.com/astriaorg/astria/pull/1816). 
+ ### Fixed - Fixed ICS20 withdrawal source when using channel with more than one diff --git a/crates/astria-cli/src/sequencer/fee_assets.rs b/crates/astria-cli/src/sequencer/fee_assets.rs new file mode 100644 index 0000000000..b5fa567e5c --- /dev/null +++ b/crates/astria-cli/src/sequencer/fee_assets.rs @@ -0,0 +1,58 @@ +use astria_sequencer_client::{ + HttpClient, + SequencerClientExt as _, +}; +use clap::Subcommand; +use color_eyre::eyre::{ + self, + WrapErr as _, +}; + +#[derive(Debug, clap::Args)] +pub(super) struct Command { + #[command(subcommand)] + command: SubCommand, +} + +impl Command { + pub(super) async fn run(self) -> eyre::Result<()> { + let SubCommand::Get(get) = self.command; + get.run().await + } +} + +#[derive(Debug, Subcommand)] +enum SubCommand { + /// Get the allowed fee assets of the Sequencer + Get(Get), +} + +#[derive(clap::Args, Debug)] +struct Get { + /// The url of the Sequencer node + #[arg( + long, + env = "SEQUENCER_URL", + default_value = crate::DEFAULT_SEQUENCER_RPC + )] + sequencer_url: String, +} + +impl Get { + async fn run(self) -> eyre::Result<()> { + let sequencer_client = HttpClient::new(self.sequencer_url.as_str()) + .wrap_err("failed constructing http sequencer client")?; + + let res = sequencer_client + .get_allowed_fee_assets() + .await + .wrap_err("failed to get fee assets")?; + + println!("Allowed fee assets:"); + for asset in res.fee_assets { + println!(" {asset}"); + } + + Ok(()) + } +} diff --git a/crates/astria-cli/src/sequencer/mod.rs b/crates/astria-cli/src/sequencer/mod.rs index 6beda8d5ec..d8fa0a7233 100644 --- a/crates/astria-cli/src/sequencer/mod.rs +++ b/crates/astria-cli/src/sequencer/mod.rs @@ -8,6 +8,7 @@ mod block_height; mod bridge_account; mod bridge_lock; mod bridge_sudo_change; +mod fee_assets; mod ics20_withdrawal; mod init_bridge_account; mod sign; @@ -39,6 +40,7 @@ impl Command { SubCommand::Sign(sign) => sign.run(), SubCommand::BridgeSudoChange(bridge_sudo_change) => bridge_sudo_change.run().await,
SubCommand::BridgeAccount(bridge_account) => bridge_account.run().await, + SubCommand::FeeAssets(fee_assets) => fee_assets.run().await, } } } @@ -80,4 +82,6 @@ enum SubCommand { BridgeSudoChange(bridge_sudo_change::Command), /// Commands for interacting with the bridge account BridgeAccount(bridge_account::Command), + /// Command for interacting with allowed fee assets + FeeAssets(fee_assets::Command), } diff --git a/crates/astria-cli/src/sequencer/threshold/sign.rs b/crates/astria-cli/src/sequencer/threshold/sign.rs index 93e65987fc..b5a3830f24 100644 --- a/crates/astria-cli/src/sequencer/threshold/sign.rs +++ b/crates/astria-cli/src/sequencer/threshold/sign.rs @@ -1,6 +1,6 @@ use std::collections::BTreeMap; -use astria_core::generated::protocol::transaction::v1::{ +use astria_core::generated::astria::protocol::transaction::v1::{ Transaction, TransactionBody, }; diff --git a/crates/astria-cli/src/sequencer/threshold/verify.rs b/crates/astria-cli/src/sequencer/threshold/verify.rs index f616cb4475..ec4d027b46 100644 --- a/crates/astria-cli/src/sequencer/threshold/verify.rs +++ b/crates/astria-cli/src/sequencer/threshold/verify.rs @@ -1,4 +1,4 @@ -use astria_core::generated::protocol::transaction::v1::TransactionBody; +use astria_core::generated::astria::protocol::transaction::v1::TransactionBody; use color_eyre::eyre::{ self, WrapErr as _, diff --git a/crates/astria-composer/src/collectors/grpc.rs b/crates/astria-composer/src/collectors/grpc.rs index 18d95adb93..deeb580546 100644 --- a/crates/astria-composer/src/collectors/grpc.rs +++ b/crates/astria-composer/src/collectors/grpc.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use astria_core::{ - generated::composer::v1::{ + generated::astria::composer::v1::{ grpc_collector_service_server::GrpcCollectorService, SubmitRollupTransactionRequest, SubmitRollupTransactionResponse, diff --git a/crates/astria-composer/src/executor/builder.rs b/crates/astria-composer/src/executor/builder.rs index c388a63121..31a8ee04fa 100644 --- 
a/crates/astria-composer/src/executor/builder.rs +++ b/crates/astria-composer/src/executor/builder.rs @@ -6,7 +6,7 @@ use std::{ use astria_core::{ crypto::SigningKey, - generated::sequencerblock::v1::sequencer_service_client::SequencerServiceClient, + generated::astria::sequencerblock::v1::sequencer_service_client::SequencerServiceClient, primitive::v1::Address, protocol::transaction::v1::action::RollupDataSubmission, }; diff --git a/crates/astria-composer/src/executor/mod.rs b/crates/astria-composer/src/executor/mod.rs index a07b2e4fe6..62ed88e966 100644 --- a/crates/astria-composer/src/executor/mod.rs +++ b/crates/astria-composer/src/executor/mod.rs @@ -12,7 +12,7 @@ use std::{ use astria_core::{ crypto::SigningKey, - generated::sequencerblock::v1::{ + generated::astria::sequencerblock::v1::{ sequencer_service_client::{ self, SequencerServiceClient, diff --git a/crates/astria-composer/src/grpc.rs b/crates/astria-composer/src/grpc.rs index f9453b36ef..7c1d3b8ef1 100644 --- a/crates/astria-composer/src/grpc.rs +++ b/crates/astria-composer/src/grpc.rs @@ -9,7 +9,7 @@ use std::net::SocketAddr; use astria_core::{ - generated::composer::v1::grpc_collector_service_server::GrpcCollectorServiceServer, + generated::astria::composer::v1::grpc_collector_service_server::GrpcCollectorServiceServer, primitive::v1::asset, }; use astria_eyre::{ diff --git a/crates/astria-composer/tests/blackbox/grpc_collector.rs b/crates/astria-composer/tests/blackbox/grpc_collector.rs index f8286a25d8..18767ab0ab 100644 --- a/crates/astria-composer/tests/blackbox/grpc_collector.rs +++ b/crates/astria-composer/tests/blackbox/grpc_collector.rs @@ -1,7 +1,7 @@ use std::time::Duration; use astria_core::{ - generated::composer::v1::{ + generated::astria::composer::v1::{ grpc_collector_service_client::GrpcCollectorServiceClient, SubmitRollupTransactionRequest, }, diff --git a/crates/astria-composer/tests/blackbox/helper/mock_grpc_sequencer.rs 
b/crates/astria-composer/tests/blackbox/helper/mock_grpc_sequencer.rs index 23be34c31a..0897610967 100644 --- a/crates/astria-composer/tests/blackbox/helper/mock_grpc_sequencer.rs +++ b/crates/astria-composer/tests/blackbox/helper/mock_grpc_sequencer.rs @@ -5,7 +5,7 @@ use std::{ use astria_core::{ self, - generated::sequencerblock::v1::{ + generated::astria::sequencerblock::v1::{ sequencer_service_server::{ SequencerService, SequencerServiceServer, diff --git a/crates/astria-composer/tests/blackbox/helper/mod.rs b/crates/astria-composer/tests/blackbox/helper/mod.rs index 93f7f6a1a7..fca7da0f41 100644 --- a/crates/astria-composer/tests/blackbox/helper/mod.rs +++ b/crates/astria-composer/tests/blackbox/helper/mod.rs @@ -207,7 +207,7 @@ pub async fn loop_until_composer_is_ready(addr: SocketAddr) { } fn signed_tx_from_request(request: &Request) -> Transaction { - use astria_core::generated::protocol::transaction::v1::Transaction as RawTransaction; + use astria_core::generated::astria::protocol::transaction::v1::Transaction as RawTransaction; use prost::Message as _; let wrapped_tx_sync_req: request::Wrapper = diff --git a/crates/astria-conductor/src/celestia/block_verifier.rs b/crates/astria-conductor/src/celestia/block_verifier.rs index ff72aa8a7c..e06dff816e 100644 --- a/crates/astria-conductor/src/celestia/block_verifier.rs +++ b/crates/astria-conductor/src/celestia/block_verifier.rs @@ -221,10 +221,13 @@ mod tests { use std::collections::BTreeMap; use astria_core::{ - generated::sequencerblock::v1::SequencerBlockHeader as RawSequencerBlockHeader, + generated::astria::sequencerblock::v1::SequencerBlockHeader as RawSequencerBlockHeader, primitive::v1::RollupId, sequencerblock::v1::{ - block::SequencerBlockHeader, + block::{ + BlockHash, + SequencerBlockHeader, + }, celestia::UncheckedSubmittedMetadata, }, }; @@ -345,7 +348,7 @@ mod tests { let header = SequencerBlockHeader::try_from_raw(header).unwrap(); let sequencer_blob = UncheckedSubmittedMetadata { - 
block_hash: [0u8; 32], + block_hash: BlockHash::new([0u8; 32]), header, rollup_ids: vec![], rollup_transactions_proof, @@ -390,7 +393,7 @@ mod tests { let header = SequencerBlockHeader::try_from_raw(header).unwrap(); let sequencer_blob = UncheckedSubmittedMetadata { - block_hash: [0u8; 32], + block_hash: BlockHash::new([0u8; 32]), header, rollup_ids: vec![rollup_id], rollup_transactions_proof, diff --git a/crates/astria-conductor/src/celestia/convert.rs b/crates/astria-conductor/src/celestia/convert.rs index c2772d4f15..02861136d1 100644 --- a/crates/astria-conductor/src/celestia/convert.rs +++ b/crates/astria-conductor/src/celestia/convert.rs @@ -1,6 +1,6 @@ use astria_core::{ brotli::decompress_bytes, - generated::sequencerblock::v1::{ + generated::astria::sequencerblock::v1::{ SubmittedMetadataList, SubmittedRollupDataList, }, diff --git a/crates/astria-conductor/src/celestia/mod.rs b/crates/astria-conductor/src/celestia/mod.rs index 85146dc645..44c5924208 100644 --- a/crates/astria-conductor/src/celestia/mod.rs +++ b/crates/astria-conductor/src/celestia/mod.rs @@ -6,7 +6,10 @@ use std::{ use astria_core::{ primitive::v1::RollupId, - sequencerblock::v1::block::SequencerBlockHeader, + sequencerblock::v1::block::{ + BlockHash, + SequencerBlockHeader, + }, }; use astria_eyre::eyre::{ self, @@ -103,12 +106,16 @@ use crate::{ #[derive(Clone, Debug)] pub(crate) struct ReconstructedBlock { pub(crate) celestia_height: u64, - pub(crate) block_hash: [u8; 32], + pub(crate) block_hash: BlockHash, pub(crate) header: SequencerBlockHeader, pub(crate) transactions: Vec, } impl ReconstructedBlock { + pub(crate) fn block_hash(&self) -> &BlockHash { + &self.block_hash + } + pub(crate) fn sequencer_height(&self) -> SequencerHeight { self.header.height() } @@ -458,7 +465,7 @@ impl RunningReader { error = %eyre::Report::new(e), source_celestia_height = celestia_height, sequencer_height, - block_hash = %base64(&block_hash), + block_hash = %block_hash, "failed pushing reconstructed 
block into sequential cache; dropping it", ); } diff --git a/crates/astria-conductor/src/celestia/reconstruct.rs b/crates/astria-conductor/src/celestia/reconstruct.rs index 3ee84f93be..a66dbfc8ef 100644 --- a/crates/astria-conductor/src/celestia/reconstruct.rs +++ b/crates/astria-conductor/src/celestia/reconstruct.rs @@ -3,12 +3,12 @@ use std::collections::HashMap; use astria_core::{ primitive::v1::RollupId, sequencerblock::v1::{ + block::BlockHash, celestia::UncheckedSubmittedMetadata, SubmittedMetadata, SubmittedRollupData, }, }; -use telemetry::display::base64; use tracing::{ info, warn, @@ -72,7 +72,7 @@ pub(super) fn reconstruct_blocks_from_verified_blobs( "no sequencer header blob matching the rollup blob's block hash found" }; info!( - block_hash = %base64(&rollup.sequencer_block_hash()), + block_hash = %rollup.sequencer_block_hash(), reason, "dropping rollup blob", ); @@ -83,7 +83,7 @@ pub(super) fn reconstruct_blocks_from_verified_blobs( for header_blob in header_blobs.into_values() { if header_blob.contains_rollup_id(rollup_id) { warn!( - block_hash = %base64(header_blob.block_hash()), + block_hash = %header_blob.block_hash(), "sequencer header blob contains the target rollup ID, but no matching rollup blob was found; dropping it", ); } else { @@ -99,7 +99,7 @@ pub(super) fn reconstruct_blocks_from_verified_blobs( } fn remove_header_blob_matching_rollup_blob( - headers: &mut HashMap<[u8; 32], SubmittedMetadata>, + headers: &mut HashMap, rollup: &SubmittedRollupData, ) -> Option { // chaining methods and returning () to use the ? 
operator and to not bind the value diff --git a/crates/astria-conductor/src/celestia/reporting.rs b/crates/astria-conductor/src/celestia/reporting.rs index cd16454b6f..284bc4ac6c 100644 --- a/crates/astria-conductor/src/celestia/reporting.rs +++ b/crates/astria-conductor/src/celestia/reporting.rs @@ -53,7 +53,8 @@ impl<'a> Serialize for ReportReconstructedBlock<'a> { ]; let mut state = serializer.serialize_struct("ReconstructedBlockInfo", FIELDS.len())?; state.serialize_field(FIELDS[0], &self.0.celestia_height)?; - state.serialize_field(FIELDS[1], &base64(&self.0.block_hash))?; + // TODO: use the block hash's Display impl for this + state.serialize_field(FIELDS[1], &base64(&*self.0.block_hash))?; state.serialize_field(FIELDS[2], &self.0.transactions.len())?; state.serialize_field(FIELDS[3], &self.0.celestia_height)?; state.end() diff --git a/crates/astria-conductor/src/celestia/verify.rs b/crates/astria-conductor/src/celestia/verify.rs index 3ac09fa9d1..973a0f007f 100644 --- a/crates/astria-conductor/src/celestia/verify.rs +++ b/crates/astria-conductor/src/celestia/verify.rs @@ -5,6 +5,7 @@ use std::{ }; use astria_core::sequencerblock::v1::{ + block::BlockHash, SubmittedMetadata, SubmittedRollupData, }; @@ -25,7 +26,6 @@ use sequencer_client::{ Client as _, HttpClient as SequencerClient, }; -use telemetry::display::base64; use tokio_util::task::JoinMap; use tower::{ util::BoxService, @@ -58,7 +58,7 @@ use crate::executor::{ pub(super) struct VerifiedBlobs { celestia_height: u64, - header_blobs: HashMap<[u8; 32], SubmittedMetadata>, + header_blobs: HashMap, rollup_blobs: Vec, } @@ -75,7 +75,7 @@ impl VerifiedBlobs { self, ) -> ( u64, - HashMap<[u8; 32], SubmittedMetadata>, + HashMap, Vec, ) { (self.celestia_height, self.header_blobs, self.rollup_blobs) @@ -88,7 +88,7 @@ impl VerifiedBlobs { #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] struct VerificationTaskKey { index: usize, - block_hash: [u8; 32], + block_hash: BlockHash, sequencer_height: SequencerHeight, 
} @@ -142,7 +142,7 @@ pub(super) async fn verify_metadata( .get(dropped_entry.block_hash()) .expect("must exist; just inserted an item under the same key"); info!( - block_hash = %base64(&dropped_entry.block_hash()), + block_hash = %dropped_entry.block_hash(), dropped_blob.sequencer_height = dropped_entry.height().value(), accepted_blob.sequencer_height = accepted_entry.height().value(), "two Sequencer header blobs were well formed and validated against \ @@ -154,7 +154,7 @@ pub(super) async fn verify_metadata( Ok(None) => {} Err(error) => { info!( - block_hash = %base64(&key.block_hash), + block_hash = %key.block_hash, sequencer_height = %key.sequencer_height, %error, "verification of sequencer blob was cancelled abruptly; dropping it" @@ -552,10 +552,13 @@ fn ensure_chain_ids_match(in_commit: &str, in_header: &str) -> eyre::Result<()> Ok(()) } -fn ensure_block_hashes_match(in_commit: &[u8], in_header: &[u8]) -> eyre::Result<()> { +fn ensure_block_hashes_match(in_commit: &[u8], in_header: &BlockHash) -> eyre::Result<()> { use base64::prelude::*; + // NOTE: we still re-encode the block hash using base64 instead of its display impl + // to ensure that the formatting of the two byte slices doesn't accidentally go + // out of whack should the display impl change. 
ensure!( - in_commit == in_header, + in_commit == in_header.as_ref(), "expected block hash `{}` (from commit), but found `{}` in retrieved metadata", BASE64_STANDARD.encode(in_commit), BASE64_STANDARD.encode(in_header), diff --git a/crates/astria-conductor/src/executor/client.rs b/crates/astria-conductor/src/executor/client.rs index bbd0b550cf..1bab377d97 100644 --- a/crates/astria-conductor/src/executor/client.rs +++ b/crates/astria-conductor/src/executor/client.rs @@ -6,7 +6,7 @@ use astria_core::{ CommitmentState, GenesisInfo, }, - generated::{ + generated::astria::{ execution::{ v1 as raw, v1::execution_service_client::ExecutionServiceClient, diff --git a/crates/astria-conductor/src/executor/mod.rs b/crates/astria-conductor/src/executor/mod.rs index 8c3155ca8b..3d1a32d30c 100644 --- a/crates/astria-conductor/src/executor/mod.rs +++ b/crates/astria-conductor/src/executor/mod.rs @@ -7,6 +7,7 @@ use astria_core::{ }, primitive::v1::RollupId, sequencerblock::v1::block::{ + BlockHash, FilteredSequencerBlock, FilteredSequencerBlockParts, }, @@ -285,7 +286,7 @@ impl Executor { { debug_span!("conductor::Executor::run_until_stopped").in_scope(||debug!( block.height = %block.sequencer_height(), - block.hash = %telemetry::display::base64(&block.block_hash), + block.hash = %block.block_hash(), "received block from celestia reader", )); if let Err(error) = self.execute_firm(block).await { @@ -298,7 +299,7 @@ impl Executor { { debug_span!("conductor::Executor::run_until_stopped").in_scope(||debug!( block.height = %block.height(), - block.hash = %telemetry::display::base64(&block.block_hash()), + block.hash = %block.block_hash(), "received block from sequencer reader", )); if let Err(error) = self.execute_soft(block).await { @@ -388,7 +389,7 @@ impl Executor { } #[instrument(skip_all, fields( - block.hash = %telemetry::display::base64(&block.block_hash()), + block.hash = %block.block_hash(), block.height = block.height().value(), err, ))] @@ -452,7 +453,7 @@ impl Executor { } 
#[instrument(skip_all, fields( - block.hash = %telemetry::display::base64(&block.block_hash), + block.hash = %block.block_hash(), block.height = block.sequencer_height().value(), err, ))] @@ -532,7 +533,7 @@ impl Executor { /// This function is called via [`Executor::execute_firm`] or [`Executor::execute_soft`], /// and should not be called directly. #[instrument(skip_all, fields( - block.hash = %telemetry::display::base64(&block.hash), + block.hash = %block.hash, block.height = block.height.value(), block.num_of_transactions = block.transactions.len(), rollup.parent_hash = %telemetry::display::base64(&parent_hash), @@ -690,7 +691,7 @@ enum Update { #[derive(Debug)] struct ExecutableBlock { - hash: [u8; 32], + hash: BlockHash, height: SequencerHeight, timestamp: pbjson_types::Timestamp, transactions: Vec, diff --git a/crates/astria-conductor/src/executor/state.rs b/crates/astria-conductor/src/executor/state.rs index bfe794e6c1..2ae4e2c4b2 100644 --- a/crates/astria-conductor/src/executor/state.rs +++ b/crates/astria-conductor/src/executor/state.rs @@ -344,7 +344,7 @@ pub(super) fn map_sequencer_height_to_rollup_height( #[cfg(test)] mod tests { use astria_core::{ - generated::execution::v1 as raw, + generated::astria::execution::v1 as raw, Protobuf as _, }; use pbjson_types::Timestamp; diff --git a/crates/astria-conductor/src/executor/tests.rs b/crates/astria-conductor/src/executor/tests.rs index 164d8b0a20..e8ead9dc71 100644 --- a/crates/astria-conductor/src/executor/tests.rs +++ b/crates/astria-conductor/src/executor/tests.rs @@ -5,7 +5,7 @@ use astria_core::{ CommitmentState, GenesisInfo, }, - generated::execution::v1 as raw, + generated::astria::execution::v1 as raw, Protobuf as _, }; use bytes::Bytes; diff --git a/crates/astria-conductor/src/sequencer/client.rs b/crates/astria-conductor/src/sequencer/client.rs index 760a5ede27..e0e61d2229 100644 --- a/crates/astria-conductor/src/sequencer/client.rs +++ b/crates/astria-conductor/src/sequencer/client.rs @@ -3,7 
+3,7 @@ use std::time::Duration; use astria_core::{ - generated::sequencerblock::v1::{ + generated::astria::sequencerblock::v1::{ sequencer_service_client::SequencerServiceClient, GetFilteredSequencerBlockRequest, }, diff --git a/crates/astria-conductor/tests/blackbox/firm_only.rs b/crates/astria-conductor/tests/blackbox/firm_only.rs index 2fb2c43148..e634292a1c 100644 --- a/crates/astria-conductor/tests/blackbox/firm_only.rs +++ b/crates/astria-conductor/tests/blackbox/firm_only.rs @@ -5,7 +5,7 @@ use astria_conductor::{ Conductor, Config, }; -use astria_core::generated::execution::v1::{ +use astria_core::generated::astria::execution::v1::{ GetCommitmentStateRequest, GetGenesisInfoRequest, }; diff --git a/crates/astria-conductor/tests/blackbox/helpers/macros.rs b/crates/astria-conductor/tests/blackbox/helpers/macros.rs index 4119fe1bf6..981f4c31dc 100644 --- a/crates/astria-conductor/tests/blackbox/helpers/macros.rs +++ b/crates/astria-conductor/tests/blackbox/helpers/macros.rs @@ -1,7 +1,7 @@ #[macro_export] macro_rules! block { (number: $number:expr,hash: $hash:expr,parent: $parent:expr $(,)?) => { - ::astria_core::generated::execution::v1::Block { + ::astria_core::generated::astria::execution::v1::Block { number: $number, hash: ::bytes::Bytes::from(Vec::from($hash)), parent_block_hash: ::bytes::Bytes::from(Vec::from($parent)), @@ -59,7 +59,7 @@ macro_rules! commitment_state { soft: (number: $soft_number:expr,hash: $soft_hash:expr,parent: $soft_parent:expr $(,)?), base_celestia_height: $base_celestia_height:expr $(,)? ) => { - ::astria_core::generated::execution::v1::CommitmentState { + ::astria_core::generated::astria::execution::v1::CommitmentState { firm: Some($crate::block!( number: $firm_number, hash: $firm_hash, @@ -98,7 +98,7 @@ macro_rules! genesis_info { $sequencer_height:expr,celestia_block_variance: $variance:expr $(,)? 
) => { - ::astria_core::generated::execution::v1::GenesisInfo { + ::astria_core::generated::astria::execution::v1::GenesisInfo { rollup_id: Some($crate::ROLLUP_ID.to_raw()), sequencer_genesis_block_height: $sequencer_height, celestia_block_variance: $variance, @@ -312,7 +312,7 @@ macro_rules! mount_get_filtered_sequencer_block { ($test_env:ident, sequencer_height: $height:expr, delay: $delay:expr $(,)?) => { $test_env .mount_get_filtered_sequencer_block( - ::astria_core::generated::sequencerblock::v1::GetFilteredSequencerBlockRequest { + ::astria_core::generated::astria::sequencerblock::v1::GetFilteredSequencerBlockRequest { height: $height, rollup_ids: vec![$crate::ROLLUP_ID.to_raw()], }, @@ -385,12 +385,12 @@ macro_rules! mount_get_block { hash: $hash, parent: $parent, ); - let identifier = ::astria_core::generated::execution::v1::BlockIdentifier { + let identifier = ::astria_core::generated::astria::execution::v1::BlockIdentifier { identifier: Some( - ::astria_core::generated::execution::v1::block_identifier::Identifier::BlockNumber(block.number) + ::astria_core::generated::astria::execution::v1::block_identifier::Identifier::BlockNumber(block.number) )}; $test_env.mount_get_block( - ::astria_core::generated::execution::v1::GetBlockRequest { + ::astria_core::generated::astria::execution::v1::GetBlockRequest { identifier: Some(identifier), }, block, diff --git a/crates/astria-conductor/tests/blackbox/helpers/mock_grpc.rs b/crates/astria-conductor/tests/blackbox/helpers/mock_grpc.rs index c0b8b7ff2f..457ef04e3d 100644 --- a/crates/astria-conductor/tests/blackbox/helpers/mock_grpc.rs +++ b/crates/astria-conductor/tests/blackbox/helpers/mock_grpc.rs @@ -3,7 +3,7 @@ use std::{ sync::Arc, }; -use astria_core::generated::{ +use astria_core::generated::astria::{ execution::v1::{ execution_service_server::{ ExecutionService, diff --git a/crates/astria-conductor/tests/blackbox/helpers/mod.rs b/crates/astria-conductor/tests/blackbox/helpers/mod.rs index 
da7dad5061..c14ffff018 100644 --- a/crates/astria-conductor/tests/blackbox/helpers/mod.rs +++ b/crates/astria-conductor/tests/blackbox/helpers/mod.rs @@ -12,7 +12,7 @@ use astria_conductor::{ }; use astria_core::{ brotli::compress_bytes, - generated::{ + generated::astria::{ execution::v1::{ Block, CommitmentState, @@ -179,7 +179,7 @@ impl TestConductor { pub async fn mount_get_block( &self, expected_pbjson: S, - block: astria_core::generated::execution::v1::Block, + block: astria_core::generated::astria::execution::v1::Block, ) { use astria_grpc_mock::{ matcher::message_partial_pbjson, @@ -306,7 +306,7 @@ impl TestConductor { } pub async fn mount_get_genesis_info(&self, genesis_info: GenesisInfo) { - use astria_core::generated::execution::v1::GetGenesisInfoRequest; + use astria_core::generated::astria::execution::v1::GetGenesisInfoRequest; astria_grpc_mock::Mock::for_rpc_given( "get_genesis_info", astria_grpc_mock::matcher::message_type::(), @@ -318,7 +318,7 @@ impl TestConductor { } pub async fn mount_get_commitment_state(&self, commitment_state: CommitmentState) { - use astria_core::generated::execution::v1::GetCommitmentStateRequest; + use astria_core::generated::astria::execution::v1::GetCommitmentStateRequest; astria_grpc_mock::Mock::for_rpc_given( "get_commitment_state", @@ -381,7 +381,7 @@ impl TestConductor { commitment_state: CommitmentState, expected_calls: u64, ) -> astria_grpc_mock::MockGuard { - use astria_core::generated::execution::v1::UpdateCommitmentStateRequest; + use astria_core::generated::astria::execution::v1::UpdateCommitmentStateRequest; use astria_grpc_mock::{ matcher::message_partial_pbjson, response::constant_response, @@ -567,7 +567,7 @@ pub struct Blobs { #[must_use] pub fn make_blobs(heights: &[u32]) -> Blobs { - use astria_core::generated::sequencerblock::v1::{ + use astria_core::generated::astria::sequencerblock::v1::{ SubmittedMetadataList, SubmittedRollupDataList, }; @@ -642,7 +642,7 @@ pub fn make_commit(height: u32) -> 
tendermint::block::Commit { height: height.into(), round: 0u16.into(), block_id: Some(tendermint::block::Id { - hash: tendermint::Hash::Sha256(block_hash), + hash: tendermint::Hash::Sha256(block_hash.get()), part_set_header: tendermint::block::parts::Header::default(), }), timestamp: Some(timestamp), @@ -657,7 +657,7 @@ pub fn make_commit(height: u32) -> tendermint::block::Commit { height: height.into(), round: 0u16.into(), block_id: tendermint::block::Id { - hash: tendermint::Hash::Sha256(block_hash), + hash: tendermint::Hash::Sha256(block_hash.get()), part_set_header: tendermint::block::parts::Header::default(), }, signatures: vec![tendermint::block::CommitSig::BlockIdFlagCommit { diff --git a/crates/astria-conductor/tests/blackbox/soft_only.rs b/crates/astria-conductor/tests/blackbox/soft_only.rs index e82793c638..8fff4e8c1c 100644 --- a/crates/astria-conductor/tests/blackbox/soft_only.rs +++ b/crates/astria-conductor/tests/blackbox/soft_only.rs @@ -5,7 +5,7 @@ use astria_conductor::{ Conductor, Config, }; -use astria_core::generated::execution::v1::{ +use astria_core::generated::astria::execution::v1::{ GetCommitmentStateRequest, GetGenesisInfoRequest, }; diff --git a/crates/astria-core/CHANGELOG.md b/crates/astria-core/CHANGELOG.md index dd487eeab3..7a812add6d 100644 --- a/crates/astria-core/CHANGELOG.md +++ b/crates/astria-core/CHANGELOG.md @@ -15,6 +15,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added method `TracePrefixed::leading_channel` to read the left-most channel of a trace prefixed ICS20 asset [#1768](https://github.com/astriaorg/astria/pull/1768) +### Changed + +- Move all Astria APIs generated from the Protobuf spec from `astria_core::generated` + to `astria_core::generated::astria` + [#1825](https://github.com/astriaorg/astria/pull/1825) + ### Removed - Removed method `TracePrefixed::last_channel` [#1768](https://github.com/astriaorg/astria/pull/1768) diff --git 
a/crates/astria-core/src/execution/v1/mod.rs b/crates/astria-core/src/execution/v1/mod.rs index 6daca96a42..3327cd0edc 100644 --- a/crates/astria-core/src/execution/v1/mod.rs +++ b/crates/astria-core/src/execution/v1/mod.rs @@ -2,7 +2,7 @@ use bytes::Bytes; use pbjson_types::Timestamp; use crate::{ - generated::execution::v1 as raw, + generated::astria::execution::v1 as raw, primitive::v1::{ IncorrectRollupIdLength, RollupId, @@ -43,7 +43,7 @@ enum GenesisInfoErrorKind { #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr( feature = "serde", - serde(into = "crate::generated::execution::v1::GenesisInfo") + serde(into = "crate::generated::astria::execution::v1::GenesisInfo") )] pub struct GenesisInfo { /// The rollup id which is used to identify the rollup txs. @@ -148,7 +148,7 @@ enum BlockErrorKind { #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr( feature = "serde", - serde(into = "crate::generated::execution::v1::Block") + serde(into = "crate::generated::astria::execution::v1::Block") )] pub struct Block { /// The block number @@ -380,7 +380,7 @@ impl CommitmentStateBuilder { #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr( feature = "serde", - serde(into = "crate::generated::execution::v1::CommitmentState") + serde(into = "crate::generated::astria::execution::v1::CommitmentState") )] pub struct CommitmentState { /// Soft commitment is the rollup block matching latest sequencer block. 
diff --git a/crates/astria-core/src/generated/astria.protocol.transaction.v1.rs b/crates/astria-core/src/generated/astria.protocol.transaction.v1.rs index 6bd2ba2fdc..0b38628394 100644 --- a/crates/astria-core/src/generated/astria.protocol.transaction.v1.rs +++ b/crates/astria-core/src/generated/astria.protocol.transaction.v1.rs @@ -36,7 +36,7 @@ pub mod action { SudoAddressChange(super::SudoAddressChange), #[prost(message, tag = "51")] ValidatorUpdate( - crate::generated::astria_vendored::tendermint::abci::ValidatorUpdate, + super::super::super::super::super::astria_vendored::tendermint::abci::ValidatorUpdate, ), #[prost(message, tag = "52")] IbcRelayerChange(super::IbcRelayerChange), diff --git a/crates/astria-core/src/generated/astria.protocol.transaction.v1alpha1.rs b/crates/astria-core/src/generated/astria.protocol.transaction.v1alpha1.rs index 75c1bf44b4..a310970f2b 100644 --- a/crates/astria-core/src/generated/astria.protocol.transaction.v1alpha1.rs +++ b/crates/astria-core/src/generated/astria.protocol.transaction.v1alpha1.rs @@ -36,7 +36,7 @@ pub mod action { SudoAddressChange(super::SudoAddressChange), #[prost(message, tag = "51")] ValidatorUpdate( - crate::generated::astria_vendored::tendermint::abci::ValidatorUpdate, + super::super::super::super::super::astria_vendored::tendermint::abci::ValidatorUpdate, ), #[prost(message, tag = "52")] IbcRelayerChange(super::IbcRelayerChange), diff --git a/crates/astria-core/src/generated/mod.rs b/crates/astria-core/src/generated/mod.rs index 963dd5bbe3..41e186e2d7 100644 --- a/crates/astria-core/src/generated/mod.rs +++ b/crates/astria-core/src/generated/mod.rs @@ -38,151 +38,155 @@ pub mod astria_vendored { } #[path = ""] -pub mod bundle { - pub mod v1alpha1 { - include!("astria.bundle.v1alpha1.rs"); - - #[cfg(feature = "serde")] - mod _serde_impl { - use super::*; - include!("astria.bundle.v1alpha1.serde.rs"); - } - } -} - -#[path = ""] -pub mod execution { - pub mod v1 { - include!("astria.execution.v1.rs"); - - 
#[cfg(feature = "serde")] - mod _serde_impl { - use super::*; - include!("astria.execution.v1.serde.rs"); - } - } -} - -#[path = ""] -pub mod primitive { - pub mod v1 { - include!("astria.primitive.v1.rs"); - - #[cfg(feature = "serde")] - mod _serde_impl { - use super::*; - include!("astria.primitive.v1.serde.rs"); - } - } -} - -#[path = ""] -pub mod protocol { +pub mod astria { #[path = ""] - pub mod accounts { - #[path = "astria.protocol.accounts.v1.rs"] - pub mod v1; - } - #[path = ""] - pub mod asset { - #[path = "astria.protocol.asset.v1.rs"] - pub mod v1; - } - #[path = ""] - pub mod bridge { - #[path = "astria.protocol.bridge.v1.rs"] - pub mod v1; - } - #[path = ""] - pub mod fees { - #[path = "astria.protocol.fees.v1.rs"] - pub mod v1 { - include!("astria.protocol.fees.v1.rs"); + pub mod bundle { + pub mod v1alpha1 { + include!("astria.bundle.v1alpha1.rs"); #[cfg(feature = "serde")] - mod _serde_impls { + mod _serde_impl { use super::*; - include!("astria.protocol.fees.v1.serde.rs"); + include!("astria.bundle.v1alpha1.serde.rs"); } } } + #[path = ""] - pub mod genesis { + pub mod execution { pub mod v1 { - include!("astria.protocol.genesis.v1.rs"); + include!("astria.execution.v1.rs"); #[cfg(feature = "serde")] - mod _serde_impls { + mod _serde_impl { use super::*; - include!("astria.protocol.genesis.v1.serde.rs"); + include!("astria.execution.v1.serde.rs"); } } } + #[path = ""] - pub mod memos { + pub mod primitive { pub mod v1 { - include!("astria.protocol.memos.v1.rs"); + include!("astria.primitive.v1.rs"); #[cfg(feature = "serde")] - mod _serde_impls { + mod _serde_impl { use super::*; - include!("astria.protocol.memos.v1.serde.rs"); + include!("astria.primitive.v1.serde.rs"); } } } + #[path = ""] - pub mod transaction { - pub mod v1 { - include!("astria.protocol.transaction.v1.rs"); + pub mod protocol { + #[path = ""] + pub mod accounts { + #[path = "astria.protocol.accounts.v1.rs"] + pub mod v1; + } + #[path = ""] + pub mod asset { + #[path = 
"astria.protocol.asset.v1.rs"] + pub mod v1; + } + #[path = ""] + pub mod bridge { + #[path = "astria.protocol.bridge.v1.rs"] + pub mod v1; + } + #[path = ""] + pub mod fees { + #[path = "astria.protocol.fees.v1.rs"] + pub mod v1 { + include!("astria.protocol.fees.v1.rs"); - #[cfg(feature = "serde")] - mod _serde_impl { - use super::*; - include!("astria.protocol.transaction.v1.serde.rs"); + #[cfg(feature = "serde")] + mod _serde_impls { + use super::*; + include!("astria.protocol.fees.v1.serde.rs"); + } } } - } -} + #[path = ""] + pub mod genesis { + pub mod v1 { + include!("astria.protocol.genesis.v1.rs"); -#[path = ""] -pub mod sequencerblock { - pub mod v1alpha1 { - include!("astria.sequencerblock.v1alpha1.rs"); + #[cfg(feature = "serde")] + mod _serde_impls { + use super::*; + include!("astria.protocol.genesis.v1.serde.rs"); + } + } + } + #[path = ""] + pub mod memos { + pub mod v1 { + include!("astria.protocol.memos.v1.rs"); - #[cfg(feature = "serde")] - mod _serde_impl { - use super::*; - include!("astria.sequencerblock.v1alpha1.serde.rs"); + #[cfg(feature = "serde")] + mod _serde_impls { + use super::*; + include!("astria.protocol.memos.v1.serde.rs"); + } + } } - } - pub mod v1 { - include!("astria.sequencerblock.v1.rs"); + #[path = ""] + pub mod transaction { + pub mod v1 { + include!("astria.protocol.transaction.v1.rs"); - #[cfg(feature = "serde")] - mod _serde_impl { - use super::*; - include!("astria.sequencerblock.v1.serde.rs"); + #[cfg(feature = "serde")] + mod _serde_impl { + use super::*; + include!("astria.protocol.transaction.v1.serde.rs"); + } + } } } - pub mod optimisticblock { + #[path = ""] + pub mod sequencerblock { pub mod v1alpha1 { - include!("astria.sequencerblock.optimisticblock.v1alpha1.rs"); + include!("astria.sequencerblock.v1alpha1.rs"); #[cfg(feature = "serde")] mod _serde_impl { use super::*; - include!("astria.sequencerblock.optimisticblock.v1alpha1.serde.rs"); + include!("astria.sequencerblock.v1alpha1.serde.rs"); + } + } + + pub 
mod v1 { + include!("astria.sequencerblock.v1.rs"); + + #[cfg(feature = "serde")] + mod _serde_impl { + use super::*; + include!("astria.sequencerblock.v1.serde.rs"); + } + } + + pub mod optimistic { + pub mod v1alpha1 { + include!("astria.sequencerblock.optimistic.v1alpha1.rs"); + + #[cfg(feature = "serde")] + mod _serde_impl { + use super::*; + include!("astria.sequencerblock.optimistic.v1alpha1.serde.rs"); + } } } } -} -#[path = ""] -pub mod composer { - #[path = "astria.composer.v1.rs"] - pub mod v1; + #[path = ""] + pub mod composer { + #[path = "astria.composer.v1.rs"] + pub mod v1; + } } #[path = ""] diff --git a/crates/astria-core/src/primitive/v1/mod.rs b/crates/astria-core/src/primitive/v1/mod.rs index 56cb6922b5..f433b623fe 100644 --- a/crates/astria-core/src/primitive/v1/mod.rs +++ b/crates/astria-core/src/primitive/v1/mod.rs @@ -17,7 +17,7 @@ use sha2::{ }; use crate::{ - generated::primitive::v1 as raw, + generated::astria::primitive::v1 as raw, Protobuf, }; diff --git a/crates/astria-core/src/primitive/v1/u128.rs b/crates/astria-core/src/primitive/v1/u128.rs index ee77646a27..581f5b5ecc 100644 --- a/crates/astria-core/src/primitive/v1/u128.rs +++ b/crates/astria-core/src/primitive/v1/u128.rs @@ -1,6 +1,6 @@ //! Transformations of compiled protobuf types to other types. 
-use crate::generated::primitive::v1::Uint128; +use crate::generated::astria::primitive::v1::Uint128; impl From for Uint128 { fn from(primitive: u128) -> Self { let [ @@ -48,7 +48,8 @@ impl<'a> From<&'a u128> for Uint128 { #[cfg(test)] mod tests { - use crate::generated::primitive::v1::Uint128; + use super::Uint128; + #[track_caller] fn u128_roundtrip_check(expected: u128) { let pb: Uint128 = expected.into(); diff --git a/crates/astria-core/src/protocol/account/mod.rs b/crates/astria-core/src/protocol/account/mod.rs index 423eaae929..a3a6d96c3f 100644 --- a/crates/astria-core/src/protocol/account/mod.rs +++ b/crates/astria-core/src/protocol/account/mod.rs @@ -1,3 +1 @@ pub mod v1; - -use crate::generated::protocol::accounts::v1 as raw; diff --git a/crates/astria-core/src/protocol/account/v1/mod.rs b/crates/astria-core/src/protocol/account/v1/mod.rs index 697696ad66..1b16953fdc 100644 --- a/crates/astria-core/src/protocol/account/v1/mod.rs +++ b/crates/astria-core/src/protocol/account/v1/mod.rs @@ -1,7 +1,9 @@ -use super::raw; -use crate::primitive::v1::asset::{ - Denom, - ParseDenomError, +use crate::{ + generated::astria::protocol::accounts::v1 as raw, + primitive::v1::asset::{ + Denom, + ParseDenomError, + }, }; #[derive(Debug, thiserror::Error)] diff --git a/crates/astria-core/src/protocol/asset/mod.rs b/crates/astria-core/src/protocol/asset/mod.rs index bd31b9c85b..a3a6d96c3f 100644 --- a/crates/astria-core/src/protocol/asset/mod.rs +++ b/crates/astria-core/src/protocol/asset/mod.rs @@ -1,3 +1 @@ pub mod v1; - -use crate::generated::protocol::asset::v1 as raw; diff --git a/crates/astria-core/src/protocol/asset/v1/mod.rs b/crates/astria-core/src/protocol/asset/v1/mod.rs index c9330e32bf..9d8ec55c28 100644 --- a/crates/astria-core/src/protocol/asset/v1/mod.rs +++ b/crates/astria-core/src/protocol/asset/v1/mod.rs @@ -1,8 +1,10 @@ -use super::raw; -use crate::primitive::v1::asset::{ - self, - Denom, - ParseDenomError, +use crate::{ + 
generated::astria::protocol::asset::v1 as raw, + primitive::v1::asset::{ + self, + Denom, + ParseDenomError, + }, }; #[derive(Debug, thiserror::Error)] diff --git a/crates/astria-core/src/protocol/bridge/mod.rs b/crates/astria-core/src/protocol/bridge/mod.rs index f8b278c8f1..cab4a9fd57 100644 --- a/crates/astria-core/src/protocol/bridge/mod.rs +++ b/crates/astria-core/src/protocol/bridge/mod.rs @@ -1,3 +1,3 @@ pub mod v1; -use crate::generated::protocol::bridge::v1 as raw; +use crate::generated::astria::protocol::bridge::v1 as raw; diff --git a/crates/astria-core/src/protocol/fees/v1.rs b/crates/astria-core/src/protocol/fees/v1.rs index 400a63651d..f77441a90c 100644 --- a/crates/astria-core/src/protocol/fees/v1.rs +++ b/crates/astria-core/src/protocol/fees/v1.rs @@ -1,7 +1,7 @@ use prost::Name as _; use crate::{ - generated::protocol::fees::v1 as raw, + generated::astria::protocol::fees::v1 as raw, primitive::v1::asset, Protobuf, }; diff --git a/crates/astria-core/src/protocol/genesis/v1.rs b/crates/astria-core/src/protocol/genesis/v1.rs index 69bb2269bd..b03fb3cf8b 100644 --- a/crates/astria-core/src/protocol/genesis/v1.rs +++ b/crates/astria-core/src/protocol/genesis/v1.rs @@ -3,7 +3,7 @@ use std::convert::Infallible; pub use penumbra_ibc::params::IBCParameters; use crate::{ - generated::protocol::genesis::v1 as raw, + generated::astria::protocol::genesis::v1 as raw, primitive::v1::{ asset::{ self, diff --git a/crates/astria-core/src/protocol/memos/v1.rs b/crates/astria-core/src/protocol/memos/v1.rs index 796eea7f7b..9e9aa76cf2 100644 --- a/crates/astria-core/src/protocol/memos/v1.rs +++ b/crates/astria-core/src/protocol/memos/v1.rs @@ -1,4 +1,4 @@ -pub use crate::generated::protocol::memos::v1::{ +pub use crate::generated::astria::protocol::memos::v1::{ Ics20TransferDeposit, Ics20WithdrawalFromRollup, }; diff --git a/crates/astria-core/src/protocol/transaction/v1/mod.rs b/crates/astria-core/src/protocol/transaction/v1/mod.rs index 6bfbd5b22c..59616514d8 100644 
--- a/crates/astria-core/src/protocol/transaction/v1/mod.rs +++ b/crates/astria-core/src/protocol/transaction/v1/mod.rs @@ -11,7 +11,7 @@ use crate::{ SigningKey, VerificationKey, }, - generated::protocol::transaction::v1 as raw, + generated::astria::protocol::transaction::v1 as raw, primitive::v1::{ TransactionId, ADDRESS_LEN, diff --git a/crates/astria-core/src/sequencerblock/v1/block.rs b/crates/astria-core/src/sequencerblock/v1/block.rs index 48f8907fac..d398680182 100644 --- a/crates/astria-core/src/sequencerblock/v1/block.rs +++ b/crates/astria-core/src/sequencerblock/v1/block.rs @@ -1,5 +1,7 @@ use std::{ collections::HashMap, + fmt::Display, + ops::Deref, vec::IntoIter, }; @@ -37,7 +39,7 @@ use crate::{ Transaction, TransactionError, }, - Protobuf as _, + Protobuf, }; #[derive(Debug, thiserror::Error)] @@ -580,13 +582,70 @@ enum SequencerBlockHeaderErrorKind { /// Exists to provide convenient access to fields of a [`SequencerBlock`]. #[derive(Clone, Debug, PartialEq)] pub struct SequencerBlockParts { - pub block_hash: [u8; 32], + pub block_hash: BlockHash, pub header: SequencerBlockHeader, pub rollup_transactions: IndexMap, pub rollup_transactions_proof: merkle::Proof, pub rollup_ids_proof: merkle::Proof, } +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct BlockHash([u8; 32]); + +impl BlockHash { + pub fn new(inner: [u8; 32]) -> Self { + Self(inner) + } + + pub fn get(&self) -> [u8; 32] { + self.0 + } +} + +impl AsRef<[u8]> for BlockHash { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + +impl Deref for BlockHash { + type Target = [u8; 32]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[derive(Debug, thiserror::Error)] +#[error("block hash requires 32 bytes, but slice contained `{actual}`")] +pub struct BlockHashFromSliceError { + actual: usize, + source: std::array::TryFromSliceError, +} + +impl<'a> TryFrom<&'a [u8]> for BlockHash { + type Error = BlockHashFromSliceError; + + fn try_from(value: &'a [u8]) -> Result { + let inner 
= value.try_into().map_err(|source| Self::Error { + actual: value.len(), + source, + })?; + Ok(Self(inner)) + } +} + +// The display impl follows that of the pbjson derivation. +impl Display for BlockHash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + use base64::{ + display::Base64Display, + engine::general_purpose::STANDARD, + }; + Base64Display::new(&self.0, &STANDARD).fmt(f) + } +} + /// `SequencerBlock` is constructed from a tendermint/cometbft block by /// converting its opaque `data` bytes into sequencer specific types. #[derive(Clone, Debug, PartialEq)] @@ -597,7 +656,7 @@ pub struct SequencerBlockParts { pub struct SequencerBlock { /// The result of hashing the cometbft header. Guaranteed to not be `None` as compared to /// the cometbft/tendermint-rs return type. - block_hash: [u8; 32], + block_hash: BlockHash, /// the block header, which contains the cometbft header and additional sequencer-specific /// commitments. header: SequencerBlockHeader, @@ -622,7 +681,7 @@ impl SequencerBlock { /// /// This is done by hashing the `CometBFT` header stored in this block. 
#[must_use] - pub fn block_hash(&self) -> &[u8; 32] { + pub fn block_hash(&self) -> &BlockHash { &self.block_hash } @@ -687,7 +746,7 @@ impl SequencerBlock { rollup_ids_proof, } = self; raw::SequencerBlock { - block_hash: Bytes::copy_from_slice(&block_hash), + block_hash: Bytes::copy_from_slice(block_hash.as_ref()), header: Some(header.into_raw()), rollup_transactions: rollup_transactions .into_values() @@ -784,8 +843,9 @@ impl SequencerBlock { let mut rollup_datas = IndexMap::new(); for elem in data_list { - let raw_tx = crate::generated::protocol::transaction::v1::Transaction::decode(&*elem) - .map_err(SequencerBlockError::transaction_protobuf_decode)?; + let raw_tx = + crate::generated::astria::protocol::transaction::v1::Transaction::decode(&*elem) + .map_err(SequencerBlockError::transaction_protobuf_decode)?; let tx = Transaction::try_from_raw(raw_tx) .map_err(SequencerBlockError::raw_signed_transaction_conversion)?; for action in tx.into_unsigned().into_actions() { @@ -862,7 +922,7 @@ impl SequencerBlock { ); Ok(Self { - block_hash, + block_hash: BlockHash(block_hash), header: SequencerBlockHeader { chain_id, height, @@ -1031,7 +1091,7 @@ where /// Exists to provide convenient access to fields of a [`FilteredSequencerBlock`]. 
#[derive(Debug, Clone, PartialEq)] pub struct FilteredSequencerBlockParts { - pub block_hash: [u8; 32], + pub block_hash: BlockHash, pub header: SequencerBlockHeader, // filtered set of rollup transactions pub rollup_transactions: IndexMap, @@ -1049,7 +1109,7 @@ pub struct FilteredSequencerBlockParts { reason = "we want consistent and specific naming" )] pub struct FilteredSequencerBlock { - block_hash: [u8; 32], + block_hash: BlockHash, header: SequencerBlockHeader, // filtered set of rollup transactions rollup_transactions: IndexMap, @@ -1063,7 +1123,7 @@ pub struct FilteredSequencerBlock { impl FilteredSequencerBlock { #[must_use] - pub fn block_hash(&self) -> &[u8; 32] { + pub fn block_hash(&self) -> &BlockHash { &self.block_hash } @@ -1113,7 +1173,7 @@ impl FilteredSequencerBlock { .. } = self; raw::FilteredSequencerBlock { - block_hash: Bytes::copy_from_slice(&block_hash), + block_hash: Bytes::copy_from_slice(&*block_hash), header: Some(header.into_raw()), rollup_transactions: rollup_transactions .into_values() @@ -1269,6 +1329,27 @@ impl FilteredSequencerBlock { } } +impl Protobuf for FilteredSequencerBlock { + type Error = FilteredSequencerBlockError; + type Raw = raw::FilteredSequencerBlock; + + fn try_from_raw_ref(raw: &Self::Raw) -> Result { + Self::try_from_raw(raw.clone()) + } + + fn try_from_raw(raw: Self::Raw) -> Result { + Self::try_from_raw(raw) + } + + fn to_raw(&self) -> Self::Raw { + self.clone().into_raw() + } + + fn into_raw(self) -> Self::Raw { + self.into_raw() + } +} + #[derive(Debug, thiserror::Error)] #[error(transparent)] pub struct FilteredSequencerBlockError(FilteredSequencerBlockErrorKind); @@ -1363,7 +1444,7 @@ impl FilteredSequencerBlockError { #[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr( feature = "serde", - serde(into = "crate::generated::sequencerblock::v1::Deposit") + serde(into = "crate::generated::astria::sequencerblock::v1::Deposit") )] pub struct Deposit { // the address on the sequencer to which the 
funds were sent to. diff --git a/crates/astria-core/src/sequencerblock/v1/celestia.rs b/crates/astria-core/src/sequencerblock/v1/celestia.rs index 1fb359d0a4..a89e41c31c 100644 --- a/crates/astria-core/src/sequencerblock/v1/celestia.rs +++ b/crates/astria-core/src/sequencerblock/v1/celestia.rs @@ -6,6 +6,7 @@ use sha2::{ use super::{ block::{ + BlockHash, RollupTransactionsParts, SequencerBlock, SequencerBlockHeader, @@ -134,7 +135,7 @@ enum SubmittedRollupDataErrorKind { /// they can be converted directly into one another. This can change in the future. pub struct UncheckedSubmittedRollupData { /// The hash of the sequencer block. Must be 32 bytes. - pub sequencer_block_hash: [u8; 32], + pub sequencer_block_hash: BlockHash, /// The 32 bytes identifying the rollup this blob belongs to. Matches /// `astria.sequencerblock.v1.RollupTransactions.rollup_id` pub rollup_id: RollupId, @@ -154,7 +155,7 @@ impl UncheckedSubmittedRollupData { #[derive(Clone, Debug)] pub struct SubmittedRollupData { /// The hash of the sequencer block. Must be 32 bytes. - sequencer_block_hash: [u8; 32], + sequencer_block_hash: BlockHash, /// The 32 bytes identifying the rollup this blob belongs to. Matches /// `astria.sequencerblock.v1.RollupTransactions.rollup_id` rollup_id: RollupId, @@ -181,7 +182,7 @@ impl SubmittedRollupData { } #[must_use] - pub fn sequencer_block_hash(&self) -> &[u8; 32] { + pub fn sequencer_block_hash(&self) -> &BlockHash { &self.sequencer_block_hash } @@ -235,7 +236,7 @@ impl SubmittedRollupData { proof, } = self; raw::SubmittedRollupData { - sequencer_block_hash: Bytes::copy_from_slice(&sequencer_block_hash), + sequencer_block_hash: Bytes::copy_from_slice(&*sequencer_block_hash), rollup_id: Some(rollup_id.to_raw()), transactions, proof: Some(proof.into_raw()), @@ -382,7 +383,7 @@ enum SubmittedMetadataErrorKind { /// access the sequencer block's internal types. 
#[derive(Clone, Debug)] pub struct UncheckedSubmittedMetadata { - pub block_hash: [u8; 32], + pub block_hash: BlockHash, /// The original `CometBFT` header that is the input to this blob's original sequencer block. /// Corresponds to `astria.SequencerBlock.header`. pub header: SequencerBlockHeader, @@ -474,7 +475,7 @@ impl UncheckedSubmittedMetadata { #[derive(Clone, Debug)] pub struct SubmittedMetadata { /// The block hash obtained from hashing `.header`. - block_hash: [u8; 32], + block_hash: BlockHash, /// The sequencer block header. header: SequencerBlockHeader, /// The rollup IDs for which `SubmittedRollupData`s were submitted to celestia. @@ -507,7 +508,7 @@ impl<'a> Iterator for RollupIdIter<'a> { impl SubmittedMetadata { /// Returns the block hash of the tendermint header stored in this blob. #[must_use] - pub fn block_hash(&self) -> &[u8; 32] { + pub fn block_hash(&self) -> &BlockHash { &self.block_hash } @@ -617,7 +618,7 @@ impl SubmittedMetadata { .. } = self; raw::SubmittedMetadata { - block_hash: Bytes::copy_from_slice(&block_hash), + block_hash: Bytes::copy_from_slice(&*block_hash), header: Some(header.into_raw()), rollup_ids: rollup_ids.into_iter().map(RollupId::into_raw).collect(), rollup_transactions_proof: Some(rollup_transactions_proof.into_raw()), diff --git a/crates/astria-core/src/sequencerblock/v1/mod.rs b/crates/astria-core/src/sequencerblock/v1/mod.rs index 966696c9ff..9685f452f6 100644 --- a/crates/astria-core/src/sequencerblock/v1/mod.rs +++ b/crates/astria-core/src/sequencerblock/v1/mod.rs @@ -16,7 +16,7 @@ use sha2::{ }; use crate::{ - generated::sequencerblock::v1 as raw, + generated::astria::sequencerblock::v1 as raw, primitive::v1::{ derive_merkle_tree_from_rollup_txs, IncorrectRollupIdLength, diff --git a/crates/astria-sequencer-client/src/extension_trait.rs b/crates/astria-sequencer-client/src/extension_trait.rs index 59cab0f7e5..96eed49ef8 100644 --- a/crates/astria-sequencer-client/src/extension_trait.rs +++ 
b/crates/astria-sequencer-client/src/extension_trait.rs @@ -450,7 +450,7 @@ pub trait SequencerClientExt: Client { .map_err(|e| Error::tendermint_rpc("abci_query", e))?; let proto_response = - astria_core::generated::protocol::accounts::v1::BalanceResponse::decode( + astria_core::generated::astria::protocol::accounts::v1::BalanceResponse::decode( &*response.value, ) .map_err(|e| { @@ -492,7 +492,7 @@ pub trait SequencerClientExt: Client { .map_err(|e| Error::tendermint_rpc("abci_query", e))?; let proto_response = - astria_core::generated::protocol::asset::v1::AllowedFeeAssetsResponse::decode( + astria_core::generated::astria::protocol::asset::v1::AllowedFeeAssetsResponse::decode( &*response.value, ) .map_err(|e| { @@ -532,14 +532,12 @@ pub trait SequencerClientExt: Client { .map_err(|e| Error::tendermint_rpc("abci_query", e))?; let proto_response = - astria_core::generated::protocol::accounts::v1::NonceResponse::decode(&*response.value) - .map_err(|e| { - Error::abci_query_deserialization( - "astria.sequencer.v1.NonceResponse", - response, - e, - ) - })?; + astria_core::generated::astria::protocol::accounts::v1::NonceResponse::decode( + &*response.value, + ) + .map_err(|e| { + Error::abci_query_deserialization("astria.sequencer.v1.NonceResponse", response, e) + })?; Ok(proto_response.to_native()) } @@ -567,7 +565,7 @@ pub trait SequencerClientExt: Client { .map_err(|e| Error::tendermint_rpc("abci_query", e))?; let proto_response = - astria_core::generated::protocol::bridge::v1::BridgeAccountInfoResponse::decode( + astria_core::generated::astria::protocol::bridge::v1::BridgeAccountInfoResponse::decode( &*response.value, ) .map_err(|e| { @@ -599,7 +597,7 @@ pub trait SequencerClientExt: Client { .map_err(|e| Error::tendermint_rpc("abci_query", e))?; let proto_response = - astria_core::generated::protocol::bridge::v1::BridgeAccountLastTxHashResponse::decode( + astria_core::generated::astria::protocol::bridge::v1::BridgeAccountLastTxHashResponse::decode( 
&*response.value, ) .map_err(|e| { @@ -631,7 +629,7 @@ pub trait SequencerClientExt: Client { .map_err(|e| Error::tendermint_rpc("abci_query", e))?; let proto_response = - astria_core::generated::protocol::fees::v1::TransactionFeeResponse::decode( + astria_core::generated::astria::protocol::fees::v1::TransactionFeeResponse::decode( &*response.value, ) .map_err(|e| { diff --git a/crates/astria-sequencer-client/src/tests/http.rs b/crates/astria-sequencer-client/src/tests/http.rs index 23f1ee481c..3923c6178a 100644 --- a/crates/astria-sequencer-client/src/tests/http.rs +++ b/crates/astria-sequencer-client/src/tests/http.rs @@ -2,7 +2,7 @@ use std::time::Duration; use astria_core::{ crypto::SigningKey, - generated::protocol::{ + generated::astria::protocol::{ asset::v1::AllowedFeeAssetsResponse, fees::v1::TransactionFee, }, @@ -170,7 +170,7 @@ fn create_signed_transaction() -> Transaction { #[tokio::test] async fn get_latest_nonce() { - use astria_core::generated::protocol::accounts::v1::NonceResponse; + use astria_core::generated::astria::protocol::accounts::v1::NonceResponse; let MockSequencer { server, client, @@ -197,7 +197,7 @@ async fn get_latest_nonce() { #[tokio::test] async fn get_latest_balance() { - use astria_core::generated::protocol::accounts::v1::{ + use astria_core::generated::astria::protocol::accounts::v1::{ AssetBalance, BalanceResponse, }; @@ -262,7 +262,7 @@ async fn get_allowed_fee_assets() { #[tokio::test] async fn get_bridge_account_info() { use astria_core::{ - generated::protocol::bridge::v1::BridgeAccountInfoResponse, + generated::astria::protocol::bridge::v1::BridgeAccountInfoResponse, primitive::v1::RollupId, }; @@ -294,7 +294,7 @@ async fn get_bridge_account_info() { #[tokio::test] async fn get_bridge_account_last_transaction_hash() { - use astria_core::generated::protocol::bridge::v1::BridgeAccountLastTxHashResponse; + use astria_core::generated::astria::protocol::bridge::v1::BridgeAccountLastTxHashResponse; let MockSequencer { server, @@ 
-324,7 +324,7 @@ async fn get_bridge_account_last_transaction_hash() { #[tokio::test] async fn get_transaction_fee() { - use astria_core::generated::protocol::fees::v1::TransactionFeeResponse; + use astria_core::generated::astria::protocol::fees::v1::TransactionFeeResponse; let MockSequencer { server, diff --git a/crates/astria-sequencer-relayer/src/relayer/builder.rs b/crates/astria-sequencer-relayer/src/relayer/builder.rs index 0609af19ce..fd4d3fbf17 100644 --- a/crates/astria-sequencer-relayer/src/relayer/builder.rs +++ b/crates/astria-sequencer-relayer/src/relayer/builder.rs @@ -4,7 +4,7 @@ use std::{ time::Duration, }; -use astria_core::generated::sequencerblock::v1::sequencer_service_client::SequencerServiceClient; +use astria_core::generated::astria::sequencerblock::v1::sequencer_service_client::SequencerServiceClient; use astria_eyre::eyre::{ self, WrapErr as _, diff --git a/crates/astria-sequencer-relayer/src/relayer/mod.rs b/crates/astria-sequencer-relayer/src/relayer/mod.rs index 54c613ee81..e4e58e0cb7 100644 --- a/crates/astria-sequencer-relayer/src/relayer/mod.rs +++ b/crates/astria-sequencer-relayer/src/relayer/mod.rs @@ -5,7 +5,7 @@ use std::{ }; use astria_core::{ - generated::sequencerblock::v1::sequencer_service_client::SequencerServiceClient, + generated::astria::sequencerblock::v1::sequencer_service_client::SequencerServiceClient, sequencerblock::v1::SequencerBlock, }; use astria_eyre::eyre::{ diff --git a/crates/astria-sequencer-relayer/src/relayer/read.rs b/crates/astria-sequencer-relayer/src/relayer/read.rs index 58147fdfd2..6525f3e528 100644 --- a/crates/astria-sequencer-relayer/src/relayer/read.rs +++ b/crates/astria-sequencer-relayer/src/relayer/read.rs @@ -8,7 +8,7 @@ use std::{ }; use astria_core::{ - generated::sequencerblock::v1::{ + generated::astria::sequencerblock::v1::{ sequencer_service_client::SequencerServiceClient, GetSequencerBlockRequest, }, diff --git a/crates/astria-sequencer-relayer/src/relayer/write/conversion.rs 
b/crates/astria-sequencer-relayer/src/relayer/write/conversion.rs index 38733634d2..82d425ff89 100644 --- a/crates/astria-sequencer-relayer/src/relayer/write/conversion.rs +++ b/crates/astria-sequencer-relayer/src/relayer/write/conversion.rs @@ -10,7 +10,7 @@ use std::{ use astria_core::{ brotli::compress_bytes, - generated::sequencerblock::v1::{ + generated::astria::sequencerblock::v1::{ SubmittedMetadata, SubmittedMetadataList, SubmittedRollupData, diff --git a/crates/astria-sequencer-relayer/tests/blackbox/helpers/mock_sequencer_server.rs b/crates/astria-sequencer-relayer/tests/blackbox/helpers/mock_sequencer_server.rs index 82534b292a..33ff34d989 100644 --- a/crates/astria-sequencer-relayer/tests/blackbox/helpers/mock_sequencer_server.rs +++ b/crates/astria-sequencer-relayer/tests/blackbox/helpers/mock_sequencer_server.rs @@ -4,7 +4,7 @@ use std::{ }; use astria_core::{ - generated::sequencerblock::v1::{ + generated::astria::sequencerblock::v1::{ sequencer_service_server::{ SequencerService, SequencerServiceServer, diff --git a/crates/astria-sequencer-utils/src/blob_parser.rs b/crates/astria-sequencer-utils/src/blob_parser.rs index c729751c96..6fb450ff90 100644 --- a/crates/astria-sequencer-utils/src/blob_parser.rs +++ b/crates/astria-sequencer-utils/src/blob_parser.rs @@ -13,7 +13,7 @@ use std::{ use astria_core::{ brotli::decompress_bytes, - generated::sequencerblock::v1::{ + generated::astria::sequencerblock::v1::{ rollup_data::Value as RawRollupDataValue, Deposit as RawDeposit, RollupData as RawRollupData, diff --git a/crates/astria-sequencer-utils/src/genesis_example.rs b/crates/astria-sequencer-utils/src/genesis_example.rs index a464fd26c2..6d7d277719 100644 --- a/crates/astria-sequencer-utils/src/genesis_example.rs +++ b/crates/astria-sequencer-utils/src/genesis_example.rs @@ -5,7 +5,7 @@ use std::{ }; use astria_core::{ - generated::protocol::genesis::v1::{ + generated::astria::protocol::genesis::v1::{ AddressPrefixes, GenesisFees, IbcParameters, @@ 
-91,8 +91,8 @@ fn address_prefixes() -> AddressPrefixes { } #[expect(clippy::too_many_lines, reason = "all lines reasonably necessary")] -fn proto_genesis_state() -> astria_core::generated::protocol::genesis::v1::GenesisAppState { - astria_core::generated::protocol::genesis::v1::GenesisAppState { +fn proto_genesis_state() -> astria_core::generated::astria::protocol::genesis::v1::GenesisAppState { + astria_core::generated::astria::protocol::genesis::v1::GenesisAppState { accounts: accounts().into_iter().map(Protobuf::into_raw).collect(), address_prefixes: Some(address_prefixes()), authority_sudo_address: Some(alice().to_raw()), diff --git a/crates/astria-sequencer/src/app/benchmark_and_test_utils.rs b/crates/astria-sequencer/src/app/benchmark_and_test_utils.rs index 6c46e5c6e9..e2852fd225 100644 --- a/crates/astria-sequencer/src/app/benchmark_and_test_utils.rs +++ b/crates/astria-sequencer/src/app/benchmark_and_test_utils.rs @@ -60,7 +60,7 @@ pub(crate) const TED_ADDRESS: &str = "4c4f91d8a918357ab5f6f19c1e179968fc39bb44"; pub(crate) fn address_prefixes() -> AddressPrefixes { AddressPrefixes::try_from_raw( - astria_core::generated::protocol::genesis::v1::AddressPrefixes { + astria_core::generated::astria::protocol::genesis::v1::AddressPrefixes { base: crate::benchmark_and_test_utils::ASTRIA_PREFIX.into(), ibc_compat: crate::benchmark_and_test_utils::ASTRIA_COMPAT_PREFIX.into(), }, @@ -129,9 +129,9 @@ pub(crate) fn default_fees() -> astria_core::protocol::genesis::v1::GenesisFees } } -pub(crate) fn proto_genesis_state() -> astria_core::generated::protocol::genesis::v1::GenesisAppState -{ - use astria_core::generated::protocol::genesis::v1::{ +pub(crate) fn proto_genesis_state() +-> astria_core::generated::astria::protocol::genesis::v1::GenesisAppState { + use astria_core::generated::astria::protocol::genesis::v1::{ GenesisAppState, IbcParameters, }; diff --git a/crates/astria-sequencer/src/app/benchmarks.rs b/crates/astria-sequencer/src/app/benchmarks.rs index 
0b6aa3ef2b..390f650de1 100644 --- a/crates/astria-sequencer/src/app/benchmarks.rs +++ b/crates/astria-sequencer/src/app/benchmarks.rs @@ -61,7 +61,7 @@ impl Fixture { .collect::>(); let first_address = accounts.first().cloned().unwrap().address; let genesis_state = GenesisAppState::try_from_raw( - astria_core::generated::protocol::genesis::v1::GenesisAppState { + astria_core::generated::astria::protocol::genesis::v1::GenesisAppState { accounts, authority_sudo_address: first_address.clone(), ibc_sudo_address: first_address.clone(), diff --git a/crates/astria-sequencer/src/app/mod.rs b/crates/astria-sequencer/src/app/mod.rs index e116664a79..d2e0ca7b48 100644 --- a/crates/astria-sequencer/src/app/mod.rs +++ b/crates/astria-sequencer/src/app/mod.rs @@ -22,7 +22,7 @@ use std::{ }; use astria_core::{ - generated::protocol::transaction::v1 as raw, + generated::astria::protocol::transaction::v1 as raw, protocol::{ abci::AbciErrorCode, genesis::v1::GenesisAppState, diff --git a/crates/astria-sequencer/src/app/tests_app/mod.rs b/crates/astria-sequencer/src/app/tests_app/mod.rs index 1ca46cec4d..b91e107105 100644 --- a/crates/astria-sequencer/src/app/tests_app/mod.rs +++ b/crates/astria-sequencer/src/app/tests_app/mod.rs @@ -302,7 +302,7 @@ async fn app_transfer_block_fees_to_sudo() { #[tokio::test] async fn app_create_sequencer_block_with_sequenced_data_and_deposits() { use astria_core::{ - generated::sequencerblock::v1::RollupData as RawRollupData, + generated::astria::sequencerblock::v1::RollupData as RawRollupData, sequencerblock::v1::block::RollupData, }; diff --git a/crates/astria-sequencer/src/app/tests_breaking_changes.rs b/crates/astria-sequencer/src/app/tests_breaking_changes.rs index 031538a057..6a7d922e1c 100644 --- a/crates/astria-sequencer/src/app/tests_breaking_changes.rs +++ b/crates/astria-sequencer/src/app/tests_breaking_changes.rs @@ -188,7 +188,7 @@ async fn app_execute_transaction_with_every_action_snapshot() { }); 
acc.into_iter().map(Protobuf::into_raw).collect() }; - let genesis_state = astria_core::generated::protocol::genesis::v1::GenesisAppState { + let genesis_state = astria_core::generated::astria::protocol::genesis::v1::GenesisAppState { accounts, authority_sudo_address: Some(alice.try_address(ASTRIA_PREFIX).unwrap().to_raw()), ibc_sudo_address: Some(alice.try_address(ASTRIA_PREFIX).unwrap().to_raw()), diff --git a/crates/astria-sequencer/src/app/tests_execute_transaction.rs b/crates/astria-sequencer/src/app/tests_execute_transaction.rs index 76b1a1ae8e..ecc7111601 100644 --- a/crates/astria-sequencer/src/app/tests_execute_transaction.rs +++ b/crates/astria-sequencer/src/app/tests_execute_transaction.rs @@ -75,8 +75,8 @@ use crate::{ utils::create_deposit_event, }; -fn proto_genesis_state() -> astria_core::generated::protocol::genesis::v1::GenesisAppState { - astria_core::generated::protocol::genesis::v1::GenesisAppState { +fn proto_genesis_state() -> astria_core::generated::astria::protocol::genesis::v1::GenesisAppState { + astria_core::generated::astria::protocol::genesis::v1::GenesisAppState { authority_sudo_address: Some( get_alice_signing_key() .try_address(ASTRIA_PREFIX) diff --git a/crates/astria-sequencer/src/bridge/query.rs b/crates/astria-sequencer/src/bridge/query.rs index 2e3ff25ba2..ed5d1c5177 100644 --- a/crates/astria-sequencer/src/bridge/query.rs +++ b/crates/astria-sequencer/src/bridge/query.rs @@ -293,7 +293,7 @@ fn preprocess_request(params: &[(String, String)]) -> Result Result<[u8; 32]> { + async fn get_block_hash_by_height(&self, height: u64) -> Result { let Some(bytes) = self .nonverifiable_get_raw(keys::block_hash_by_height(height).as_bytes()) .await @@ -41,14 +42,14 @@ pub(crate) trait StateReadExt: StateRead { bail!("block hash not found for given height"); }; StoredValue::deserialize(&bytes) - .and_then(|value| storage::BlockHash::try_from(value).map(<[u8; 32]>::from)) + .and_then(|value| 
storage::BlockHash::try_from(value).map(BlockHash::from)) .wrap_err("invalid block hash bytes") } #[instrument(skip_all)] async fn get_sequencer_block_header_by_hash( &self, - hash: &[u8; 32], + hash: &BlockHash, ) -> Result { let Some(bytes) = self .nonverifiable_get_raw(keys::sequencer_block_header_by_hash(hash).as_bytes()) @@ -66,7 +67,7 @@ pub(crate) trait StateReadExt: StateRead { } #[instrument(skip_all)] - async fn get_rollup_ids_by_block_hash(&self, hash: &[u8; 32]) -> Result> { + async fn get_rollup_ids_by_block_hash(&self, hash: &BlockHash) -> Result> { let Some(bytes) = self .nonverifiable_get_raw(keys::rollup_ids_by_hash(hash).as_bytes()) .await @@ -183,7 +184,7 @@ pub(crate) trait StateWriteExt: StateWrite { #[instrument(skip_all)] async fn get_sequencer_block_by_hash( state: &S, - hash: &[u8; 32], + hash: &BlockHash, ) -> Result { let header = state .get_sequencer_block_header_by_hash(hash) @@ -228,7 +229,7 @@ async fn get_sequencer_block_by_hash( fn put_block_hash( state: &mut S, block_height: tendermint::block::Height, - block_hash: [u8; 32], + block_hash: BlockHash, ) -> Result<()> { let bytes = StoredValue::from(storage::BlockHash::from(&block_hash)) .serialize() diff --git a/crates/astria-sequencer/src/grpc/storage/values/block_hash.rs b/crates/astria-sequencer/src/grpc/storage/values/block_hash.rs index 4c772091ca..c96d1bfb66 100644 --- a/crates/astria-sequencer/src/grpc/storage/values/block_hash.rs +++ b/crates/astria-sequencer/src/grpc/storage/values/block_hash.rs @@ -28,15 +28,15 @@ impl<'a> Debug for BlockHash<'a> { } } -impl<'a> From<&'a [u8; 32]> for BlockHash<'a> { - fn from(block_hash: &'a [u8; 32]) -> Self { +impl<'a> From<&'a astria_core::sequencerblock::v1::block::BlockHash> for BlockHash<'a> { + fn from(block_hash: &'a astria_core::sequencerblock::v1::block::BlockHash) -> Self { BlockHash(Cow::Borrowed(block_hash)) } } -impl<'a> From> for [u8; 32] { +impl<'a> From> for astria_core::sequencerblock::v1::block::BlockHash { fn 
from(block_hash: BlockHash<'a>) -> Self { - block_hash.0.into_owned() + Self::new(block_hash.0.into_owned()) } } diff --git a/crates/astria-sequencer/src/sequencer.rs b/crates/astria-sequencer/src/sequencer.rs index 8d0dca32c6..c5b3411d1c 100644 --- a/crates/astria-sequencer/src/sequencer.rs +++ b/crates/astria-sequencer/src/sequencer.rs @@ -1,4 +1,4 @@ -use astria_core::generated::sequencerblock::v1::sequencer_service_server::SequencerServiceServer; +use astria_core::generated::astria::sequencerblock::v1::sequencer_service_server::SequencerServiceServer; use astria_eyre::{ anyhow_to_eyre, eyre::{ diff --git a/crates/astria-sequencer/src/service/consensus.rs b/crates/astria-sequencer/src/service/consensus.rs index 057fa091eb..a0b6a4a5ae 100644 --- a/crates/astria-sequencer/src/service/consensus.rs +++ b/crates/astria-sequencer/src/service/consensus.rs @@ -450,13 +450,15 @@ mod tests { async fn new_consensus_service(funded_key: Option) -> (Consensus, Mempool) { let accounts = if let Some(funded_key) = funded_key { - vec![astria_core::generated::protocol::genesis::v1::Account { - address: Some( - crate::benchmark_and_test_utils::astria_address(funded_key.address_bytes()) - .to_raw(), - ), - balance: Some(10u128.pow(19).into()), - }] + vec![ + astria_core::generated::astria::protocol::genesis::v1::Account { + address: Some( + crate::benchmark_and_test_utils::astria_address(funded_key.address_bytes()) + .to_raw(), + ), + balance: Some(10u128.pow(19).into()), + }, + ] } else { vec![] }; diff --git a/crates/astria-sequencer/src/service/info/mod.rs b/crates/astria-sequencer/src/service/info/mod.rs index 23b863aebc..a233359274 100644 --- a/crates/astria-sequencer/src/service/info/mod.rs +++ b/crates/astria-sequencer/src/service/info/mod.rs @@ -212,7 +212,7 @@ mod tests { #[tokio::test] async fn handle_balance_query() { use astria_core::{ - generated::protocol::accounts::v1 as raw, + generated::astria::protocol::accounts::v1 as raw, protocol::account::v1::AssetBalance, }; 
@@ -279,7 +279,7 @@ mod tests { #[tokio::test] async fn handle_denom_query() { - use astria_core::generated::protocol::asset::v1 as raw; + use astria_core::generated::astria::protocol::asset::v1 as raw; let storage = cnidarium::TempStorage::new().await.unwrap(); let mut state = StateDelta::new(storage.latest_snapshot()); @@ -323,7 +323,7 @@ mod tests { #[tokio::test] async fn handle_allowed_fee_assets_query() { - use astria_core::generated::protocol::asset::v1 as raw; + use astria_core::generated::astria::protocol::asset::v1 as raw; let storage = cnidarium::TempStorage::new().await.unwrap(); let mut state = StateDelta::new(storage.latest_snapshot()); diff --git a/crates/astria-sequencer/src/service/mempool/mod.rs b/crates/astria-sequencer/src/service/mempool/mod.rs index 88b59a5b77..67cfa9a48d 100644 --- a/crates/astria-sequencer/src/service/mempool/mod.rs +++ b/crates/astria-sequencer/src/service/mempool/mod.rs @@ -10,7 +10,7 @@ use std::{ }; use astria_core::{ - generated::protocol::transaction::v1 as raw, + generated::astria::protocol::transaction::v1 as raw, primitive::v1::asset::IbcPrefixed, protocol::{ abci::AbciErrorCode, diff --git a/crates/astria-telemetry/src/lib.rs b/crates/astria-telemetry/src/lib.rs index 4028a513dd..e5daca4878 100644 --- a/crates/astria-telemetry/src/lib.rs +++ b/crates/astria-telemetry/src/lib.rs @@ -34,6 +34,7 @@ use tracing_subscriber::{ LevelFilter, ParseError, }, + fmt::format::FmtSpan, layer::SubscriberExt as _, util::{ SubscriberInitExt as _, @@ -229,7 +230,11 @@ impl Config { let mut pretty_printer = None; if force_stdout || std::io::stdout().is_terminal() { if pretty_print { - pretty_printer = Some(tracing_subscriber::fmt::layer().compact()); + pretty_printer = Some( + tracing_subscriber::fmt::layer() + .compact() + .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE), + ) } else { tracer_provider = tracer_provider.with_simple_exporter( SpanExporter::builder() diff --git a/justfile b/justfile index 6c09ebd06e..d94822eb81 100644 
--- a/justfile +++ b/justfile @@ -40,6 +40,7 @@ _crate_short_name crate quiet="": #!/usr/bin/env sh set -eu case {{crate}} in + astria-auctioneer) short_name=auctioneer ;; astria-bridge-withdrawer) short_name=bridge-withdrawer ;; astria-cli) short_name=cli ;; astria-composer) short_name=composer ;; diff --git a/specs/data-flow-and-verification.md b/specs/data-flow-and-verification.md index 613b00f70a..6bde2222d9 100644 --- a/specs/data-flow-and-verification.md +++ b/specs/data-flow-and-verification.md @@ -11,32 +11,33 @@ the exit point is execution by a rollup node. ## Entry point -The entry point for rollup data is via a `sequence::Action`, which can become -part of a sequencer transaction. The data types are as follows: +The entry point for rollup data is via a `RollupDataSubmission` action, which can +become part of a sequencer transaction. The data types are as follows: ```rust -// sequence::Action -pub struct Action { - pub(crate) chain_id: Vec, - pub(crate) data: Vec, +pub struct RollupDataSubmission { + rollup_id: RollupId, + data: Bytes, + // the asset to pay fees with + fee_asset: asset::Denom, } ``` ```rust -// an unsigned transaction -pub struct Unsigned { - pub(crate) nonce: Nonce, - pub(crate) actions: Vec, +// an unsigned transaction body +pub struct TransactionBody { + actions: Actions, // vector of actions + params: TransactionParams, // chain id, nonce } ``` -The `data` field inside the `sequence::Action` is arbitrary bytes, which should +The `data` field inside the `RollupDataSubmission` is arbitrary bytes, which should be an encoded rollup transaction. The sequencer is agnostic to the transaction -format of the rollups using it. The `chain_id` field is an identifier for the +format of the rollups using it. The `rollup_id` field is an identifier for the rollup the data is destined for. To submit rollup data to the system, the user creates a transaction with a -`sequence::Action` within it and signs and submits it to the sequencer. 
The +`RollupDataSubmission` within it and signs and submits it to the sequencer. The sequencer will then include it in a block, thus finalizing its ordering. ## Sequencer to data availability @@ -47,56 +48,81 @@ the block data is published via a data availability layer. The block data published is as follows: ```rust -pub struct SequencerBlockData { - block_hash: Hash, - header: Header, - /// chain ID -> rollup transactions - rollup_data: BTreeMap>>, - /// The root of the action tree for this block. - action_tree_root: [u8; 32], - /// The inclusion proof that the action tree root is included - /// in `Header::data_hash`. - action_tree_root_inclusion_proof: merkle::Proof, - /// The commitment to the chain IDs of the rollup data. - /// The merkle root of the tree where the leaves are the chain IDs. - chain_ids_commitment: [u8; 32], - /// The inclusion proof that the chain IDs commitment is included - /// in `Header::data_hash`. - chain_ids_commitment_inclusion_proof: merkle::Proof, +pub struct SequencerBlockHeader { + chain_id: tendermint::chain::Id, + height: tendermint::block::Height, + time: Time, + // the merkle root of all the rollup data in the block + rollup_transactions_root: [u8; 32], + // the merkle root of all transactions in the block + data_hash: [u8; 32], + proposer_address: account::Id, +} + +pub struct SequencerBlock { + /// the cometbft block hash for this block + block_hash: [u8; 32], + /// the block header, which contains cometbft header info and additional sequencer-specific + /// commitments. + header: SequencerBlockHeader, + /// The collection of rollup transactions that were included in this block. + rollup_transactions: IndexMap, + /// The inclusion proof that the rollup transactions merkle root is included + /// in `header.data_hash`. + rollup_transactions_proof: merkle::Proof, + /// The inclusion proof that the rollup IDs commitment is included + /// in `header.data_hash`. 
+ rollup_ids_proof: merkle::Proof, } ``` -When this data is actually published, it's split into multiple structures. +When the `SequencerBlock` is actually published, it's split into multiple structures. Specifically, the data for each rollup is written independently, while a "base" -data type which contains the rollup chain IDs included in the block is also -written. This allows each rollup to only require the `SequencerNamespaceData` -for the block and the `RollupNamespaceData` for its own rollup transactions. For -each block, if there are N rollup chain IDs included, 1 + N structures are -written to DA. +data type which contains all the other `SequencerBlock` info, plus the list of +rollup IDs in the block, is written. This allows each rollup to only require +the `SequencerNamespaceData` for the block and the `RollupNamespaceData` for +its own rollup transactions. For each block, if there are N rollup chain IDs +included, 1 + N structures are written to DA. ```rust -/// SequencerNamespaceData represents the data written to the "base" -/// sequencer namespace. It contains all the other chain IDs (and thus, +/// SubmittedMetadata represents the data written to the "base" +/// sequencer namespace. It contains all the other rollup IDs (and thus, /// namespaces) that were also written to in the same block. -#[derive(Serialize, Deserialize, Debug)] -pub struct SequencerNamespaceData { - pub block_hash: Hash, - pub header: Header, - pub rollup_chain_ids: Vec, - pub action_tree_root: [u8; 32], - pub action_tree_root_inclusion_proof: InclusionProof, - pub chain_ids_commitment: [u8; 32], +pub struct SubmittedMetadata { + /// The block hash obtained from hashing `.header`. + block_hash: [u8; 32], + /// The sequencer block header. + header: SequencerBlockHeader, + /// The rollup IDs for which `SubmittedRollupData`s were submitted to celestia. 
+ /// Corresponds to the `astria.sequencer.v1.RollupTransactions.id` field + /// and is extracted from `astria.SequencerBlock.rollup_transactions`. + rollup_ids: Vec, + /// The proof that the rollup transactions are included in sequencer block. + /// Corresponds to `astria.SequencerBlock.rollup_transactions_proof`. + rollup_transactions_proof: merkle::Proof, + /// The proof that this sequencer blob includes all rollup IDs of + /// the original sequencer block it was derived from. + /// This proof together with `Sha256(MTH(rollup_ids))` (Sha256 + /// applied to the Merkle Tree Hash of the rollup ID sequence) must be + /// equal to `header.data_hash` which itself must match + /// `astria.SequencerBlock.header.data_hash`. This field corresponds to + /// `astria.SequencerBlock.rollup_ids_proof`. + rollup_ids_proof: merkle::Proof, } ``` ```rust -/// RollupNamespaceData represents the data written to a rollup namespace. -#[derive(Serialize, Deserialize, Debug)] -pub struct RollupNamespaceData { - pub(crate) block_hash: Hash, - pub(crate) chain_id: ChainId, - pub rollup_txs: Vec>, - pub(crate) inclusion_proof: InclusionProof, +/// SubmittedRollupData represents the data written to a rollup namespace. +pub struct SubmittedRollupData { + /// The hash of the sequencer block. Must be 32 bytes. + sequencer_block_hash: [u8; 32], + /// The 32 bytes identifying the rollup this blob belongs to. Matches + /// `astria.sequencerblock.v1.RollupTransactions.rollup_id` + rollup_id: RollupId, + /// A list of opaque bytes that are serialized rollup transactions. + transactions: Vec, + /// The proof that these rollup transactions are included in sequencer block. + proof: merkle::Proof, } ``` @@ -108,9 +134,6 @@ properties as ordering, completeness, and correctness respectively. It is able to do this *without* requiring the full transaction data of the block, as is explained below. 
-Note that the `Header` field in `SequencerNamespaceData` is a [Tendermint -header](https://github.com/informalsystems/tendermint-rs/blob/4d81b67c28510db7d2d99ed62ebfa9fdf0e02141/tendermint/src/block/header.rs#L25). - ## Data availability to rollup node For a rollup node to verify the ordering, completeness, and correctness of the @@ -120,13 +143,13 @@ block data it receives, it must verify the following: 2. the block hash was in fact committed by the sequencer (ie. >2/3 stake voted to commit this block hash to the chain) 3. the block header correctly hashes to the block hash -4. the `data_hash` inside the header contains the `action_tree_root` of the +4. the `data_hash` inside the header contains the `rollup_transactions_root` of the block (see [sequencer inclusion proofs](sequencer-inclusion-proofs.md) for - details), which is a commitment to the `sequence:Action`s in the block -5. the `rollup_txs` inside `RollupNamespaceData` is contained within the - `action_tree_root` -6. the `chain_ids_commitment` is a valid commitment to `rollup_chain_ids` -7. the `data_hash` inside the header contains the `chain_ids_commitment` + details), which is a commitment to the `RollupDataSubmission`s in the block +5. the `transactions` inside `SubmittedRollupData` is contained within the + `rollup_transactions_root` +6. the `rollup_ids_commitment` is a valid commitment to `rollup_ids` +7. the `data_hash` inside the header contains the `rollup_ids_commitment` for the block. Let's go through these one-by-one. @@ -159,45 +182,45 @@ The block hash is a commitment to the block header (specifically, the merkle root of the tree where the leaves are each header field). We then verify that the block header merkleizes to the block hash correctly. -### 4. `action_tree_root` +### 4. 
`rollup_transactions_root` -The block's data (transactions) contain the `action_tree_root` of the block (see -[sequencer inclusion proofs](sequencer-inclusion-proofs.md) for details), which -is a commitment to the `sequence:Action`s in the block. Specifically, the -`action_tree_root` is the root of a merkle tree where each leaf is a commitment -to the rollup data for one spceific rollup. The block header contains the field -`data_hash` which is the merkle root of all the transactions in a block. Since -`action_tree_root` is a transaction, we can prove its inclusion inside -`data_hash` (the `action_tree_root_inclusion_proof` field inside -`SequencerNamespaceData`). Then, in the next step, we can verify that the rollup -data we received was included inside `action_tree_root`. +The block's data (transactions) contain the `rollup_transactions_root` of the +block (see [sequencer inclusion proofs](sequencer-inclusion-proofs.md) for details), +which is a commitment to the `RollupDataSubmission`s in the block. Specifically, +the `rollup_transactions_root` is the root of a merkle tree where each leaf is a + commitment to the rollup data for one specific rollup. The block header contains +the field `data_hash` which is the merkle root of all the transactions in a block. +Since `rollup_transactions_root` is a transaction, we can prove its inclusion inside +`data_hash` (the `rollup_transactions_proof` field inside +`SubmittedMetadata`). Then, in the next step, we can verify that the rollup +data we received was included inside `rollup_transactions_root`. ### 5. `rollup_txs` We calculate a commitment of the rollup data we receive (`rollup_txs` inside -`RollupNamespaceData`). We then verify that this data is included inside -`action_tree_root` (via the `inclusion_proof` field inside -`RollupNamespaceData`). At this point, we are now certain that the rollup data +`SubmittedRollupData`). 
We then verify that this data is included inside +`rollup_transactions_root` (via the `proof` field inside +`SubmittedRollupData`). At this point, we are now certain that the rollup data we received, which is a subset of the entire block's data, was in fact committed by the majority of the sequencer chain's validators. -### 6. `chain_ids_commitment` +### 6. `rollup_ids_root` -The `SequencerNamespaceData` contains a list of the `rollup_chain_ids` that were -included in the block. However, to ensure that chain IDs are not omitted when +The `SubmittedMetadata` contains a list of the `rollup_ids` that were +included in the block. However, to ensure that rollup IDs are not omitted when publishing the data (which would be undetectable to rollup nodes without forcing -them to pull the entire block's data), we also add a commitment to the chain IDs +them to pull the entire block's data), we also add a commitment to the rollup IDs in the block inside the block's transaction data. We ensure that the -`rollup_chain_ids` inside `SequencerNamespaceData` match the -`chain_ids_commitment`. This proves that no chain IDs were omitted from the -published block, as if any were omitted, then the `chain_ids_commitment` would -not match the commitment generated from `rollup_chain_ids`. +`rollup_ids` inside `SubmittedMetadata` match the +`rollup_ids_root`. This proves that no rollup IDs were omitted from the +published block, as if any were omitted, then the `rollup_ids_root` would +not match the commitment generated from `rollup_ids`. -### 7. `chain_ids_commitment_inclusion_proof` +### 7. `rollup_ids_root_inclusion_proof` -Similarly to verification of `action_tree_root` inside `data_hash`, we also verify -an inclusion proof of `chain_ids_commitment` inside `data_hash` when receiving a -published block. 
+Similarly to verification of `rollup_transactions_root` inside `data_hash`, we also +verify an inclusion proof of `rollup_ids_root` inside `data_hash` when receiving +a published block. ## Exit point diff --git a/specs/sequencer-inclusion-proofs.md b/specs/sequencer-inclusion-proofs.md index 273d2d028d..51b1f99417 100644 --- a/specs/sequencer-inclusion-proofs.md +++ b/specs/sequencer-inclusion-proofs.md @@ -29,14 +29,18 @@ A rollup conductor needs to be able to verify its subset of relevant data without needing to pull all the transaction data for a block. To do this, the block proposer includes a special "commitment tx" at the start of the block. This a special transaction type that can only be included by the proposer. +The "commitment tx" is the merkle root of a tree where each leaf is a commitment +to the batch of transactions for one specific rollup. ![image](assets/sequencer_inclusion_proof_0.png) When building a block, the proposer deconstructs all txs into their contained -`sequence::Action`s and groups them all. Remember that 1 `sequence::Action` -corresponds to 1 rollup transaction. Then, a commitment to the set of all -actions for a chain becomes a leaf in a merkle tree, the root of which becomes -the "commitment tx" +`RollupDataSubmission` actions and groups them all. Remember that 1 +`RollupDataSubmission` action corresponds to 1 rollup transaction. Then, a +commitment to the set of all actions for a chain becomes a leaf in a merkle tree, +the root of which becomes the "commitment tx". The other validators reconstruct +this merkle tree when validating the proposal, and only accept the proposal if +the tree is a valid representation of the rollup data in the block. 
![image](assets/sequencer_inclusion_proof_2.png) @@ -55,8 +59,8 @@ For example: layer and calculate the commitment which should match the leaf in red (in the above diagram) - we use the inclusion proof (in green) to verify the txs for chain C were - included in the action tree -- we verify a proof of inclusion of "commitment tx" (action tree root) inside + included in the rollup data merkle tree +- we verify a proof of inclusion of "commitment tx" (rollup data tree root) inside the block header's `data_hash` - we verify that `data_hash` was correctly included in the block's `block_hash` - verify `block_hash`'s cometbft >2/3 commitment @@ -68,8 +72,8 @@ staking power of the sequencer chain. Additionally, the commitment to the actions for a chain actually also includes a merkle root. The commitment contains of the merkle root of a tree where where -the leaves are the transactions for that rollup; ie. all the `sequence::Action`s -for that chain. "Commitment to actions for chain X" is implemented as `(chain_id +the leaves are the transactions for that rollup; ie. all the `RollupDataSubmission` +actions for that chain. "Commitment to actions for chain X" is implemented as `(chain_id || root of tx tree for rollup)`, allowing for easy verification that a specific rollup transaction was included in a sequencer block. This isn't required for any specific conductor logic, but nice for applications building on top of the diff --git a/tools/protobuf-compiler/CHANGELOG.md b/tools/protobuf-compiler/CHANGELOG.md new file mode 100644 index 0000000000..a017ade42d --- /dev/null +++ b/tools/protobuf-compiler/CHANGELOG.md @@ -0,0 +1,15 @@ + + +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## Unreleased + +### Added + +- Clears target directory before generating new code. 
+ [#1825](https://github.com/astriaorg/astria/pull/1825) diff --git a/tools/protobuf-compiler/src/main.rs b/tools/protobuf-compiler/src/main.rs index 1034672e90..20cf898353 100644 --- a/tools/protobuf-compiler/src/main.rs +++ b/tools/protobuf-compiler/src/main.rs @@ -1,3 +1,8 @@ +//! Generates Rust code of protobuf specs located in proto/ and writes +//! the result to crates/astria-core/src/generated. +//! +//! This tool will delete everything in crates/astria-core/src/generated (except +//! mod.rs). use std::{ collections::{ HashMap, @@ -59,23 +64,16 @@ fn main() { let files = find_protos(src_dir); + purge_out_dir(&out_dir); + tonic_build::configure() .build_client(true) .build_server(true) .emit_rerun_if_changed(false) - .bytes([ - ".astria", - ".celestia", - ".cosmos", - ".tendermint", - ]) + .bytes([".astria", ".celestia", ".cosmos", ".tendermint"]) .client_mod_attribute(".", "#[cfg(feature=\"client\")]") .server_mod_attribute(".", "#[cfg(feature=\"server\")]") .extern_path(".astria_vendored.penumbra", "::penumbra-proto") - .extern_path( - ".astria_vendored.tendermint.abci.ValidatorUpdate", - "crate::generated::astria_vendored::tendermint::abci::ValidatorUpdate", - ) .type_attribute(".astria.primitive.v1.Uint128", "#[derive(Copy)]") .type_attribute( ".astria.protocol.genesis.v1.IbcParameters", @@ -206,7 +204,8 @@ fn get_buf_from_env() -> PathBuf { "linux" => "You can download it from https://github.com/bufbuild/buf/releases; if you are on Arch Linux, install it from the AUR with `rua install buf` or another helper", _other => "Check if there is a precompiled version for your OS at https://github.com/bufbuild/buf/releases" }; - let error_msg = "Could not find `buf` installation and this build crate cannot proceed without + let error_msg = "Could not find `buf` installation and this build crate cannot proceed \ + without this knowledge. 
If `buf` is installed and this crate had trouble finding it, you can set the `BUF` environment variable with the specific path to your installed `buf` binary."; @@ -217,3 +216,25 @@ fn get_buf_from_env() -> PathBuf { .or_else(|| which::which("buf").ok()) .expect(&msg) } + +fn purge_out_dir(path: impl AsRef) { + for entry in read_dir(path) + .expect("should be able to read target folder for generated files") + .flatten() + { + // skip mod.rs as it's assumed to be the only non-generated file in the out dir. + if entry + .path() + .file_name() + .expect("every entry in the generated file out dir should have a name") + == "mod.rs" + { + continue; + } + + std::fs::remove_file(entry.path()).expect( + "all entries in the out dir should be generated files, and the out dir is expected to \ + have read, write, execute permissions set", + ); + } +}