From 8ee733c3b39779aaca2d9032dc11b37c81e89353 Mon Sep 17 00:00:00 2001 From: Chris Czub Date: Thu, 22 Aug 2024 12:17:27 -0400 Subject: [PATCH 01/43] Basic IBC Handshake Test (#4797) ## Describe your changes This adds a basic IBC handshake test, using the existing mock client. Some opportunistic refactoring is also included. This PR adds a new `tests::common::ibc_tests` module, which contains a `MockRelayer` that can be extended later on. Follow-up tasks should be basic transfer testing, transfer timeout testing, and testing with malformed requests. While debugging this test, bugs were found in the various IBC query APIs, specifically that the `proof_height` was consistently being returned one lower than the height whose header would contain the app_hash necessary for validating the proof. The Go relayer is unaffected because it uses the ABCI RPC query interface instead, and Hermes uses the affected APIs but discards the affected `proof_height` fields and uses its own internal mechanisms for height tracking instead. Fixes were included for the affected APIs. ## Issue Closes #3758 ## Checklist before requesting a review - [ ] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > There are changes to TendermintProxy and IBC RPC responses; however, anyone using the affected RPCs would quickly run into the issues we saw in testing that revealed the bugs in the RPC responses, and the chain would properly block any requests made based on the incorrect response values. The changeset also affects the test code heavily; however, there should be nothing that affects consensus or state. 
--------- Co-authored-by: Ava Howell --- Cargo.lock | 1 + crates/bin/pd/src/network/config.rs | 44 +- crates/core/app/Cargo.toml | 1 + crates/core/app/src/server/consensus.rs | 6 +- crates/core/app/tests/common/ibc_tests/mod.rs | 193 ++++ .../core/app/tests/common/ibc_tests/node.rs | 315 ++++++ .../app/tests/common/ibc_tests/relayer.rs | 907 ++++++++++++++++++ crates/core/app/tests/common/mod.rs | 4 + crates/core/app/tests/ibc_handshake.rs | 77 ++ .../app/tests/mock_consensus_block_proving.rs | 98 +- .../ibc/src/component/rpc/client_query.rs | 43 +- .../ibc/src/component/rpc/connection_query.rs | 60 +- .../ibc/src/component/rpc/consensus_query.rs | 106 +- crates/test/mock-consensus/src/block.rs | 11 +- crates/test/mock-consensus/src/lib.rs | 18 +- 15 files changed, 1762 insertions(+), 122 deletions(-) create mode 100644 crates/core/app/tests/common/ibc_tests/mod.rs create mode 100644 crates/core/app/tests/common/ibc_tests/node.rs create mode 100644 crates/core/app/tests/common/ibc_tests/relayer.rs create mode 100644 crates/core/app/tests/ibc_handshake.rs diff --git a/Cargo.lock b/Cargo.lock index 50dc26d63a..e0ab44877a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4568,6 +4568,7 @@ dependencies = [ "tap", "tempfile", "tendermint", + "tendermint-config", "tendermint-light-client-verifier", "tendermint-proto", "tokio", diff --git a/crates/bin/pd/src/network/config.rs b/crates/bin/pd/src/network/config.rs index 5334eadbc6..1cc87b6c1f 100644 --- a/crates/bin/pd/src/network/config.rs +++ b/crates/bin/pd/src/network/config.rs @@ -175,6 +175,48 @@ pub struct ValidatorKeys { } impl ValidatorKeys { + /// Use a hard-coded seed to generate a new set of validator keys. + pub fn from_seed(seed: [u8; 32]) -> Self { + // Create the spend key for this node. + let seed = SpendKeyBytes(seed); + let spend_key = SpendKey::from(seed.clone()); + + // Create signing key and verification key for this node. 
+ let validator_id_sk = spend_key.spend_auth_key(); + let validator_id_vk = VerificationKey::from(validator_id_sk); + + let validator_cons_sk = ed25519_consensus::SigningKey::new(OsRng); + + // generate consensus key for tendermint. + let validator_cons_sk = tendermint::PrivateKey::Ed25519( + validator_cons_sk + .as_bytes() + .as_slice() + .try_into() + .expect("32 bytes"), + ); + let validator_cons_pk = validator_cons_sk.public_key(); + + // generate P2P auth key for tendermint. + let node_key_sk = ed25519_consensus::SigningKey::from(seed.0); + let signing_key_bytes = node_key_sk.as_bytes().as_slice(); + + // generate consensus key for tendermint. + let node_key_sk = + tendermint::PrivateKey::Ed25519(signing_key_bytes.try_into().expect("32 bytes")); + let node_key_pk = node_key_sk.public_key(); + + ValidatorKeys { + validator_id_sk: validator_id_sk.clone(), + validator_id_vk, + validator_cons_sk, + validator_cons_pk, + node_key_sk, + node_key_pk, + validator_spend_key: seed, + } + } + pub fn generate() -> Self { // Create the spend key for this node. // TODO: change to use seed phrase @@ -198,7 +240,7 @@ impl ValidatorKeys { let validator_cons_pk = validator_cons_sk.public_key(); // generate P2P auth key for tendermint. - let node_key_sk = ed25519_consensus::SigningKey::new(OsRng); + let node_key_sk = ed25519_consensus::SigningKey::from(seed.0); let signing_key_bytes = node_key_sk.as_bytes().as_slice(); // generate consensus key for tendermint. 
diff --git a/crates/core/app/Cargo.toml b/crates/core/app/Cargo.toml index 6fc5a99c54..f16d81a4e1 100644 --- a/crates/core/app/Cargo.toml +++ b/crates/core/app/Cargo.toml @@ -98,6 +98,7 @@ rand_chacha = { workspace = true } rand_core = { workspace = true } tap = { workspace = true } tempfile = { workspace = true } +tendermint-config = { workspace = true } tower-http = { workspace = true } tracing-subscriber = { workspace = true } url = { workspace = true } diff --git a/crates/core/app/src/server/consensus.rs b/crates/core/app/src/server/consensus.rs index 5f4ecdca7b..b60e1aaffe 100644 --- a/crates/core/app/src/server/consensus.rs +++ b/crates/core/app/src/server/consensus.rs @@ -173,8 +173,8 @@ impl Consensus { &mut self, proposal: request::ProcessProposal, ) -> Result { - tracing::info!(height = ?proposal.height, proposer = ?proposal.proposer_address, hash = %proposal.hash, "processing proposal"); - // We process the propopsal in an isolated state fork. Eventually, we should cache this work and + tracing::info!(height = ?proposal.height, proposer = ?proposal.proposer_address, proposal_hash = %proposal.hash, "processing proposal"); + // We process the proposal in an isolated state fork. Eventually, we should cache this work and // re-use it when processing a `FinalizeBlock` message (starting in `0.38.x`). let mut tmp_app = App::new(self.storage.latest_snapshot()); Ok(tmp_app.process_proposal(proposal).await) @@ -187,7 +187,9 @@ impl Consensus { // We don't need to print the block height, because it will already be // included in the span modeling the abci request handling. 
tracing::info!(time = ?begin_block.header.time, "beginning block"); + let events = self.app.begin_block(&begin_block).await; + Ok(response::BeginBlock { events }) } diff --git a/crates/core/app/tests/common/ibc_tests/mod.rs b/crates/core/app/tests/common/ibc_tests/mod.rs new file mode 100644 index 0000000000..4c03e8a44c --- /dev/null +++ b/crates/core/app/tests/common/ibc_tests/mod.rs @@ -0,0 +1,193 @@ +use {anyhow::Result, std::time::Duration}; + +mod relayer; +use anyhow::Context as _; +use decaf377_rdsa::{SigningKey, SpendAuth, VerificationKey}; +use penumbra_app::{ + app::{MAX_BLOCK_TXS_PAYLOAD_BYTES, MAX_EVIDENCE_SIZE_BYTES}, + genesis, +}; +use penumbra_keys::keys::{SpendKey, SpendKeyBytes}; +use penumbra_mock_consensus::TestNode; +use penumbra_proto::core::component::stake::v1::Validator; +use penumbra_shielded_pool::genesis::Allocation; +use penumbra_stake::{DelegationToken, GovernanceKey, IdentityKey}; +#[allow(unused_imports)] +pub use relayer::MockRelayer; + +mod node; +pub use node::TestNodeWithIBC; +use serde::Deserialize; +use tendermint::{consensus::params::AbciParams, public_key::Algorithm, Genesis}; + +/// Collection of all keypairs required for a Penumbra validator. +/// Used to generate a stable identity for a [`NetworkValidator`]. +/// TODO: copied this from pd crate +#[derive(Deserialize)] +pub struct ValidatorKeys { + /// Penumbra spending key and viewing key for this node. + /// These need to be real curve points. + pub validator_id_sk: SigningKey, + pub validator_id_vk: VerificationKey, + pub validator_spend_key: SpendKeyBytes, + /// Consensus key for tendermint. + pub validator_cons_sk: tendermint::PrivateKey, + pub validator_cons_pk: tendermint::PublicKey, + /// P2P auth key for tendermint. + pub node_key_sk: tendermint::PrivateKey, + /// The identity key for the validator. 
+ pub identity_key: IdentityKey, + #[allow(unused_variables, dead_code)] + pub node_key_pk: tendermint::PublicKey, +} + +impl ValidatorKeys { + /// Use a hard-coded seed to generate a new set of validator keys. + pub fn from_seed(seed: [u8; 32]) -> Self { + // Create the spend key for this node. + let seed = SpendKeyBytes(seed); + let spend_key = SpendKey::from(seed.clone()); + + // Create signing key and verification key for this node. + let validator_id_sk = spend_key.spend_auth_key(); + let validator_id_vk = VerificationKey::from(validator_id_sk); + + let validator_cons_sk = ed25519_consensus::SigningKey::from(seed.0); + + // generate consensus key for tendermint. + let validator_cons_sk = tendermint::PrivateKey::Ed25519( + validator_cons_sk + .as_bytes() + .as_slice() + .try_into() + .expect("32 bytes"), + ); + let validator_cons_pk = validator_cons_sk.public_key(); + + // generate P2P auth key for tendermint. + let node_key_sk = ed25519_consensus::SigningKey::from(seed.0); + let signing_key_bytes = node_key_sk.as_bytes().as_slice(); + + // generate consensus key for tendermint. + let node_key_sk = + tendermint::PrivateKey::Ed25519(signing_key_bytes.try_into().expect("32 bytes")); + let node_key_pk = node_key_sk.public_key(); + + let identity_key: IdentityKey = IdentityKey( + spend_key + .full_viewing_key() + .spend_verification_key() + .clone() + .into(), + ); + ValidatorKeys { + validator_id_sk: validator_id_sk.clone(), + validator_id_vk, + validator_cons_sk, + validator_cons_pk, + node_key_sk, + node_key_pk, + validator_spend_key: seed, + identity_key, + } + } +} + +/// A genesis state that can be fed into CometBFT as well, +/// for verifying compliance of the mock tendermint implementation. +pub fn get_verified_genesis() -> Result { + let start_time = tendermint::Time::parse_from_rfc3339("2022-02-11T17:30:50.425417198Z")?; + let vkeys_a = ValidatorKeys::from_seed([0u8; 32]); + + // TODO: make it possible to flag exporting the app state, keys, etc. 
+ // to files possible on the builder + // genesis contents need to contain validator information in the app state + let mut genesis_contents = + genesis::Content::default().with_chain_id(TestNode::<()>::CHAIN_ID.to_string()); + + let spend_key_a = SpendKey::from(vkeys_a.validator_spend_key.clone()); + let validator_a = Validator { + identity_key: Some(IdentityKey(vkeys_a.validator_id_vk.into()).into()), + governance_key: Some(GovernanceKey(spend_key_a.spend_auth_key().into()).into()), + consensus_key: vkeys_a.validator_cons_pk.to_bytes(), + name: "test".to_string(), + website: "https://example.com".to_string(), + description: "test".to_string(), + enabled: true, + funding_streams: vec![], + sequence_number: 0, + }; + + // let's only do one validator per chain for now + // since it's easier to validate against cometbft + genesis_contents + .stake_content + .validators + .push(validator_a.clone()); + + // the validator needs some initial delegations + let identity_key_a: IdentityKey = IdentityKey( + spend_key_a + .full_viewing_key() + .spend_verification_key() + .clone() + .into(), + ); + let delegation_id_a = DelegationToken::from(&identity_key_a).denom(); + let ivk_a = spend_key_a.incoming_viewing_key(); + genesis_contents + .shielded_pool_content + .allocations + .push(Allocation { + address: ivk_a.payment_address(0u32.into()).0, + raw_amount: (25_000 * 10u128.pow(6)).into(), + raw_denom: delegation_id_a.to_string(), + }); + + let genesis = Genesis { + genesis_time: start_time.clone(), + chain_id: genesis_contents + .chain_id + .parse::() + .context("failed to parse chain ID")?, + initial_height: 0, + consensus_params: tendermint::consensus::Params { + abci: AbciParams::default(), + block: tendermint::block::Size { + // 1MB + max_bytes: MAX_BLOCK_TXS_PAYLOAD_BYTES as u64, + // Set to infinity since a chain running Penumbra won't use + // cometbft's notion of gas. + max_gas: -1, + // Minimum time increment between consecutive blocks. 
+ time_iota_ms: 500, + }, + evidence: tendermint::evidence::Params { + // We should keep this in approximate sync with the recommended default for + // `StakeParameters::unbonding_delay`, this is roughly a week. + max_age_num_blocks: 130000, + // Similarly, we set the max age duration for evidence to be a little over a week. + max_age_duration: tendermint::evidence::Duration(Duration::from_secs(650000)), + // 30KB + max_bytes: MAX_EVIDENCE_SIZE_BYTES as i64, + }, + validator: tendermint::consensus::params::ValidatorParams { + pub_key_types: vec![Algorithm::Ed25519], + }, + version: Some(tendermint::consensus::params::VersionParams { app: 0 }), + }, + // always empty in genesis json + app_hash: tendermint::AppHash::default(), + // app_state: genesis_contents.into(), + app_state: serde_json::value::to_value(penumbra_app::genesis::AppState::Content( + genesis_contents, + )) + .unwrap(), + // Set empty validator set for Tendermint config, which falls back to reading + // validators from the AppState, via ResponseInitChain: + // https://docs.tendermint.com/v0.32/tendermint-core/using-tendermint.html + validators: vec![], + }; + + Ok(genesis) +} diff --git a/crates/core/app/tests/common/ibc_tests/node.rs b/crates/core/app/tests/common/ibc_tests/node.rs new file mode 100644 index 0000000000..bbe4edb122 --- /dev/null +++ b/crates/core/app/tests/common/ibc_tests/node.rs @@ -0,0 +1,315 @@ +use { + crate::common::{BuilderExt as _, TempStorageExt as _}, + anyhow::{anyhow, Context as _, Result}, + cnidarium::TempStorage, + ed25519_consensus::{SigningKey, VerificationKey}, + ibc_proto::ibc::core::{ + channel::v1::query_client::QueryClient as IbcChannelQueryClient, + client::v1::query_client::QueryClient as IbcClientQueryClient, + connection::v1::query_client::QueryClient as IbcConnectionQueryClient, + }, + ibc_types::{ + core::{ + client::{ClientId, ClientType, Height}, + connection::{ChainId, ConnectionEnd, ConnectionId, Counterparty, Version}, + }, + 
lightclients::tendermint::{ + consensus_state::ConsensusState, header::Header as TendermintHeader, + }, + }, + penumbra_app::{ + genesis::{self, AppState}, + server::consensus::Consensus, + }, + penumbra_ibc::{component::ClientStateReadExt as _, IBC_COMMITMENT_PREFIX}, + penumbra_keys::test_keys, + penumbra_mock_client::MockClient, + penumbra_mock_consensus::TestNode, + penumbra_proto::util::tendermint_proxy::v1::{ + tendermint_proxy_service_client::TendermintProxyServiceClient, GetStatusRequest, + }, + std::error::Error, + tap::{Tap, TapFallible}, + tendermint::{ + v0_37::abci::{ConsensusRequest, ConsensusResponse}, + vote::Power, + Time, + }, + tokio::time, + tonic::transport::Channel, + tower_actor::Actor, + tracing::info, +}; + +// Contains some data from a single IBC connection + client for test usage. +// This might be better off as an extension trait or additional impl on the TestNode struct. +#[allow(unused)] +pub struct TestNodeWithIBC { + pub connection_id: ConnectionId, + pub client_id: ClientId, + pub chain_id: String, + pub counterparty: Counterparty, + pub version: Version, + pub signer: String, + pub connection: Option, + pub node: TestNode>>, + pub storage: TempStorage, + pub ibc_client_query_client: IbcClientQueryClient, + pub ibc_connection_query_client: IbcConnectionQueryClient, + pub _ibc_channel_query_client: IbcChannelQueryClient, + pub tendermint_proxy_service_client: TendermintProxyServiceClient, +} + +#[allow(unused)] +/// This interacts with a node similarly to how a relayer would. We intentionally call +/// against the external gRPC interfaces to get the most comprehensive test coverage. 
+impl TestNodeWithIBC { + pub async fn new( + suffix: &str, + start_time: Time, + keys: (SigningKey, VerificationKey), + ) -> Result { + let chain_id = format!("{}-{}", TestNode::<()>::CHAIN_ID, suffix); + // Use the correct substores + let storage = TempStorage::new_with_penumbra_prefixes().await?; + // Instantiate a mock tendermint proxy, which we will connect to the test node. + let proxy = penumbra_mock_tendermint_proxy::TestNodeProxy::new::(); + + let node = { + let app_state = + AppState::Content(genesis::Content::default().with_chain_id(chain_id.clone())); + let consensus = Consensus::new(storage.as_ref().clone()); + TestNode::builder() + .with_keys(vec![keys]) + .single_validator() + .with_initial_timestamp(start_time) + .with_penumbra_auto_app_state(app_state)? + .on_block(proxy.on_block_callback()) + .init_chain(consensus) + .await + .tap_ok(|e| tracing::info!(hash = %e.last_app_hash_hex(), "finished init chain"))? + }; + + // to select a port number just index on the suffix for now + let index = match suffix { + "a" => 0, + "b" => 1, + _ => unreachable!("update this hack"), + }; + let grpc_url = format!("http://127.0.0.1:808{}", index) // see #4517 + .parse::()? + .tap(|url| tracing::debug!(%url, "parsed grpc url")); + + tracing::info!("spawning gRPC..."); + // Spawn the node's RPC server. + let _rpc_server = { + let make_svc = penumbra_app::rpc::router( + storage.as_ref(), + proxy, + false, /*enable_expensive_rpc*/ + )? + .into_router() + .layer(tower_http::cors::CorsLayer::permissive()) + .into_make_service() + .tap(|_| tracing::info!("initialized rpc service")); + let [addr] = grpc_url + .socket_addrs(|| None)? 
+ .try_into() + .expect("grpc url can be turned into a socket address"); + let server = axum_server::bind(addr).serve(make_svc); + tokio::spawn(async { server.await.expect("grpc server returned an error") }) + .tap(|_| tracing::info!("grpc server is running")) + }; + + time::sleep(time::Duration::from_secs(1)).await; + // Create an RPC server for each chain to respond to IBC-related queries. + let channel = Channel::from_shared(grpc_url.to_string()) + .with_context(|| "could not parse node URI")? + .connect() + .await + .with_context(|| "could not connect to grpc server") + .tap_err(|error| tracing::error!(?error, "could not connect to grpc server"))?; + + let ibc_connection_query_client = IbcConnectionQueryClient::new(channel.clone()); + let ibc_channel_query_client = IbcChannelQueryClient::new(channel.clone()); + let ibc_client_query_client = IbcClientQueryClient::new(channel.clone()); + let tendermint_proxy_service_client = TendermintProxyServiceClient::new(channel.clone()); + + let pk = node + .keyring() + .iter() + .next() + .expect("validator key in keyring") + .0; + let proposer_address = tendermint::account::Id::new( + ::digest(pk).as_slice()[0..20] + .try_into() + .expect(""), + ); + Ok(Self { + // the test relayer supports only a single connection on each chain as of now + connection_id: ConnectionId::new(0), + node, + storage, + client_id: ClientId::new(ClientType::new("07-tendermint".to_string()), 0)?, + chain_id: chain_id.clone(), + counterparty: Counterparty { + client_id: ClientId::new(ClientType::new("07-tendermint".to_string()), 0)?, + connection_id: None, + prefix: IBC_COMMITMENT_PREFIX.to_owned(), + }, + version: Version::default(), + signer: hex::encode_upper(proposer_address), + connection: None, + ibc_connection_query_client, + _ibc_channel_query_client: ibc_channel_query_client, + ibc_client_query_client, + tendermint_proxy_service_client, + }) + } + + pub async fn client(&mut self) -> Result { + // Sync the mock client, using the test 
wallet's spend key, to the latest snapshot. + Ok(MockClient::new(test_keys::SPEND_KEY.clone()) + .with_sync_to_storage(&self.storage) + .await? + .tap(|c| info!(client.notes = %c.notes.len(), "mock client synced to test storage"))) + } + + pub async fn get_latest_height(&mut self) -> Result { + let status: penumbra_proto::util::tendermint_proxy::v1::GetStatusResponse = self + .tendermint_proxy_service_client + .get_status(GetStatusRequest {}) + .await? + .into_inner(); + Ok(Height::new( + ChainId::chain_version(&self.chain_id), + status + .sync_info + .ok_or(anyhow!("no sync info"))? + .latest_block_height, + )?) + } + + // TODO: maybe move to an IBC extension trait for TestNode? + // or maybe the Block has everything it needs to produce this? + pub fn create_tendermint_header( + &self, + trusted_height: Option, + penumbra_proto::util::tendermint_proxy::v1::GetBlockByHeightResponse{block_id: _, block}: penumbra_proto::util::tendermint_proxy::v1::GetBlockByHeightResponse, + ) -> Result { + let pk = self + .node + .keyring() + .iter() + .next() + .expect("validator key in keyring") + .0; + let block = block.ok_or(anyhow!("no block"))?; + let header = block.header.ok_or(anyhow!("no header"))?; + + // the tendermint SignedHeader is non_exhaustive so we + // can't use struct syntax to instantiate it and have to do + // some annoying manual construction of the pb type instead. + let h: tendermint::block::Header = header.clone().try_into().expect("bad header"); + use tendermint_proto::v0_37::types::SignedHeader as RawSignedHeader; + // The SignedHeader is the header accompanied by the commit to prove it. 
+ let rsh: RawSignedHeader = RawSignedHeader { + header: Some(tendermint_proto::v0_37::types::Header { + version: Some(tendermint_proto::v0_37::version::Consensus { + block: header.version.as_ref().expect("version").block, + app: header.version.expect("version").app, + }), + chain_id: header.chain_id, + height: header.height.into(), + time: Some(tendermint_proto::google::protobuf::Timestamp { + seconds: header.time.as_ref().expect("time").seconds, + nanos: header.time.expect("time").nanos, + }), + last_block_id: header.last_block_id.clone().map(|a| { + tendermint_proto::v0_37::types::BlockId { + hash: a.hash, + part_set_header: a.part_set_header.map(|b| { + tendermint_proto::v0_37::types::PartSetHeader { + total: b.total, + hash: b.hash, + } + }), + } + }), + last_commit_hash: header.last_commit_hash.into(), + data_hash: header.data_hash.into(), + validators_hash: header.validators_hash.into(), + next_validators_hash: header.next_validators_hash.into(), + consensus_hash: header.consensus_hash.into(), + app_hash: header.app_hash.into(), + last_results_hash: header.last_results_hash.into(), + evidence_hash: header.evidence_hash.into(), + proposer_address: header.proposer_address.into(), + }), + commit: Some(tendermint_proto::v0_37::types::Commit { + // The commit is for the current height + height: header.height.into(), + round: 0.into(), + block_id: Some(tendermint_proto::v0_37::types::BlockId { + hash: h.hash().into(), + part_set_header: Some(tendermint_proto::v0_37::types::PartSetHeader { + total: 0, + hash: vec![], + }), + }), + // signatures for this block + signatures: self + .node + .last_commit() + .unwrap() + .signatures + .clone() + .into_iter() + .map(Into::into) + .collect::>(), + }), + }; + + let signed_header = rsh.clone().try_into()?; + + // now get a SignedHeader + let pub_key = + tendermint::PublicKey::from_raw_ed25519(pk.as_bytes()).expect("pub key present"); + let proposer_address = tendermint::account::Id::new( + ::digest(pk).as_slice()[0..20] + 
.try_into() + .expect(""), + ); + // TODO: don't hardcode these + let validator_set = tendermint::validator::Set::new( + vec![tendermint::validator::Info { + address: proposer_address.try_into()?, + pub_key, + power: Power::try_from(25_000 * 10i64.pow(6))?, + name: Some("test validator".to_string()), + proposer_priority: 1i64.try_into()?, + }], + // Same validator as proposer? + Some(tendermint::validator::Info { + address: proposer_address.try_into()?, + pub_key, + power: Power::try_from(25_000 * 10i64.pow(6))?, + name: Some("test validator".to_string()), + proposer_priority: 1i64.try_into()?, + }), + ); + + // now we can make the Header + let header = TendermintHeader { + signed_header, + validator_set: validator_set.clone(), + trusted_validator_set: validator_set.clone(), + trusted_height: trusted_height.unwrap_or_else(|| ibc_types::core::client::Height { + revision_number: 0, + revision_height: 0, + }), + }; + Ok(header) + } +} diff --git a/crates/core/app/tests/common/ibc_tests/relayer.rs b/crates/core/app/tests/common/ibc_tests/relayer.rs new file mode 100644 index 0000000000..86705d770d --- /dev/null +++ b/crates/core/app/tests/common/ibc_tests/relayer.rs @@ -0,0 +1,907 @@ +use { + super::TestNodeWithIBC, + anyhow::{anyhow, Result}, + ibc_proto::ibc::core::{ + client::v1::{QueryClientStateRequest, QueryConsensusStateRequest}, + connection::v1::QueryConnectionRequest, + }, + ibc_types::lightclients::tendermint::client_state::ClientState as TendermintClientState, + ibc_types::{ + core::{ + client::{ + msgs::{MsgCreateClient, MsgUpdateClient}, + Height, + }, + commitment::{MerkleProof, MerkleRoot}, + connection::{ + msgs::{ + MsgConnectionOpenAck, MsgConnectionOpenConfirm, MsgConnectionOpenInit, + MsgConnectionOpenTry, + }, + ConnectionEnd, Counterparty, State as ConnectionState, Version, + }, + }, + lightclients::tendermint::{ + client_state::AllowUpdate, consensus_state::ConsensusState, + header::Header as TendermintHeader, TrustThreshold, + }, + DomainType 
as _, + }, + penumbra_ibc::{ + component::ConnectionStateReadExt as _, IbcRelay, IBC_COMMITMENT_PREFIX, IBC_PROOF_SPECS, + }, + penumbra_proto::{util::tendermint_proxy::v1::GetBlockByHeightRequest, DomainType}, + penumbra_stake::state_key::chain, + penumbra_transaction::{TransactionParameters, TransactionPlan}, + prost::Message as _, + sha2::Digest, + std::time::Duration, + tendermint::Time, +}; +#[allow(unused)] +pub struct MockRelayer { + pub chain_a_ibc: TestNodeWithIBC, + pub chain_b_ibc: TestNodeWithIBC, +} + +#[allow(unused)] +impl MockRelayer { + pub async fn get_connection_states(&mut self) -> Result<(ConnectionState, ConnectionState)> { + let connection_on_a_response = self + .chain_a_ibc + .ibc_connection_query_client + .connection(QueryConnectionRequest { + connection_id: self.chain_a_ibc.connection_id.to_string(), + }) + .await? + .into_inner(); + let connection_on_b_response = self + .chain_b_ibc + .ibc_connection_query_client + .connection(QueryConnectionRequest { + connection_id: self.chain_b_ibc.connection_id.to_string(), + }) + .await? 
+ .into_inner(); + + Ok( + match ( + connection_on_a_response.connection, + connection_on_b_response.connection, + ) { + (Some(connection_a), Some(connection_b)) => { + let connection_a: ConnectionEnd = connection_a.try_into().unwrap(); + let connection_b: ConnectionEnd = connection_b.try_into().unwrap(); + (connection_a.state, connection_b.state) + } + (None, None) => ( + ConnectionState::Uninitialized, + ConnectionState::Uninitialized, + ), + (None, Some(connection_b)) => { + let connection_b: ConnectionEnd = connection_b.try_into().unwrap(); + (ConnectionState::Uninitialized, connection_b.state) + } + (Some(connection_a), None) => { + let connection_a: ConnectionEnd = connection_a.try_into().unwrap(); + (connection_a.state, ConnectionState::Uninitialized) + } + }, + ) + } + + pub async fn _handshake(&mut self) -> Result<(), anyhow::Error> { + // The IBC connection handshake has four steps (Init, Try, Ack, Confirm). + // https://github.com/penumbra-zone/hermes/blob/a34a11fec76de3b573b539c237927e79cb74ec00/crates/relayer/src/connection.rs#L672 + // https://github.com/cosmos/ibc/blob/main/spec/core/ics-003-connection-semantics/README.md#opening-handshake + + self._sync_chains().await?; + + let (a_state, b_state) = self.get_connection_states().await?; + assert!( + a_state == ConnectionState::Uninitialized && b_state == ConnectionState::Uninitialized + ); + + // 1: send the Init message to chain A + { + tracing::info!("Send Init to chain A"); + self._build_and_send_connection_open_init().await?; + } + + let (a_state, b_state) = self.get_connection_states().await?; + assert!(a_state == ConnectionState::Init && b_state == ConnectionState::Uninitialized); + + self._sync_chains().await?; + + // 2. 
send the OpenTry message to chain B + { + tracing::info!("send OpenTry to chain B"); + self._build_and_send_connection_open_try().await?; + } + + let (a_state, b_state) = self.get_connection_states().await?; + assert!(a_state == ConnectionState::Init && b_state == ConnectionState::TryOpen); + + self._sync_chains().await?; + + // 3. Send the OpenAck message to chain A + { + tracing::info!("send OpenAck to chain A"); + self._build_and_send_connection_open_ack().await?; + } + + let (a_state, b_state) = self.get_connection_states().await?; + assert!(a_state == ConnectionState::Open && b_state == ConnectionState::TryOpen); + + self._sync_chains().await?; + + // 4. Send the OpenConfirm message to chain B + { + tracing::info!("send OpenConfirm to chain B"); + self._build_and_send_connection_open_confirm().await?; + } + + let (a_state, b_state) = self.get_connection_states().await?; + assert!(a_state == ConnectionState::Open && b_state == ConnectionState::Open); + + // Ensure the chain timestamps remain in sync + self._sync_chains().await?; + + Ok(()) + } + + pub async fn _create_clients(&mut self) -> Result<(), anyhow::Error> { + self._sync_chains().await?; + // helper function to create client for chain B on chain A + async fn _create_client_inner( + chain_a_ibc: &mut TestNodeWithIBC, + chain_b_ibc: &mut TestNodeWithIBC, + ) -> Result<()> { + let pk = chain_b_ibc + .node + .keyring() + .iter() + .next() + .expect("validator key in keyring") + .0; + let proposer_address = tendermint::account::Id::new( + ::digest(pk).as_slice()[0..20] + .try_into() + .expect(""), + ); + let pub_key = + tendermint::PublicKey::from_raw_ed25519(pk.as_bytes()).expect("pub key present"); + + // Create the client for chain B on chain A. 
+ let plan = { + let ibc_msg = IbcRelay::CreateClient(MsgCreateClient { + // Chain B will be signing messages to chain A + signer: chain_b_ibc.signer.clone(), + client_state: ibc_types::lightclients::tendermint::client_state::ClientState { + // Chain ID of the client state is for the counterparty + chain_id: chain_b_ibc.chain_id.clone().into(), + trust_level: TrustThreshold { + numerator: 1, + denominator: 3, + }, + trusting_period: Duration::from_secs(120_000), + unbonding_period: Duration::from_secs(240_000), + max_clock_drift: Duration::from_secs(5), + // The latest_height is for chain B + latest_height: chain_b_ibc.get_latest_height().await?, + // The ICS02 validation is hardcoded to expect 2 proof specs + // (root and substore, see [`penumbra_ibc::component::ics02_validation`]). + proof_specs: IBC_PROOF_SPECS.to_vec(), + upgrade_path: vec!["upgrade".to_string(), "upgradedIBCState".to_string()], + allow_update: AllowUpdate { + after_expiry: false, + after_misbehaviour: false, + }, + frozen_height: None, + } + .into(), + consensus_state: + ibc_types::lightclients::tendermint::consensus_state::ConsensusState { + timestamp: *chain_b_ibc.node.timestamp(), + root: MerkleRoot { + hash: chain_b_ibc.node.last_app_hash().to_vec(), + }, + next_validators_hash: (*chain_b_ibc + .node + .last_validator_set_hash() + .unwrap()) + .into(), + } + .into(), + }) + .into(); + TransactionPlan { + actions: vec![ibc_msg], + // Now fill out the remaining parts of the transaction needed for verification: + memo: None, + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: chain_a_ibc.chain_id.clone(), + ..Default::default() + }, + } + }; + let tx = chain_a_ibc + .client() + .await? + .witness_auth_build(&plan) + .await?; + + // Create the client for chain B on chain A. 
+ chain_a_ibc + .node + .block() + .with_data(vec![tx.encode_to_vec()]) + .execute() + .await?; + + Ok(()) + } + + // Each chain will need a client created corresponding to its IBC connection with the other chain: + _create_client_inner(&mut self.chain_a_ibc, &mut self.chain_b_ibc).await?; + _create_client_inner(&mut self.chain_b_ibc, &mut self.chain_a_ibc).await?; + + Ok(()) + } + + // helper function to build ConnectionOpenInit to chain A + pub async fn _build_and_send_connection_open_init(&mut self) -> Result<()> { + self._sync_chains().await?; + let chain_a_ibc = &mut self.chain_a_ibc; + let chain_b_ibc = &mut self.chain_b_ibc; + let plan = { + let ibc_msg = IbcRelay::ConnectionOpenInit(MsgConnectionOpenInit { + client_id_on_a: chain_a_ibc.client_id.clone(), + counterparty: chain_a_ibc.counterparty.clone(), + version: Some(chain_a_ibc.version.clone()), + delay_period: Duration::from_secs(1), + signer: chain_b_ibc.signer.clone(), + }) + .into(); + TransactionPlan { + actions: vec![ibc_msg], + // Now fill out the remaining parts of the transaction needed for verification: + memo: None, + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: chain_a_ibc.chain_id.clone(), + ..Default::default() + }, + } + }; + let tx = chain_a_ibc + .client() + .await? + .witness_auth_build(&plan) + .await?; + + // Execute the transaction, applying it to the chain state. + let pre_tx_snapshot = chain_a_ibc.storage.latest_snapshot(); + chain_a_ibc + .node + .block() + .with_data(vec![tx.encode_to_vec()]) + .execute() + .await?; + let post_tx_snapshot = chain_a_ibc.storage.latest_snapshot(); + + // validate the connection state is now "init" + { + // Connection should not exist pre-commit + assert!(pre_tx_snapshot + .get_connection(&chain_a_ibc.connection_id) + .await? + .is_none(),); + + // Post-commit, the connection should be in the "init" state. 
+ let connection = post_tx_snapshot + .get_connection(&chain_a_ibc.connection_id) + .await? + .ok_or_else(|| { + anyhow::anyhow!( + "no connection with the specified ID {} exists", + &chain_a_ibc.connection_id + ) + })?; + + assert_eq!(connection.state.clone(), ConnectionState::Init); + + chain_a_ibc.connection = Some(connection.clone()); + } + + Ok(()) + } + + pub async fn handshake(&mut self) -> Result<(), anyhow::Error> { + // Open a connection on each chain to the other chain. + // This is accomplished by following the ICS-003 spec for connection handshakes. + + // The Clients need to be created on each chain prior to the handshake. + self._create_clients().await?; + // The handshake is a multi-step process, this call will ratchet through the steps. + self._handshake().await?; + + Ok(()) + } + + // helper function to sync the chain times + pub async fn _sync_chains(&mut self) -> Result<()> { + let mut chain_a_time = self.chain_a_ibc.node.timestamp(); + let mut chain_b_time = self.chain_b_ibc.node.timestamp(); + + while chain_a_time < chain_b_time { + self.chain_a_ibc.node.block().execute().await?; + chain_a_time = self.chain_a_ibc.node.timestamp(); + } + while chain_b_time < chain_a_time { + self.chain_b_ibc.node.block().execute().await?; + chain_b_time = self.chain_b_ibc.node.timestamp(); + } + + chain_a_time = self.chain_a_ibc.node.timestamp(); + chain_b_time = self.chain_b_ibc.node.timestamp(); + assert_eq!(chain_a_time, chain_b_time); + + Ok(()) + } + + // tell chain b about chain a + pub async fn _build_and_send_update_client_b(&mut self) -> Result { + tracing::info!( + "send update client for chain {} to chain {}", + self.chain_a_ibc.chain_id, + self.chain_b_ibc.chain_id, + ); + // reverse these because we're sending to chain B + let chain_a_ibc = &mut self.chain_b_ibc; + let chain_b_ibc = &mut self.chain_a_ibc; + + _build_and_send_update_client(chain_a_ibc, chain_b_ibc).await + } + + // helper function to build UpdateClient to send to chain A + pub async 
fn _build_and_send_update_client_a(&mut self) -> Result { + tracing::info!( + "send update client for chain {} to chain {}", + self.chain_b_ibc.chain_id, + self.chain_a_ibc.chain_id, + ); + let chain_a_ibc = &mut self.chain_a_ibc; + let chain_b_ibc = &mut self.chain_b_ibc; + + _build_and_send_update_client(chain_a_ibc, chain_b_ibc).await + } + + // Send an ACK message to chain A + // https://github.com/penumbra-zone/hermes/blob/a34a11fec76de3b573b539c237927e79cb74ec00/crates/relayer/src/connection.rs#L1126 + pub async fn _build_and_send_connection_open_ack(&mut self) -> Result<()> { + // This is a load-bearing block execution that should be removed + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + self._sync_chains().await?; + + let chain_b_connection_id = self.chain_b_ibc.connection_id.clone(); + let chain_a_connection_id = self.chain_a_ibc.connection_id.clone(); + + // Build message(s) for updating client on source + let src_client_height = self._build_and_send_update_client_a().await?; + // Build message(s) for updating client on destination + let dst_client_height = self._build_and_send_update_client_b().await?; + + let connection_of_a_on_b_response = self + .chain_b_ibc + .ibc_connection_query_client + .connection(QueryConnectionRequest { + connection_id: chain_a_connection_id.to_string(), + }) + .await? + .into_inner(); + let client_state_of_a_on_b_response = self + .chain_b_ibc + .ibc_client_query_client + .client_state(QueryClientStateRequest { + client_id: self.chain_a_ibc.client_id.to_string(), + }) + .await? + .into_inner(); + let consensus_state_of_a_on_b_response = self + .chain_b_ibc + .ibc_client_query_client + .consensus_state(QueryConsensusStateRequest { + client_id: self.chain_a_ibc.client_id.to_string(), + revision_number: 0, + revision_height: 0, + latest_height: true, + }) + .await? 
+ .into_inner(); + assert_eq!( + connection_of_a_on_b_response.clone().proof_height, + consensus_state_of_a_on_b_response.clone().proof_height + ); + assert_eq!( + client_state_of_a_on_b_response.clone().proof_height, + consensus_state_of_a_on_b_response.clone().proof_height + ); + + let proof_height_on_b = client_state_of_a_on_b_response.clone().proof_height; + + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + self._build_and_send_update_client_a().await?; + self._sync_chains().await?; + + let plan = { + // This mocks the relayer constructing a connection open try message on behalf + // of the counterparty chain. + // we can't directly construct this because one of the struct fields is private + // and it's not from this crate, but we _can_ create the proto type and then convert it! + let proto_ack = ibc_proto::ibc::core::connection::v1::MsgConnectionOpenAck { + connection_id: self.chain_a_ibc.connection_id.to_string(), + counterparty_connection_id: chain_b_connection_id.to_string(), + version: Some(Version::default().into()), + client_state: Some( + client_state_of_a_on_b_response + .clone() + .client_state + .unwrap(), + ), + proof_height: Some(proof_height_on_b.unwrap()), + proof_try: connection_of_a_on_b_response.proof, + proof_client: client_state_of_a_on_b_response.clone().proof, + proof_consensus: consensus_state_of_a_on_b_response.proof, + // consensus height of a on b (the height chain b's ibc client trusts chain a at) + consensus_height: Some( + ibc_types::lightclients::tendermint::client_state::ClientState::try_from( + client_state_of_a_on_b_response + .clone() + .client_state + .unwrap(), + )? 
+ .latest_height + .into(), + ), + signer: self.chain_b_ibc.signer.clone(), + // optional field, don't include + host_consensus_state_proof: vec![], + }; + let ibc_msg = + IbcRelay::ConnectionOpenAck(MsgConnectionOpenAck::try_from(proto_ack)?).into(); + TransactionPlan { + actions: vec![ibc_msg], + // Now fill out the remaining parts of the transaction needed for verification: + memo: None, + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: self.chain_a_ibc.chain_id.clone(), + ..Default::default() + }, + } + }; + let tx = self + .chain_a_ibc + .client() + .await? + .witness_auth_build(&plan) + .await?; + + // Execute the transaction, applying it to the chain state. + let pre_tx_snapshot = self.chain_a_ibc.storage.latest_snapshot(); + self.chain_a_ibc + .node + .block() + .with_data(vec![tx.encode_to_vec()]) + .execute() + .await?; + let post_tx_snapshot = self.chain_a_ibc.storage.latest_snapshot(); + + // validate the connection state is now "OPEN" + { + // Connection should be in INIT pre-commit + let connection = pre_tx_snapshot + .get_connection(&self.chain_a_ibc.connection_id) + .await? + .ok_or_else(|| { + anyhow::anyhow!( + "no connection with the specified ID {} exists", + &self.chain_a_ibc.connection_id + ) + })?; + + assert_eq!(connection.state, ConnectionState::Init); + + // Post-commit, the connection should be in the "OPEN" state. + let connection = post_tx_snapshot + .get_connection(&self.chain_a_ibc.connection_id) + .await? 
+ .ok_or_else(|| { + anyhow::anyhow!( + "no connection with the specified ID {} exists", + &self.chain_a_ibc.connection_id + ) + })?; + + assert_eq!(connection.state, ConnectionState::Open); + + self.chain_a_ibc.connection = Some(connection); + } + + Ok(()) + } + + // helper function to build ConnectionOpenTry to send to chain B + // at this point chain A is in INIT state and chain B has no state + // after this, chain A will be in INIT and chain B will be in TRYOPEN state. + pub async fn _build_and_send_connection_open_try(&mut self) -> Result<()> { + // This is a load-bearing block execution that should be removed + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + self._sync_chains().await?; + + let src_connection = self + .chain_a_ibc + .ibc_connection_query_client + .connection(QueryConnectionRequest { + connection_id: self.chain_a_ibc.connection_id.to_string(), + }) + .await? + .into_inner(); + + let chain_b_height = self._build_and_send_update_client_a().await?; + let chain_a_height = self._build_and_send_update_client_b().await?; + + let client_state_of_b_on_a_response = self + .chain_a_ibc + .ibc_client_query_client + .client_state(QueryClientStateRequest { + client_id: self.chain_b_ibc.client_id.to_string(), + }) + .await? + .into_inner(); + let connection_of_b_on_a_response = self + .chain_a_ibc + .ibc_connection_query_client + .connection(QueryConnectionRequest { + connection_id: self.chain_b_ibc.connection_id.to_string(), + }) + .await? + .into_inner(); + let consensus_state_of_b_on_a_response = self + .chain_a_ibc + .ibc_client_query_client + .consensus_state(QueryConsensusStateRequest { + client_id: self.chain_b_ibc.client_id.to_string(), + revision_number: 0, + revision_height: 0, + latest_height: true, + }) + .await? 
+ .into_inner(); + + // Then construct the ConnectionOpenTry message + let proof_consensus_state_of_b_on_a = + MerkleProof::decode(consensus_state_of_b_on_a_response.clone().proof.as_slice())?; + + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + self._sync_chains().await?; + + assert_eq!( + consensus_state_of_b_on_a_response.proof_height, + client_state_of_b_on_a_response.proof_height + ); + assert_eq!( + connection_of_b_on_a_response.proof_height, + client_state_of_b_on_a_response.proof_height + ); + + let proofs_height_on_a: Height = connection_of_b_on_a_response + .proof_height + .clone() + .unwrap() + .try_into()?; + + let proof_client_state_of_b_on_a = + MerkleProof::decode(client_state_of_b_on_a_response.clone().proof.as_slice())?; + let proof_conn_end_on_a = + MerkleProof::decode(connection_of_b_on_a_response.clone().proof.as_slice())?; + let proof_consensus_state_of_b_on_a = + MerkleProof::decode(consensus_state_of_b_on_a_response.clone().proof.as_slice())?; + + // TODO: too side-effecty? + self.chain_b_ibc.counterparty.connection_id = Some(self.chain_a_ibc.connection_id.clone()); + self.chain_a_ibc.counterparty.connection_id = Some(self.chain_b_ibc.connection_id.clone()); + + self._build_and_send_update_client_b().await?; + self._sync_chains().await?; + + let cs: TendermintClientState = client_state_of_b_on_a_response + .clone() + .client_state + .unwrap() + .try_into()?; + let plan = { + // This mocks the relayer constructing a connection open try message on behalf + // of the counterparty chain. + #[allow(deprecated)] + let ibc_msg = IbcRelay::ConnectionOpenTry(MsgConnectionOpenTry { + // Counterparty is chain A. 
+ counterparty: Counterparty { + client_id: self.chain_a_ibc.client_id.clone(), + connection_id: Some(self.chain_a_ibc.connection_id.clone()), + prefix: IBC_COMMITMENT_PREFIX.to_owned(), + }, + delay_period: Duration::from_secs(1), + signer: self.chain_a_ibc.signer.clone(), + client_id_on_b: self.chain_b_ibc.client_id.clone(), + client_state_of_b_on_a: client_state_of_b_on_a_response + .client_state + .expect("client state present"), + versions_on_a: vec![Version::default()], + proof_conn_end_on_a, + proof_client_state_of_b_on_a, + proof_consensus_state_of_b_on_a, + proofs_height_on_a, + consensus_height_of_b_on_a: chain_b_height, + // this seems to be an optional proof + proof_consensus_state_of_b: None, + // deprecated + previous_connection_id: "".to_string(), + }) + .into(); + TransactionPlan { + actions: vec![ibc_msg], + // Now fill out the remaining parts of the transaction needed for verification: + memo: None, + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: self.chain_b_ibc.chain_id.clone(), + ..Default::default() + }, + } + }; + let tx = self + .chain_b_ibc + .client() + .await? + .witness_auth_build(&plan) + .await?; + + // Execute the transaction, applying it to the chain state. 
+ let pre_tx_snapshot = self.chain_b_ibc.storage.latest_snapshot(); + + // validate the chain b pre-tx storage root hash is what we expect: + let pre_tx_hash = pre_tx_snapshot.root_hash().await?; + + // Validate the tx hash is what we expect: + let tx_hash = sha2::Sha256::digest(&tx.encode_to_vec()); + + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + + // execute the transaction containing the opentry message + self.chain_b_ibc + .node + .block() + .with_data(vec![tx.encode_to_vec()]) + .execute() + .await?; + self.chain_b_ibc.node.block().execute().await?; + let post_tx_snapshot = self.chain_b_ibc.storage.latest_snapshot(); + + // validate the connection state is now "tryopen" + { + // Connection should not exist pre-commit + assert!(pre_tx_snapshot + .get_connection(&self.chain_b_ibc.connection_id) + .await? + .is_none(),); + + // Post-commit, the connection should be in the "tryopen" state. + let connection = post_tx_snapshot + .get_connection(&self.chain_b_ibc.connection_id) + .await? + .ok_or_else(|| { + anyhow::anyhow!( + "no connection with the specified ID {} exists", + &self.chain_b_ibc.connection_id + ) + })?; + + assert_eq!(connection.state, ConnectionState::TryOpen); + + self.chain_b_ibc.connection = Some(connection); + } + + self._sync_chains().await?; + + Ok(()) + } + + // sends a ConnectionOpenConfirm message to chain B + // at this point, chain A is in OPEN and B is in TRYOPEN. + // afterwards, chain A will be in OPEN and chain B will be in OPEN. 
+ pub async fn _build_and_send_connection_open_confirm(&mut self) -> Result<()> { + // This is a load-bearing block execution that should be removed + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + self._sync_chains().await?; + + // https://github.com/penumbra-zone/hermes/blob/a34a11fec76de3b573b539c237927e79cb74ec00/crates/relayer/src/connection.rs#L1296 + let chain_b_connection_id = self.chain_b_ibc.connection_id.clone(); + let connection_of_b_on_a_response = self + .chain_a_ibc + .ibc_connection_query_client + .connection(QueryConnectionRequest { + connection_id: chain_b_connection_id.to_string(), + }) + .await? + .into_inner(); + + let dst_client_target_height = self._build_and_send_update_client_b().await?; + + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + self._build_and_send_update_client_b().await?; + self._sync_chains().await?; + + let plan = { + // This mocks the relayer constructing a connection open try message on behalf + // of the counterparty chain. + let ibc_msg = IbcRelay::ConnectionOpenConfirm(MsgConnectionOpenConfirm { + conn_id_on_b: self.chain_b_ibc.connection_id.clone(), + proof_conn_end_on_a: MerkleProof::decode( + connection_of_b_on_a_response.clone().proof.as_slice(), + )?, + proof_height_on_a: connection_of_b_on_a_response + .proof_height + .unwrap() + .try_into()?, + signer: self.chain_a_ibc.signer.clone(), + }) + .into(); + TransactionPlan { + actions: vec![ibc_msg], + // Now fill out the remaining parts of the transaction needed for verification: + memo: None, + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: self.chain_b_ibc.chain_id.clone(), + ..Default::default() + }, + } + }; + let tx = self + .chain_b_ibc + .client() + .await? + .witness_auth_build(&plan) + .await?; + + // Execute the transaction, applying it to the chain state. 
+ let pre_tx_snapshot = self.chain_b_ibc.storage.latest_snapshot(); + self.chain_b_ibc + .node + .block() + .with_data(vec![tx.encode_to_vec()]) + .execute() + .await?; + let post_tx_snapshot = self.chain_b_ibc.storage.latest_snapshot(); + + // validate the connection state is now "open" + { + // Connection should be in TRYOPEN pre-commit + let connection = pre_tx_snapshot + .get_connection(&self.chain_b_ibc.connection_id) + .await? + .ok_or_else(|| { + anyhow::anyhow!( + "no connection with the specified ID {} exists", + &self.chain_b_ibc.connection_id + ) + })?; + + assert_eq!(connection.state, ConnectionState::TryOpen); + + // Post-commit, the connection should be in the "OPEN" state. + let connection = post_tx_snapshot + .get_connection(&self.chain_b_ibc.connection_id) + .await? + .ok_or_else(|| { + anyhow::anyhow!( + "no connection with the specified ID {} exists", + &self.chain_b_ibc.connection_id + ) + })?; + + assert_eq!(connection.state, ConnectionState::Open); + + self.chain_b_ibc.connection = Some(connection); + } + + Ok(()) + } +} + +// tell chain A about chain B. returns the height of chain b on chain a after update. +async fn _build_and_send_update_client( + chain_a_ibc: &mut TestNodeWithIBC, + chain_b_ibc: &mut TestNodeWithIBC, +) -> Result { + let chain_b_height = chain_b_ibc.get_latest_height().await?; + let chain_b_latest_block: penumbra_proto::util::tendermint_proxy::v1::GetBlockByHeightResponse = + chain_b_ibc + .tendermint_proxy_service_client + .get_block_by_height(GetBlockByHeightRequest { + height: chain_b_height.revision_height.try_into()?, + }) + .await? + .into_inner(); + + // Look up the last recorded consensus state for the counterparty client on chain A + // to determine the last trusted height. + let client_state_of_b_on_a_response = chain_a_ibc + .ibc_client_query_client + .client_state(QueryClientStateRequest { + client_id: chain_b_ibc.client_id.to_string(), + }) + .await? 
+ .into_inner(); + let trusted_height = ibc_types::lightclients::tendermint::client_state::ClientState::try_from( + client_state_of_b_on_a_response + .clone() + .client_state + .unwrap(), + )? + .latest_height; + let chain_b_new_height = chain_b_latest_block + .block + .clone() + .unwrap() + .header + .unwrap() + .height; + let plan = { + let ibc_msg = IbcRelay::UpdateClient(MsgUpdateClient { + signer: chain_b_ibc.signer.clone(), + client_id: chain_a_ibc.client_id.clone(), + client_message: chain_b_ibc + // The TendermintHeader is derived from the Block + // and represents chain B's claims about its current state. + .create_tendermint_header(Some(trusted_height), chain_b_latest_block.clone())? + .into(), + }) + .into(); + TransactionPlan { + actions: vec![ibc_msg], + // Now fill out the remaining parts of the transaction needed for verification: + memo: None, + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: chain_a_ibc.chain_id.clone(), + ..Default::default() + }, + } + }; + let tx = chain_a_ibc + .client() + .await? + .witness_auth_build(&plan) + .await?; + + // Execute the transaction, applying it to the chain state. + chain_a_ibc + .node + .block() + .with_data(vec![tx.encode_to_vec()]) + .execute() + .await?; + + Ok(chain_b_height) +} diff --git a/crates/core/app/tests/common/mod.rs b/crates/core/app/tests/common/mod.rs index 6774c878f5..36179b5cd7 100644 --- a/crates/core/app/tests/common/mod.rs +++ b/crates/core/app/tests/common/mod.rs @@ -28,3 +28,7 @@ mod test_node_ext; /// See [`ValidatorDataRead`][penumbra_stake::component::validator_handler::ValidatorDataRead], /// and [`ValidatorDataReadExt`]. mod validator_read_ext; + +/// Methods for testing IBC functionality. 
+#[allow(unused)] +pub mod ibc_tests; diff --git a/crates/core/app/tests/ibc_handshake.rs b/crates/core/app/tests/ibc_handshake.rs new file mode 100644 index 0000000000..e000a824eb --- /dev/null +++ b/crates/core/app/tests/ibc_handshake.rs @@ -0,0 +1,77 @@ +use { + common::ibc_tests::{MockRelayer, TestNodeWithIBC, ValidatorKeys}, + once_cell::sync::Lazy, + std::time::Duration, + tap::Tap as _, +}; + +/// The proof specs for the main store. +pub static MAIN_STORE_PROOF_SPEC: Lazy> = + Lazy::new(|| vec![cnidarium::ics23_spec()]); + +mod common; + +/// Exercises that the IBC handshake succeeds. +#[tokio::test] +async fn ibc_handshake() -> anyhow::Result<()> { + // Install a test logger, and acquire some temporary storage. + let guard = common::set_tracing_subscriber(); + + let block_duration = Duration::from_secs(5); + // Fixed start times (both chains start at the same time to avoid unintended timeouts): + let start_time_a = tendermint::Time::parse_from_rfc3339("2022-02-11T17:30:50.425417198Z")?; + + // But chain B will be 39 blocks ahead of chain A, so offset chain A's + // start time so they match: + let start_time_b = start_time_a.checked_sub(39 * block_duration).unwrap(); + + // Hardcoded keys for each chain for test reproducibility: + let vkeys_a = ValidatorKeys::from_seed([0u8; 32]); + let vkeys_b = ValidatorKeys::from_seed([1u8; 32]); + let sk_a = vkeys_a.validator_cons_sk.ed25519_signing_key().unwrap(); + let sk_b = vkeys_b.validator_cons_sk.ed25519_signing_key().unwrap(); + + let ska = ed25519_consensus::SigningKey::try_from(sk_a.as_bytes())?; + let skb = ed25519_consensus::SigningKey::try_from(sk_b.as_bytes())?; + let keys_a = (ska.clone(), ska.verification_key()); + let keys_b = (skb.clone(), skb.verification_key()); + + // Set up some configuration for the two different chains we'll need to keep around. 
+ let mut chain_a_ibc = TestNodeWithIBC::new("a", start_time_a, keys_a).await?; + let mut chain_b_ibc = TestNodeWithIBC::new("b", start_time_b, keys_b).await?; + + // The two chains can't IBC handshake during the first block, let's fast forward + // them both a few. + for _ in 0..3 { + chain_a_ibc.node.block().execute().await?; + } + // Do them each a different # of blocks to make sure the heights don't get confused. + for _ in 0..42 { + chain_b_ibc.node.block().execute().await?; + } + + // The chains should be at the same time: + assert_eq!(chain_a_ibc.node.timestamp(), chain_b_ibc.node.timestamp()); + // But their block heights should be different: + assert_ne!( + chain_a_ibc.get_latest_height().await?, + chain_b_ibc.get_latest_height().await?, + ); + + assert_eq!( + chain_a_ibc.get_latest_height().await?.revision_height, + chain_a_ibc.storage.latest_snapshot().version() + ); + + // The Relayer will handle IBC operations and manage state for the two test chains + let mut relayer = MockRelayer { + chain_a_ibc, + chain_b_ibc, + }; + + // Perform the IBC connection handshake between the two chains. 
+ // TODO: some testing of failure cases of the handshake process would be good + relayer.handshake().await?; + + Ok(()).tap(|_| drop(relayer)).tap(|_| drop(guard)) +} diff --git a/crates/core/app/tests/mock_consensus_block_proving.rs b/crates/core/app/tests/mock_consensus_block_proving.rs index f4be2d71d6..f522020769 100644 --- a/crates/core/app/tests/mock_consensus_block_proving.rs +++ b/crates/core/app/tests/mock_consensus_block_proving.rs @@ -247,11 +247,11 @@ async fn verify_storage_proof_simple() -> anyhow::Result<()> { assert_eq!(u64::from(latest_height), storage_revision_height); // Try fetching the client state via the IBC API - let node_last_app_hash = node.last_app_hash(); + let node_last_app_hash = node.last_app_hash().to_vec(); tracing::debug!( "making IBC client state request at height {} and hash {}", latest_height, - hex::encode(node_last_app_hash) + hex::encode(&node_last_app_hash) ); let ibc_client_state_response = ibc_client_query_client .client_state(QueryClientStateRequest { @@ -260,11 +260,51 @@ async fn verify_storage_proof_simple() -> anyhow::Result<()> { .await? .into_inner(); + assert!( + ibc_client_state_response.client_state.as_ref().is_some() + && !ibc_client_state_response + .client_state + .as_ref() + .unwrap() + .value + .is_empty() + ); + let ibc_proof = MerkleProof::decode(ibc_client_state_response.clone().proof.as_slice())?; let ibc_value = ibc_client_state_response.client_state.unwrap(); assert_eq!(ibc_value.encode_to_vec(), value); + // The current height of the node should be one behind the proof height. 
+ assert_eq!( + u64::from(latest_height) + 1, + ibc_client_state_response + .proof_height + .clone() + .unwrap() + .revision_height + ); + + let proof_block: penumbra_proto::util::tendermint_proxy::v1::GetBlockByHeightResponse = + tendermint_proxy_service_client + .get_block_by_height(GetBlockByHeightRequest { + height: ibc_client_state_response + .proof_height + .clone() + .unwrap() + .revision_height + .try_into()?, + }) + .await? + .into_inner(); + + // The proof block should be nonexistent because we haven't finalized the in-progress + // block yet. + assert!(proof_block.block.is_none()); + + // Execute a block to finalize the proof block. + node.block().execute().await?; + // We should be able to get the block from the proof_height associated with // the proof and use the app_hash as the jmt root and succeed in proving: let proof_block: penumbra_proto::util::tendermint_proxy::v1::GetBlockByHeightResponse = @@ -289,49 +329,19 @@ async fn verify_storage_proof_simple() -> anyhow::Result<()> { .revision_height, proof_block.block.clone().unwrap().header.unwrap().height as u64 ); - // The node height when we directly retrieved the last app hash - // should match the proof height - assert_eq!( - ibc_client_state_response - .proof_height - .clone() - .unwrap() - .revision_height, - u64::from(latest_height) - ); - // TODO: these tests fail - if false { - // the proof block's app hash should match - assert_eq!( - node_last_app_hash, - proof_block.block.clone().unwrap().header.unwrap().app_hash, - "node claimed app hash for height {} was {}, however block header contained {}", - latest_height, - hex::encode(node_last_app_hash), - hex::encode(proof_block.block.clone().unwrap().header.unwrap().app_hash) - ); - println!( - "proof height: {} proof_block_root: {:?}", - ibc_client_state_response - .proof_height - .unwrap() - .revision_height, - hex::encode(proof_block.block.clone().unwrap().header.unwrap().app_hash) - ); - let proof_block_root = MerkleRoot { - hash: 
proof_block.block.unwrap().header.unwrap().app_hash, - }; - ibc_proof - .verify_membership( - &proof_specs, - proof_block_root, - merkle_path, - ibc_value.encode_to_vec(), - 0, - ) - .expect("the ibc proof should validate against the root of the proof_height's block"); - } + let proof_block_root = MerkleRoot { + hash: proof_block.block.unwrap().header.unwrap().app_hash, + }; + ibc_proof + .verify_membership( + &proof_specs, + proof_block_root, + merkle_path, + ibc_value.encode_to_vec(), + 0, + ) + .expect("the ibc proof should validate against the root of the proof_height's block"); Ok(()) .tap(|_| drop(node)) diff --git a/crates/core/component/ibc/src/component/rpc/client_query.rs b/crates/core/component/ibc/src/component/rpc/client_query.rs index 9b52f35097..2c4d66ec0a 100644 --- a/crates/core/component/ibc/src/component/rpc/client_query.rs +++ b/crates/core/component/ibc/src/component/rpc/client_query.rs @@ -11,6 +11,7 @@ use ibc_proto::ibc::core::client::v1::{ QueryUpgradedClientStateResponse, QueryUpgradedConsensusStateRequest, QueryUpgradedConsensusStateResponse, }; +use penumbra_sct::component::clock::EpochRead; use prost::Message; use ibc_types::core::client::ClientId; @@ -37,12 +38,6 @@ impl ClientQuery for IbcQuery { let snapshot = self.storage.latest_snapshot(); let client_id = ClientId::from_str(&request.get_ref().client_id) .map_err(|e| tonic::Status::invalid_argument(format!("invalid client id: {e}")))?; - let height = Height { - revision_number: HI::get_revision_number(&snapshot) - .await - .map_err(|e| tonic::Status::aborted(e.to_string()))?, - revision_height: snapshot.version(), - }; // Query for client_state and associated proof. 
let (cs_opt, proof) = snapshot @@ -60,11 +55,19 @@ impl ClientQuery for IbcQuery { .transpose() .map_err(|e| tonic::Status::aborted(format!("couldn't decode client state: {e}")))?; - let res = QueryClientStateResponse { - client_state, - proof: proof.encode_to_vec(), - proof_height: Some(height.into()), - }; + let res = + QueryClientStateResponse { + client_state, + proof: proof.encode_to_vec(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: snapshot.get_block_height().await.map_err(|e| { + tonic::Status::aborted(format!("couldn't decode height: {e}")) + })? + 1, + revision_number: HI::get_revision_number(&snapshot).await.map_err(|e| { + tonic::Status::aborted(format!("couldn't decode height: {e}")) + })?, + }), + }; Ok(tonic::Response::new(res)) } @@ -136,11 +139,19 @@ impl ClientQuery for IbcQuery { .transpose() .map_err(|e| tonic::Status::aborted(format!("couldn't decode consensus state: {e}")))?; - let res = QueryConsensusStateResponse { - consensus_state, - proof: proof.encode_to_vec(), - proof_height: Some(height.into()), - }; + let res = + QueryConsensusStateResponse { + consensus_state, + proof: proof.encode_to_vec(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: snapshot.get_block_height().await.map_err(|e| { + tonic::Status::aborted(format!("couldn't decode height: {e}")) + })? 
+ 1, + revision_number: HI::get_revision_number(&snapshot).await.map_err(|e| { + tonic::Status::aborted(format!("couldn't decode height: {e}")) + })?, + }), + }; Ok(tonic::Response::new(res)) } diff --git a/crates/core/component/ibc/src/component/rpc/connection_query.rs b/crates/core/component/ibc/src/component/rpc/connection_query.rs index ae98d9f381..4f653fc755 100644 --- a/crates/core/component/ibc/src/component/rpc/connection_query.rs +++ b/crates/core/component/ibc/src/component/rpc/connection_query.rs @@ -16,6 +16,7 @@ use ibc_types::path::{ ClientConnectionPath, ClientConsensusStatePath, ClientStatePath, ConnectionPath, }; use ibc_types::DomainType; +use penumbra_sct::component::clock::EpochRead as _; use prost::Message; use std::str::FromStr; @@ -58,16 +59,19 @@ impl ConnectionQuery for IbcQuery let conn = conn.map_err(|e| tonic::Status::aborted(format!("couldn't decode connection: {e}")))?; - let height = Height { - revision_number: 0, - revision_height: snapshot.version(), - }; - - let res = QueryConnectionResponse { - connection: conn, - proof: proof.encode_to_vec(), - proof_height: Some(height), - }; + let res = + QueryConnectionResponse { + connection: conn, + proof: proof.encode_to_vec(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: snapshot.get_block_height().await.map_err(|e| { + tonic::Status::aborted(format!("couldn't decode height: {e}")) + })? 
+ 1, + revision_number: HI::get_revision_number(&snapshot).await.map_err(|e| { + tonic::Status::aborted(format!("couldn't decode height: {e}")) + })?, + }), + }; Ok(tonic::Response::new(res)) } @@ -90,7 +94,7 @@ impl ConnectionQuery for IbcQuery _request: tonic::Request, ) -> std::result::Result, tonic::Status> { let snapshot = self.storage.latest_snapshot(); - let height = snapshot.version(); + let height = snapshot.version() + 1; let connection_counter = snapshot .get_connection_counter() @@ -163,9 +167,15 @@ impl ConnectionQuery for IbcQuery Ok(tonic::Response::new(QueryClientConnectionsResponse { connection_paths, proof: proof.encode_to_vec(), - proof_height: Some(Height { - revision_number: 0, - revision_height: snapshot.version(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: snapshot + .get_block_height() + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? + + 1, + revision_number: HI::get_revision_number(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))?, }), })) } @@ -211,9 +221,15 @@ impl ConnectionQuery for IbcQuery Ok(tonic::Response::new(QueryConnectionClientStateResponse { identified_client_state: Some(identified_client_state), proof: proof.encode_to_vec(), - proof_height: Some(Height { - revision_number: 0, - revision_height: snapshot.version(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: snapshot + .get_block_height() + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? 
+ + 1, + revision_number: HI::get_revision_number(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))?, }), })) } @@ -263,9 +279,13 @@ impl ConnectionQuery for IbcQuery consensus_state: consensus_state_any, client_id: client_id.to_string(), proof: proof.encode_to_vec(), - proof_height: Some(Height { - revision_number: 0, - revision_height: snapshot.version(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: snapshot.get_block_height().await.map_err(|e| { + tonic::Status::aborted(format!("couldn't decode height: {e}")) + })? + 1, + revision_number: HI::get_revision_number(&snapshot).await.map_err(|e| { + tonic::Status::aborted(format!("couldn't decode height: {e}")) + })?, }), }, )) diff --git a/crates/core/component/ibc/src/component/rpc/consensus_query.rs b/crates/core/component/ibc/src/component/rpc/consensus_query.rs index 39221e2384..c37d654de4 100644 --- a/crates/core/component/ibc/src/component/rpc/consensus_query.rs +++ b/crates/core/component/ibc/src/component/rpc/consensus_query.rs @@ -25,6 +25,7 @@ use ibc_types::DomainType; use ibc_types::core::channel::{ChannelId, IdentifiedChannelEnd, PortId}; use ibc_types::core::connection::ConnectionId; +use penumbra_sct::component::clock::EpochRead as _; use prost::Message; use std::str::FromStr; @@ -67,14 +68,19 @@ impl ConsensusQuery for IbcQuery let channel = channel.map_err(|e| tonic::Status::aborted(format!("couldn't decode channel: {e}")))?; - let res = QueryChannelResponse { - channel, - proof: proof.encode_to_vec(), - proof_height: Some(Height { - revision_number: 0, - revision_height: snapshot.version(), - }), - }; + let res = + QueryChannelResponse { + channel, + proof: proof.encode_to_vec(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: snapshot.get_block_height().await.map_err(|e| { + tonic::Status::aborted(format!("couldn't decode height: {e}")) + })? 
+ 1, + revision_number: HI::get_revision_number(&snapshot).await.map_err(|e| { + tonic::Status::aborted(format!("couldn't decode height: {e}")) + })?, + }), + }; Ok(tonic::Response::new(res)) } @@ -251,9 +257,15 @@ impl ConsensusQuery for IbcQuery Ok(tonic::Response::new(QueryChannelClientStateResponse { identified_client_state: Some(identified_client_state), proof: proof.encode_to_vec(), - proof_height: Some(Height { - revision_number: 0, - revision_height: snapshot.version(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: snapshot + .get_block_height() + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? + + 1, + revision_number: HI::get_revision_number(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))?, }), })) } @@ -338,9 +350,15 @@ impl ConsensusQuery for IbcQuery consensus_state: consensus_state_any, client_id: connection.client_id.clone().to_string(), proof: proof.encode_to_vec(), - proof_height: Some(Height { - revision_number: 0, - revision_height: snapshot.version(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: snapshot + .get_block_height() + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? + + 1, + revision_number: HI::get_revision_number(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))?, }), })) } @@ -379,9 +397,15 @@ impl ConsensusQuery for IbcQuery Ok(tonic::Response::new(QueryPacketCommitmentResponse { commitment, proof: proof.encode_to_vec(), - proof_height: Some(Height { - revision_number: 0, - revision_height: snapshot.version(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: snapshot + .get_block_height() + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? 
+ + 1, + revision_number: HI::get_revision_number(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))?, }), })) } @@ -476,9 +500,15 @@ impl ConsensusQuery for IbcQuery Ok(tonic::Response::new(QueryPacketReceiptResponse { received: receipt.is_some(), proof: proof.encode_to_vec(), - proof_height: Some(Height { - revision_number: 0, - revision_height: snapshot.version(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: snapshot + .get_block_height() + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? + + 1, + revision_number: HI::get_revision_number(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))?, }), })) } @@ -515,9 +545,15 @@ impl ConsensusQuery for IbcQuery Ok(tonic::Response::new(QueryPacketAcknowledgementResponse { acknowledgement, proof: proof.encode_to_vec(), - proof_height: Some(Height { - revision_number: 0, - revision_height: snapshot.version(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: snapshot + .get_block_height() + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? + + 1, + revision_number: HI::get_revision_number(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))?, }), })) } @@ -708,9 +744,15 @@ impl ConsensusQuery for IbcQuery Ok(tonic::Response::new(QueryNextSequenceReceiveResponse { next_sequence_receive: next_recv_sequence, proof: proof.encode_to_vec(), - proof_height: Some(Height { - revision_number: 0, - revision_height: snapshot.version(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: snapshot + .get_block_height() + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? 
+ + 1, + revision_number: HI::get_revision_number(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))?, }), })) } @@ -744,9 +786,15 @@ impl ConsensusQuery for IbcQuery Ok(tonic::Response::new(QueryNextSequenceSendResponse { next_sequence_send: next_send_sequence, proof: proof.encode_to_vec(), - proof_height: Some(Height { - revision_number: 0, - revision_height: snapshot.version(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: snapshot + .get_block_height() + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? + + 1, + revision_number: HI::get_revision_number(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))?, }), })) } diff --git a/crates/test/mock-consensus/src/block.rs b/crates/test/mock-consensus/src/block.rs index 2be7700835..76a8a77c93 100644 --- a/crates/test/mock-consensus/src/block.rs +++ b/crates/test/mock-consensus/src/block.rs @@ -190,7 +190,9 @@ where height }; - let last_commit = &test_node.last_commit; + // Pull the current last_commit out of the node, since it will + // be discarded after we build the block. + let last_commit = test_node.last_commit.clone(); // Set the validator set based on the current configuration. let pk = test_node @@ -219,8 +221,7 @@ where height, time: timestamp, // MerkleRoot of the lastCommit’s signatures. The signatures represent the validators that committed to the last block. The first block has an empty slices of bytes for the hash. 
- last_commit_hash: test_node - .last_commit + last_commit_hash: last_commit .as_ref() .map(|c| c.hash().unwrap()) .unwrap_or(Some( @@ -274,11 +275,13 @@ where last_commit_height=?last_commit.as_ref().map(|c| c.height.value()), "made block" ); - let block = Block::new(header.clone(), data, evidence, last_commit.clone())?; + // pass the current value of last_commit with this header + let block = Block::new(header.clone(), data, evidence, last_commit)?; // Now that the block is finalized, we can transition to the next block. // Generate a commit for the header we just made, that will be // included in the next header. + // Update the last_commit. test_node.last_commit = Some(Commit { height: block.header.height, round: Round::default(), diff --git a/crates/test/mock-consensus/src/lib.rs b/crates/test/mock-consensus/src/lib.rs index acaa435b80..79df9b4948 100644 --- a/crates/test/mock-consensus/src/lib.rs +++ b/crates/test/mock-consensus/src/lib.rs @@ -82,12 +82,8 @@ pub struct TestNode { last_app_hash: Vec, /// The last validator set hash value. last_validator_set_hash: Option, - /// The tendermint validators associated with the node. - /// Updated via processing updates within `BeginBlock` requests. - // pub validators: ValidatorSet, - /// The last tendermint commit. - // TODO: move the create_tendermint_header into TestNode and change vis on this - pub last_commit: Option, + /// The last tendermint block header commit value. + last_commit: Option, /// The consensus params hash. consensus_params_hash: Vec, /// The current block [`Height`][tendermint::block::Height]. @@ -127,6 +123,16 @@ impl TestNode { &self.last_app_hash } + /// Returns the last `commit` value. + pub fn last_commit(&self) -> Option<&Commit> { + self.last_commit.as_ref() + } + + /// Returns the last `validator_set_hash` value. + pub fn last_validator_set_hash(&self) -> Option<&tendermint::Hash> { + self.last_validator_set_hash.as_ref() + } + /// Returns the most recent `timestamp` value. 
pub fn timestamp(&self) -> &Time { &self.timestamp From 7e3a4289a5327ed0fa3044755ababfa91fd7ee2e Mon Sep 17 00:00:00 2001 From: Mason Yonkers Date: Fri, 23 Aug 2024 08:17:09 -0700 Subject: [PATCH 02/43] No longer display withdrawn auction NFTs when viewing balance in pcli (#4826) ## Describe your changes pcli no longer displays withdrawn auction NFTs when running ```pcli view balance``` ## Issue ticket number and link #4820 ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: I have only modified what pcli filters out before displaying balances and added a function in denom_metadata that matches a similar function for LPNFTs. > After completing a number of auctions, viewing my balances through pcli has become increasingly difficult due to all of my previously withdrawn auction NFTs. I decided to not filter out closed auction positions so they can serve as a reminder to withdraw them, matching the default behavior of the swap frontend. --- crates/bin/pcli/src/command/view/balance.rs | 6 ++++-- crates/core/asset/src/asset/denom_metadata.rs | 4 ++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/bin/pcli/src/command/view/balance.rs b/crates/bin/pcli/src/command/view/balance.rs index d6a04139f7..b8946fe1ef 100644 --- a/crates/bin/pcli/src/command/view/balance.rs +++ b/crates/bin/pcli/src/command/view/balance.rs @@ -80,10 +80,12 @@ impl BalanceCmd { (*index, asset.value(sum.into())) }) }) - // Exclude withdrawn LPNFTs. + // Exclude withdrawn LPNFTs and withdrawn auction NFTs. 
.filter(|(_, value)| match asset_cache.get(&value.asset_id) { None => true, - Some(denom) => !denom.is_withdrawn_position_nft(), + Some(denom) => { + !denom.is_withdrawn_position_nft() && !denom.is_withdrawn_auction_nft() + } }); for (index, value) in rows { diff --git a/crates/core/asset/src/asset/denom_metadata.rs b/crates/core/asset/src/asset/denom_metadata.rs index 8b2b489f59..4e059468dc 100644 --- a/crates/core/asset/src/asset/denom_metadata.rs +++ b/crates/core/asset/src/asset/denom_metadata.rs @@ -345,6 +345,10 @@ impl Metadata { self.starts_with("auctionnft_") } + pub fn is_withdrawn_auction_nft(&self) -> bool { + self.starts_with("auctionnft_2") + } + pub fn is_opened_position_nft(&self) -> bool { let prefix = "lpnft_opened_".to_string(); From 96af7ebf52fd2ac646b8ac5bf2b5bbf7a45098ea Mon Sep 17 00:00:00 2001 From: Conor Schaefer Date: Thu, 22 Aug 2024 17:00:39 -0700 Subject: [PATCH 03/43] fix(tests): bind to unique ports in ibc tests Follow-up to #4747. Noticed the new tests were flaking across multiple runs. Turns out they were fighting for port binds, as described already in #4517. Let's just use a unique port pair for the MockRelayers and call it a day for now. --- crates/core/app/tests/common/ibc_tests/node.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/core/app/tests/common/ibc_tests/node.rs b/crates/core/app/tests/common/ibc_tests/node.rs index bbe4edb122..5026bd7c2a 100644 --- a/crates/core/app/tests/common/ibc_tests/node.rs +++ b/crates/core/app/tests/common/ibc_tests/node.rs @@ -96,7 +96,9 @@ impl TestNodeWithIBC { "b" => 1, _ => unreachable!("update this hack"), }; - let grpc_url = format!("http://127.0.0.1:808{}", index) // see #4517 + // We use a non-standard port range, to avoid conflicting with other + // integration tests that bind to the more typical 8080/8081 ports. + let grpc_url = format!("http://127.0.0.1:999{}", index) // see #4517 .parse::()? 
.tap(|url| tracing::debug!(%url, "parsed grpc url")); From 7e9565db611a76d3d09e893880b8fca2cd5c8658 Mon Sep 17 00:00:00 2001 From: Chris Czub Date: Fri, 23 Aug 2024 12:35:19 -0400 Subject: [PATCH 04/43] Perform channel handshake in IBC state machine tests --- .../core/app/tests/common/ibc_tests/node.rs | 23 +- .../app/tests/common/ibc_tests/relayer.rs | 577 +++++++++++++++++- crates/core/app/tests/ibc_handshake.rs | 2 +- 3 files changed, 584 insertions(+), 18 deletions(-) diff --git a/crates/core/app/tests/common/ibc_tests/node.rs b/crates/core/app/tests/common/ibc_tests/node.rs index 5026bd7c2a..5178c7dd3f 100644 --- a/crates/core/app/tests/common/ibc_tests/node.rs +++ b/crates/core/app/tests/common/ibc_tests/node.rs @@ -10,8 +10,11 @@ use { }, ibc_types::{ core::{ + channel::{ChannelEnd, ChannelId, PortId, Version as ChannelVersion}, client::{ClientId, ClientType, Height}, - connection::{ChainId, ConnectionEnd, ConnectionId, Counterparty, Version}, + connection::{ + ChainId, ConnectionEnd, ConnectionId, Counterparty, Version as ConnectionVersion, + }, }, lightclients::tendermint::{ consensus_state::ConsensusState, header::Header as TendermintHeader, @@ -46,17 +49,21 @@ use { #[allow(unused)] pub struct TestNodeWithIBC { pub connection_id: ConnectionId, + pub channel_id: ChannelId, pub client_id: ClientId, + pub port_id: PortId, pub chain_id: String, pub counterparty: Counterparty, - pub version: Version, + pub connection_version: ConnectionVersion, + pub channel_version: ChannelVersion, pub signer: String, pub connection: Option, + pub channel: Option, pub node: TestNode>>, pub storage: TempStorage, pub ibc_client_query_client: IbcClientQueryClient, pub ibc_connection_query_client: IbcConnectionQueryClient, - pub _ibc_channel_query_client: IbcChannelQueryClient, + pub ibc_channel_query_client: IbcChannelQueryClient, pub tendermint_proxy_service_client: TendermintProxyServiceClient, } @@ -151,6 +158,10 @@ impl TestNodeWithIBC { Ok(Self { // the test relayer 
supports only a single connection on each chain as of now connection_id: ConnectionId::new(0), + // the test relayer supports only a single channel per connection on each chain as of now + channel_id: ChannelId::new(0), + // Only ICS20 transfers are supported + port_id: PortId::transfer(), node, storage, client_id: ClientId::new(ClientType::new("07-tendermint".to_string()), 0)?, @@ -160,11 +171,13 @@ impl TestNodeWithIBC { connection_id: None, prefix: IBC_COMMITMENT_PREFIX.to_owned(), }, - version: Version::default(), + connection_version: ConnectionVersion::default(), + channel_version: ChannelVersion::new("ics20-1".to_string()), signer: hex::encode_upper(proposer_address), connection: None, + channel: None, ibc_connection_query_client, - _ibc_channel_query_client: ibc_channel_query_client, + ibc_channel_query_client, ibc_client_query_client, tendermint_proxy_service_client, }) diff --git a/crates/core/app/tests/common/ibc_tests/relayer.rs b/crates/core/app/tests/common/ibc_tests/relayer.rs index 86705d770d..05a73e1f59 100644 --- a/crates/core/app/tests/common/ibc_tests/relayer.rs +++ b/crates/core/app/tests/common/ibc_tests/relayer.rs @@ -2,12 +2,19 @@ use { super::TestNodeWithIBC, anyhow::{anyhow, Result}, ibc_proto::ibc::core::{ + channel::v1::{IdentifiedChannel, QueryChannelRequest, QueryConnectionChannelsRequest}, client::v1::{QueryClientStateRequest, QueryConsensusStateRequest}, connection::v1::QueryConnectionRequest, }, - ibc_types::lightclients::tendermint::client_state::ClientState as TendermintClientState, ibc_types::{ core::{ + channel::{ + channel::{Order, State as ChannelState}, + msgs::{ + MsgChannelOpenAck, MsgChannelOpenConfirm, MsgChannelOpenInit, MsgChannelOpenTry, + }, + IdentifiedChannelEnd, Version as ChannelVersion, + }, client::{ msgs::{MsgCreateClient, MsgUpdateClient}, Height, @@ -18,22 +25,27 @@ use { MsgConnectionOpenAck, MsgConnectionOpenConfirm, MsgConnectionOpenInit, MsgConnectionOpenTry, }, - ConnectionEnd, Counterparty, State as 
ConnectionState, Version, + ConnectionEnd, Counterparty, State as ConnectionState, + Version as ConnectionVersion, }, }, lightclients::tendermint::{ - client_state::AllowUpdate, consensus_state::ConsensusState, - header::Header as TendermintHeader, TrustThreshold, + client_state::{AllowUpdate, ClientState as TendermintClientState}, + consensus_state::ConsensusState, + header::Header as TendermintHeader, + TrustThreshold, }, DomainType as _, }, penumbra_ibc::{ - component::ConnectionStateReadExt as _, IbcRelay, IBC_COMMITMENT_PREFIX, IBC_PROOF_SPECS, + component::{ChannelStateReadExt as _, ConnectionStateReadExt as _}, + IbcRelay, IBC_COMMITMENT_PREFIX, IBC_PROOF_SPECS, }, penumbra_proto::{util::tendermint_proxy::v1::GetBlockByHeightRequest, DomainType}, penumbra_stake::state_key::chain, penumbra_transaction::{TransactionParameters, TransactionPlan}, prost::Message as _, + rand_chacha::ChaCha12Core, sha2::Digest, std::time::Duration, tendermint::Time, @@ -90,7 +102,64 @@ impl MockRelayer { ) } + pub async fn get_channel_states(&mut self) -> Result<(ChannelState, ChannelState)> { + let channel_on_a_response = self + .chain_a_ibc + .ibc_channel_query_client + .connection_channels(QueryConnectionChannelsRequest { + connection: self.chain_a_ibc.connection_id.to_string(), + pagination: None, + }) + .await? + .into_inner(); + let channel_on_b_response = self + .chain_b_ibc + .ibc_channel_query_client + .connection_channels(QueryConnectionChannelsRequest { + connection: self.chain_b_ibc.connection_id.to_string(), + pagination: None, + }) + .await? + .into_inner(); + + let channels_a = channel_on_a_response.channels; + let channels_b = channel_on_b_response.channels; + + // Note: Mock relayer expects only a single channel per connection right now + let channel_a_state = match channels_a.len() { + 0 => ChannelState::Uninitialized, + _ => { + let channel_a: IdentifiedChannelEnd = channels_a[0].clone().try_into().unwrap(); + channel_a.channel_end.state.try_into()? 
+ } + }; + let channel_b_state = match channels_b.len() { + 0 => ChannelState::Uninitialized, + _ => { + let channel_b: IdentifiedChannelEnd = channels_b[0].clone().try_into().unwrap(); + channel_b.channel_end.state.try_into()? + } + }; + + Ok((channel_a_state, channel_b_state)) + } + + /// Performs a connection handshake followed by a channel handshake + /// between the two chains owned by the mock relayer. pub async fn _handshake(&mut self) -> Result<(), anyhow::Error> { + // Perform connection handshake + self._connection_handshake().await?; + + // Perform channel handshake + self._channel_handshake().await?; + + // The two chains should now be able to perform IBC transfers + // between each other. + Ok(()) + } + + /// Establish a connection between the two chains owned by the mock relayer. + pub async fn _connection_handshake(&mut self) -> Result<(), anyhow::Error> { // The IBC connection handshake has four steps (Init, Try, Ack, Confirm). // https://github.com/penumbra-zone/hermes/blob/a34a11fec76de3b573b539c237927e79cb74ec00/crates/relayer/src/connection.rs#L672 // https://github.com/cosmos/ibc/blob/main/spec/core/ics-003-connection-semantics/README.md#opening-handshake @@ -104,7 +173,7 @@ impl MockRelayer { // 1: send the Init message to chain A { - tracing::info!("Send Init to chain A"); + tracing::info!("Send Connection Init to chain A"); self._build_and_send_connection_open_init().await?; } @@ -115,7 +184,7 @@ impl MockRelayer { // 2. send the OpenTry message to chain B { - tracing::info!("send OpenTry to chain B"); + tracing::info!("send Connection OpenTry to chain B"); self._build_and_send_connection_open_try().await?; } @@ -126,7 +195,7 @@ impl MockRelayer { // 3. Send the OpenAck message to chain A { - tracing::info!("send OpenAck to chain A"); + tracing::info!("send Connection OpenAck to chain A"); self._build_and_send_connection_open_ack().await?; } @@ -137,7 +206,7 @@ impl MockRelayer { // 4. 
Send the OpenConfirm message to chain B { - tracing::info!("send OpenConfirm to chain B"); + tracing::info!("send Connection OpenConfirm to chain B"); self._build_and_send_connection_open_confirm().await?; } @@ -150,6 +219,65 @@ impl MockRelayer { Ok(()) } + /// Establish a channel between the two chains owned by the mock relayer. + pub async fn _channel_handshake(&mut self) -> Result<(), anyhow::Error> { + // The IBC channel handshake has four steps (Init, Try, Ack, Confirm). + // https://github.com/penumbra-zone/hermes/blob/a34a11fec76de3b573b539c237927e79cb74ec00/crates/relayer/src/channel.rs#L712 + // https://github.com/cosmos/ibc/blob/main/spec/core/ics-004-channel-and-packet-semantics/README.md + + self._sync_chains().await?; + + let (a_state, b_state) = self.get_channel_states().await?; + assert!(a_state == ChannelState::Uninitialized && b_state == ChannelState::Uninitialized); + + // 1: send the Init message to chain A + { + tracing::info!("Send Channel Init to chain A"); + self._build_and_send_channel_open_init().await?; + } + + let (a_state, b_state) = self.get_channel_states().await?; + assert!(a_state == ChannelState::Init && b_state == ChannelState::Uninitialized); + + self._sync_chains().await?; + + // 2. send the OpenTry message to chain B + { + tracing::info!("send Channel OpenTry to chain B"); + self._build_and_send_channel_open_try().await?; + } + + let (a_state, b_state) = self.get_channel_states().await?; + assert!(a_state == ChannelState::Init && b_state == ChannelState::TryOpen); + + self._sync_chains().await?; + + // 3. Send the OpenAck message to chain A + { + tracing::info!("send Channel OpenAck to chain A"); + self._build_and_send_channel_open_ack().await?; + } + + let (a_state, b_state) = self.get_channel_states().await?; + assert!(a_state == ChannelState::Open && b_state == ChannelState::TryOpen); + + self._sync_chains().await?; + + // 4. 
Send the OpenConfirm message to chain B + { + tracing::info!("send Channel OpenConfirm to chain B"); + self._build_and_send_channel_open_confirm().await?; + } + + let (a_state, b_state) = self.get_channel_states().await?; + assert!(a_state == ChannelState::Open && b_state == ChannelState::Open); + + // Ensure the chain timestamps remain in sync + self._sync_chains().await?; + + Ok(()) + } + pub async fn _create_clients(&mut self) -> Result<(), anyhow::Error> { self._sync_chains().await?; // helper function to create client for chain B on chain A @@ -259,7 +387,7 @@ impl MockRelayer { let ibc_msg = IbcRelay::ConnectionOpenInit(MsgConnectionOpenInit { client_id_on_a: chain_a_ibc.client_id.clone(), counterparty: chain_a_ibc.counterparty.clone(), - version: Some(chain_a_ibc.version.clone()), + version: Some(chain_a_ibc.connection_version.clone()), delay_period: Duration::from_secs(1), signer: chain_b_ibc.signer.clone(), }) @@ -318,6 +446,217 @@ impl MockRelayer { Ok(()) } + // helper function to build ChannelOpenTry to chain B + pub async fn _build_and_send_channel_open_try(&mut self) -> Result<()> { + // This is a load-bearing block execution that should be removed + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + self._sync_chains().await?; + + let src_connection = self + .chain_a_ibc + .ibc_connection_query_client + .connection(QueryConnectionRequest { + connection_id: self.chain_a_ibc.connection_id.to_string(), + }) + .await? + .into_inner(); + + let chain_b_height = self._build_and_send_update_client_a().await?; + let chain_a_height = self._build_and_send_update_client_b().await?; + + let chan_end_on_a_response = self + .chain_a_ibc + .ibc_channel_query_client + .channel(QueryChannelRequest { + port_id: self.chain_a_ibc.port_id.to_string(), + channel_id: self.chain_a_ibc.channel_id.to_string(), + }) + .await? 
+ .into_inner(); + + let proof_chan_end_on_a = + MerkleProof::decode(chan_end_on_a_response.clone().proof.as_slice())?; + + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + self._sync_chains().await?; + + let proof_height_on_a: Height = chan_end_on_a_response + .proof_height + .clone() + .unwrap() + .try_into()?; + + self._build_and_send_update_client_b().await?; + self._sync_chains().await?; + + let plan = { + // This mocks the relayer constructing a channel open try message on behalf + // of the counterparty chain. + #[allow(deprecated)] + let ibc_msg = IbcRelay::ChannelOpenTry(MsgChannelOpenTry { + signer: self.chain_a_ibc.signer.clone(), + port_id_on_b: self.chain_b_ibc.port_id.clone(), + connection_hops_on_b: vec![self.chain_b_ibc.connection_id.clone()], + port_id_on_a: self.chain_a_ibc.port_id.clone(), + chan_id_on_a: self.chain_a_ibc.channel_id.clone(), + version_supported_on_a: self.chain_a_ibc.channel_version.clone(), + proof_chan_end_on_a, + proof_height_on_a, + // Ordering must be Unordered for ics20 transfer + ordering: Order::Unordered, + // Deprecated + previous_channel_id: self.chain_a_ibc.channel_id.to_string(), + // Deprecated: Only ics20 version is supported + version_proposal: ChannelVersion::new("ics20-1".to_string()), + }) + .into(); + TransactionPlan { + actions: vec![ibc_msg], + // Now fill out the remaining parts of the transaction needed for verification: + memo: None, + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: self.chain_b_ibc.chain_id.clone(), + ..Default::default() + }, + } + }; + let tx = self + .chain_b_ibc + .client() + .await? + .witness_auth_build(&plan) + .await?; + + // Execute the transaction, applying it to the chain state. 
+ let pre_tx_snapshot = self.chain_b_ibc.storage.latest_snapshot(); + + // validate the chain b pre-tx storage root hash is what we expect: + let pre_tx_hash = pre_tx_snapshot.root_hash().await?; + + // Validate the tx hash is what we expect: + let tx_hash = sha2::Sha256::digest(&tx.encode_to_vec()); + + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + + // execute the transaction containing the opentry message + self.chain_b_ibc + .node + .block() + .with_data(vec![tx.encode_to_vec()]) + .execute() + .await?; + self.chain_b_ibc.node.block().execute().await?; + let post_tx_snapshot = self.chain_b_ibc.storage.latest_snapshot(); + + // validate the channel state is now "tryopen" + { + // Channel should not exist pre-commit + assert!(pre_tx_snapshot + .get_channel(&self.chain_b_ibc.channel_id, &self.chain_b_ibc.port_id) + .await? + .is_none(),); + + // Post-commit, the connection should be in the "tryopen" state. + let channel = post_tx_snapshot + .get_channel(&self.chain_b_ibc.channel_id, &self.chain_b_ibc.port_id) + .await? 
+ .ok_or_else(|| { + anyhow::anyhow!( + "no channel with the specified ID {} exists", + &self.chain_b_ibc.channel_id + ) + })?; + + assert_eq!(channel.state, ChannelState::TryOpen); + + self.chain_b_ibc.channel = Some(channel); + } + + self._sync_chains().await?; + + Ok(()) + } + + // helper function to build ChannelOpenInit to chain A + pub async fn _build_and_send_channel_open_init(&mut self) -> Result<()> { + self._sync_chains().await?; + let chain_a_ibc = &mut self.chain_a_ibc; + let chain_b_ibc = &mut self.chain_b_ibc; + + let plan = { + let ibc_msg = IbcRelay::ChannelOpenInit(MsgChannelOpenInit { + signer: chain_b_ibc.signer.clone(), + port_id_on_a: chain_a_ibc.port_id.clone(), + connection_hops_on_a: vec![chain_b_ibc + .counterparty + .connection_id + .clone() + .expect("connection established")], + port_id_on_b: chain_b_ibc.port_id.clone(), + // ORdering must be unordered for Ics20 transfer + ordering: Order::Unordered, + // Only ics20 version is supported + version_proposal: ChannelVersion::new("ics20-1".to_string()), + }) + .into(); + TransactionPlan { + actions: vec![ibc_msg], + // Now fill out the remaining parts of the transaction needed for verification: + memo: None, + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: chain_a_ibc.chain_id.clone(), + ..Default::default() + }, + } + }; + let tx = chain_a_ibc + .client() + .await? + .witness_auth_build(&plan) + .await?; + + // Execute the transaction, applying it to the chain state. + let pre_tx_snapshot = chain_a_ibc.storage.latest_snapshot(); + chain_a_ibc + .node + .block() + .with_data(vec![tx.encode_to_vec()]) + .execute() + .await?; + let post_tx_snapshot = chain_a_ibc.storage.latest_snapshot(); + + // validate the connection state is now "init" + { + // Channel should not exist pre-commit + assert!(pre_tx_snapshot + .get_channel(&chain_a_ibc.channel_id, &chain_a_ibc.port_id) + .await? 
+ .is_none(),); + + // Post-commit, the channel should be in the "init" state. + let channel = post_tx_snapshot + .get_channel(&chain_a_ibc.channel_id, &chain_a_ibc.port_id) + .await? + .ok_or_else(|| { + anyhow::anyhow!( + "no channel with the specified ID {} exists", + &chain_a_ibc.channel_id + ) + })?; + + assert_eq!(channel.state.clone(), ChannelState::Init); + + chain_a_ibc.channel = Some(channel.clone()); + } + + Ok(()) + } + pub async fn handshake(&mut self) -> Result<(), anyhow::Error> { // Open a connection on each chain to the other chain. // This is accomplished by following the ICS-003 spec for connection handshakes. @@ -378,6 +717,120 @@ impl MockRelayer { _build_and_send_update_client(chain_a_ibc, chain_b_ibc).await } + // Send an ACK message to chain A + pub async fn _build_and_send_channel_open_ack(&mut self) -> Result<()> { + // This is a load-bearing block execution that should be removed + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + self._sync_chains().await?; + + let chain_b_connection_id = self.chain_b_ibc.connection_id.clone(); + let chain_a_connection_id = self.chain_a_ibc.connection_id.clone(); + + // Build message(s) for updating client on source + let src_client_height = self._build_and_send_update_client_a().await?; + // Build message(s) for updating client on destination + let dst_client_height = self._build_and_send_update_client_b().await?; + + let chan_end_on_b_response = self + .chain_b_ibc + .ibc_channel_query_client + .channel(QueryChannelRequest { + port_id: self.chain_b_ibc.port_id.to_string(), + channel_id: self.chain_b_ibc.channel_id.to_string(), + }) + .await? 
+ .into_inner(); + + let proof_height_on_b = chan_end_on_b_response + .clone() + .proof_height + .expect("proof height should be present") + .try_into()?; + let proof_chan_end_on_b = + MerkleProof::decode(chan_end_on_b_response.clone().proof.as_slice())?; + + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + self._build_and_send_update_client_a().await?; + self._sync_chains().await?; + + let plan = { + // This mocks the relayer constructing a channel open try message on behalf + // of the counterparty chain. + let ibc_msg = IbcRelay::ChannelOpenAck(MsgChannelOpenAck { + port_id_on_a: self.chain_a_ibc.port_id.clone(), + chan_id_on_a: self.chain_a_ibc.channel_id.clone(), + chan_id_on_b: self.chain_b_ibc.channel_id.clone(), + version_on_b: self.chain_b_ibc.channel_version.clone(), + proof_chan_end_on_b, + proof_height_on_b, + signer: self.chain_b_ibc.signer.clone(), + }) + .into(); + // let ibc_msg = IbcRelay::ChannelOpenAck(MsgChannelOpenAck::try_from(proto_ack)?).into(); + TransactionPlan { + actions: vec![ibc_msg], + // Now fill out the remaining parts of the transaction needed for verification: + memo: None, + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: self.chain_a_ibc.chain_id.clone(), + ..Default::default() + }, + } + }; + let tx = self + .chain_a_ibc + .client() + .await? + .witness_auth_build(&plan) + .await?; + + // Execute the transaction, applying it to the chain state. + let pre_tx_snapshot = self.chain_a_ibc.storage.latest_snapshot(); + self.chain_a_ibc + .node + .block() + .with_data(vec![tx.encode_to_vec()]) + .execute() + .await?; + let post_tx_snapshot = self.chain_a_ibc.storage.latest_snapshot(); + + // validate the channel state is now "OPEN" + { + // Channel should be in INIT pre-commit + let channel = pre_tx_snapshot + .get_channel(&self.chain_a_ibc.channel_id, &self.chain_a_ibc.port_id) + .await? 
+ .ok_or_else(|| { + anyhow::anyhow!( + "no channel with the specified ID {} exists", + &self.chain_a_ibc.channel_id + ) + })?; + + assert_eq!(channel.state, ChannelState::Init); + + // Post-commit, the channel should be in the "OPEN" state. + let channel = post_tx_snapshot + .get_channel(&self.chain_a_ibc.channel_id, &self.chain_a_ibc.port_id) + .await? + .ok_or_else(|| { + anyhow::anyhow!( + "no channelwith the specified ID {} exists", + &self.chain_a_ibc.channel_id + ) + })?; + + assert_eq!(channel.state, ChannelState::Open); + + self.chain_a_ibc.channel = Some(channel); + } + + Ok(()) + } + // Send an ACK message to chain A // https://github.com/penumbra-zone/hermes/blob/a34a11fec76de3b573b539c237927e79cb74ec00/crates/relayer/src/connection.rs#L1126 pub async fn _build_and_send_connection_open_ack(&mut self) -> Result<()> { @@ -445,7 +898,7 @@ impl MockRelayer { let proto_ack = ibc_proto::ibc::core::connection::v1::MsgConnectionOpenAck { connection_id: self.chain_a_ibc.connection_id.to_string(), counterparty_connection_id: chain_b_connection_id.to_string(), - version: Some(Version::default().into()), + version: Some(ConnectionVersion::default().into()), client_state: Some( client_state_of_a_on_b_response .clone() @@ -643,7 +1096,7 @@ impl MockRelayer { client_state_of_b_on_a: client_state_of_b_on_a_response .client_state .expect("client state present"), - versions_on_a: vec![Version::default()], + versions_on_a: vec![ConnectionVersion::default()], proof_conn_end_on_a, proof_client_state_of_b_on_a, proof_consensus_state_of_b_on_a, @@ -827,6 +1280,106 @@ impl MockRelayer { Ok(()) } + + // sends a ChannelOpenConfirm message to chain B + // at this point, chain A is in OPEN and B is in TRYOPEN. + // afterwards, chain A will be in OPEN and chain B will be in OPEN. 
+ pub async fn _build_and_send_channel_open_confirm(&mut self) -> Result<()> { + // This is a load-bearing block execution that should be removed + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + self._sync_chains().await?; + + // https://github.com/penumbra-zone/hermes/blob/a34a11fec76de3b573b539c237927e79cb74ec00/crates/relayer/src/connection.rs#L1296 + let chan_end_on_a_response = self + .chain_a_ibc + .ibc_channel_query_client + .channel(QueryChannelRequest { + port_id: self.chain_a_ibc.port_id.to_string(), + channel_id: self.chain_a_ibc.channel_id.to_string(), + }) + .await? + .into_inner(); + + let dst_client_target_height = self._build_and_send_update_client_b().await?; + + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + self._build_and_send_update_client_b().await?; + self._sync_chains().await?; + + let plan = { + // This mocks the relayer constructing a channel open confirm message on behalf + // of the counterparty chain. + let ibc_msg = IbcRelay::ChannelOpenConfirm(MsgChannelOpenConfirm { + proof_height_on_a: chan_end_on_a_response.proof_height.unwrap().try_into()?, + signer: self.chain_a_ibc.signer.clone(), + port_id_on_b: self.chain_b_ibc.port_id.clone(), + chan_id_on_b: self.chain_b_ibc.channel_id.clone(), + proof_chan_end_on_a: MerkleProof::decode(chan_end_on_a_response.proof.as_slice())?, + }) + .into(); + TransactionPlan { + actions: vec![ibc_msg], + // Now fill out the remaining parts of the transaction needed for verification: + memo: None, + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: self.chain_b_ibc.chain_id.clone(), + ..Default::default() + }, + } + }; + let tx = self + .chain_b_ibc + .client() + .await? + .witness_auth_build(&plan) + .await?; + + // Execute the transaction, applying it to the chain state. 
+ let pre_tx_snapshot = self.chain_b_ibc.storage.latest_snapshot(); + self.chain_b_ibc + .node + .block() + .with_data(vec![tx.encode_to_vec()]) + .execute() + .await?; + let post_tx_snapshot = self.chain_b_ibc.storage.latest_snapshot(); + + // validate the channel state is now "open" + { + // Channel should be in TRYOPEN pre-commit + let channel = pre_tx_snapshot + .get_channel(&self.chain_b_ibc.channel_id, &self.chain_b_ibc.port_id) + .await? + .ok_or_else(|| { + anyhow::anyhow!( + "no channel with the specified ID {} exists", + &self.chain_b_ibc.channel_id + ) + })?; + + assert_eq!(channel.state, ChannelState::TryOpen); + + // Post-commit, the channel should be in the "OPEN" state. + let channel = post_tx_snapshot + .get_channel(&self.chain_b_ibc.channel_id, &self.chain_b_ibc.port_id) + .await? + .ok_or_else(|| { + anyhow::anyhow!( + "no channel with the specified ID {} exists", + &self.chain_b_ibc.channel_id + ) + })?; + + assert_eq!(channel.state, ChannelState::Open); + + self.chain_b_ibc.channel = Some(channel); + } + + Ok(()) + } } // tell chain A about chain B. returns the height of chain b on chain a after update. diff --git a/crates/core/app/tests/ibc_handshake.rs b/crates/core/app/tests/ibc_handshake.rs index e000a824eb..2068d9d6b8 100644 --- a/crates/core/app/tests/ibc_handshake.rs +++ b/crates/core/app/tests/ibc_handshake.rs @@ -69,7 +69,7 @@ async fn ibc_handshake() -> anyhow::Result<()> { chain_b_ibc, }; - // Perform the IBC connection handshake between the two chains. + // Perform the IBC connection and channel handshakes between the two chains. 
// TODO: some testing of failure cases of the handshake process would be good relayer.handshake().await?; From b85801608811f5b14f1dc399628847cce02d78e8 Mon Sep 17 00:00:00 2001 From: Conor Schaefer Date: Fri, 23 Aug 2024 12:52:30 -0700 Subject: [PATCH 05/43] feat(pd): dynamic addresses in devnet allocations (#4827) --- crates/bin/pd/src/cli.rs | 8 ++++++- crates/bin/pd/src/main.rs | 2 ++ crates/bin/pd/src/network/generate.rs | 30 +++++++++++++++++++++++++++ 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/crates/bin/pd/src/cli.rs b/crates/bin/pd/src/cli.rs index 9ca4226eed..6583a5fccd 100644 --- a/crates/bin/pd/src/cli.rs +++ b/crates/bin/pd/src/cli.rs @@ -167,8 +167,14 @@ pub enum NetworkCommand { /// Path to CSV file containing initial allocations [default: latest testnet]. #[clap(long, parse(from_os_str))] allocations_input_file: Option, - /// Path to JSON file containing initial validator configs [default: latest testnet]. + /// Penumbra wallet address to include in genesis allocations. + /// Intended to make dev experience nicer on first run: + /// generate a wallet, view its address, then generate a devnet + /// with that address included in the base allocations. + #[clap(long)] + allocation_address: Option, #[clap(long, parse(from_os_str))] + /// Path to JSON file containing initial validator configs [default: latest testnet]. validators_input_file: Option, /// Testnet name [default: latest testnet]. 
#[clap(long)] diff --git a/crates/bin/pd/src/main.rs b/crates/bin/pd/src/main.rs index a203d8f175..4c2f003ae4 100644 --- a/crates/bin/pd/src/main.rs +++ b/crates/bin/pd/src/main.rs @@ -319,6 +319,7 @@ async fn main() -> anyhow::Result<()> { unbonding_delay, active_validator_limit, allocations_input_file, + allocation_address, validators_input_file, chain_id, gas_price_simple, @@ -377,6 +378,7 @@ async fn main() -> anyhow::Result<()> { peer_address_template, Some(external_addresses), allocations_input_file, + allocation_address, validators_input_file, timeout_commit, active_validator_limit, diff --git a/crates/bin/pd/src/network/generate.rs b/crates/bin/pd/src/network/generate.rs index 113e3b5bb2..9208c20920 100644 --- a/crates/bin/pd/src/network/generate.rs +++ b/crates/bin/pd/src/network/generate.rs @@ -68,6 +68,7 @@ impl NetworkConfig { peer_address_template: Option, external_addresses: Option>, allocations_input_file: Option, + allocation_address: Option
, validators_input_file: Option, tendermint_timeout_commit: Option, active_validator_limit: Option, @@ -90,6 +91,11 @@ impl NetworkConfig { allocations.push(v.delegation_allocation()?); } + // Add an extra allocation for a dynamic wallet address. + if let Some(address) = allocation_address { + tracing::info!(%address, "adding dynamic allocation to genesis"); + allocations.extend(NetworkAllocation::simple(address)); + } // Convert to domain type, for use with other Penumbra interfaces. // We do this conversion once and store it in the struct for convenience. let validators: anyhow::Result> = @@ -390,6 +396,7 @@ pub fn network_generate( external_addresses: Vec, validators_input_file: Option, allocations_input_file: Option, + allocation_address: Option
, proposal_voting_blocks: Option, gas_price_simple: Option, ) -> anyhow::Result<()> { @@ -400,6 +407,7 @@ pub fn network_generate( peer_address_template, Some(external_addresses), allocations_input_file, + allocation_address, validators_input_file, tendermint_timeout_commit, active_validator_limit, @@ -456,6 +464,26 @@ impl NetworkAllocation { Ok(res) } + /// Creates a basic set of genesis [Allocation]s for the provided [Address]. + /// Returns multiple Allocations, so that it's immediately possible to use the DEX, + /// for basic interactive testing of swap behavior. + /// For more control over precise allocation amounts, use [from_csv]. + pub fn simple(address: Address) -> Vec { + vec![ + Allocation { + address: address.clone(), + raw_denom: "upenumbra".into(), + // The `upenumbra` base denom is millionths, so `10^6 * n` + // results in `n` `penumbra` tokens. + raw_amount: (100_000 * 10u128.pow(6)).into(), + }, + Allocation { + address: address.clone(), + raw_denom: "test_usd".into(), + raw_amount: (1_000 as u128).into(), + }, + ] + } } /// Represents a funding stream within a testnet configuration file. @@ -729,6 +757,7 @@ mod tests { None, None, None, + None, )?; assert_eq!(testnet_config.name, "test-chain-1234"); assert_eq!(testnet_config.genesis.validators.len(), 0); @@ -752,6 +781,7 @@ mod tests { Some(String::from("validator.local")), None, None, + None, Some(ci_validators_filepath), None, None, From 87cb8aa04cdc3946a203b8e5d2eeff95643e2868 Mon Sep 17 00:00:00 2001 From: Conor Schaefer Date: Tue, 27 Aug 2024 09:37:48 -0700 Subject: [PATCH 06/43] chore: update minifront bundled assets Updated to latest minifront and node-status apps from the penumbra-zone/web repo, commit a8a5f41f7d791059e47f873c6948a26a057e674b. 
--- assets/minifront.zip | 4 ++-- assets/node-status.zip | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/assets/minifront.zip b/assets/minifront.zip index 803e274756..5837696af4 100644 --- a/assets/minifront.zip +++ b/assets/minifront.zip @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:74c9d6395a48de4fb6fede856cb48f76ebb88f56472c79e2974bef0d5e2c2511 -size 5150122 +oid sha256:620aca062812b68edb1635deb74c62e340181476510b63daee53195178757f20 +size 5253302 diff --git a/assets/node-status.zip b/assets/node-status.zip index 14613f2b90..6c235c4a73 100644 --- a/assets/node-status.zip +++ b/assets/node-status.zip @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ef586037dd4f66d25bd3867be14dc53383d9d20f0a39b78faec2653c27783106 -size 1384181 +oid sha256:3bf95883f7466b7247d455d2e830c1754dd0c45cf6a3cf78e0f0d83fb9d202fc +size 1240860 From 3d6a557f48b88f6f1580f3cb57627cfb260df155 Mon Sep 17 00:00:00 2001 From: Conor Schaefer Date: Tue, 27 Aug 2024 11:16:49 -0700 Subject: [PATCH 07/43] chore: release version 0.80.3 --- Cargo.lock | 96 +++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 49 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e0ab44877a..c8f77b85b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1233,7 +1233,7 @@ dependencies = [ [[package]] name = "cnidarium" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "async-trait", @@ -1269,7 +1269,7 @@ dependencies = [ [[package]] name = "cnidarium-component" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "async-trait", @@ -1307,7 +1307,7 @@ dependencies = [ [[package]] name = "cometindex" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "async-trait", @@ -1668,7 +1668,7 @@ dependencies = [ [[package]] name = "decaf377-fmd" -version = "0.80.2" +version = "0.80.3" dependencies = [ "ark-ff", "ark-serialize", @@ -1683,7 +1683,7 @@ dependencies = [ 
[[package]] name = "decaf377-frost" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -1698,7 +1698,7 @@ dependencies = [ [[package]] name = "decaf377-ka" -version = "0.80.2" +version = "0.80.3" dependencies = [ "ark-ff", "decaf377", @@ -4213,7 +4213,7 @@ dependencies = [ [[package]] name = "pcli" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -4295,7 +4295,7 @@ dependencies = [ [[package]] name = "pclientd" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "assert_cmd", @@ -4347,7 +4347,7 @@ dependencies = [ [[package]] name = "pd" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -4500,7 +4500,7 @@ dependencies = [ [[package]] name = "penumbra-app" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -4588,7 +4588,7 @@ dependencies = [ [[package]] name = "penumbra-asset" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -4628,7 +4628,7 @@ dependencies = [ [[package]] name = "penumbra-auction" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -4683,7 +4683,7 @@ dependencies = [ [[package]] name = "penumbra-auto-https" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "axum-server", @@ -4695,7 +4695,7 @@ dependencies = [ [[package]] name = "penumbra-bench" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-bls12-377", @@ -4739,7 +4739,7 @@ dependencies = [ [[package]] name = "penumbra-community-pool" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -4771,7 +4771,7 @@ dependencies = [ [[package]] name = "penumbra-compact-block" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -4806,7 +4806,7 @@ dependencies = [ [[package]] name = "penumbra-custody" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "argon2", @@ -4842,7 +4842,7 @@ dependencies = [ [[package]] 
name = "penumbra-dex" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -4904,7 +4904,7 @@ dependencies = [ [[package]] name = "penumbra-distributions" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "async-trait", @@ -4922,7 +4922,7 @@ dependencies = [ [[package]] name = "penumbra-eddy" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -4940,7 +4940,7 @@ dependencies = [ [[package]] name = "penumbra-fee" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -4967,7 +4967,7 @@ dependencies = [ [[package]] name = "penumbra-funding" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "async-trait", @@ -4990,7 +4990,7 @@ dependencies = [ [[package]] name = "penumbra-governance" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -5044,7 +5044,7 @@ dependencies = [ [[package]] name = "penumbra-ibc" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -5081,7 +5081,7 @@ dependencies = [ [[package]] name = "penumbra-keys" -version = "0.80.2" +version = "0.80.3" dependencies = [ "aes", "anyhow", @@ -5128,7 +5128,7 @@ dependencies = [ [[package]] name = "penumbra-measure" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "bytesize", @@ -5146,7 +5146,7 @@ dependencies = [ [[package]] name = "penumbra-mock-client" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "cnidarium", @@ -5163,7 +5163,7 @@ dependencies = [ [[package]] name = "penumbra-mock-consensus" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "bytes", @@ -5183,7 +5183,7 @@ dependencies = [ [[package]] name = "penumbra-mock-tendermint-proxy" -version = "0.80.2" +version = "0.80.3" dependencies = [ "hex", "pbjson-types", @@ -5198,7 +5198,7 @@ dependencies = [ [[package]] name = "penumbra-num" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -5235,7 +5235,7 @@ 
dependencies = [ [[package]] name = "penumbra-proof-params" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ec", @@ -5263,7 +5263,7 @@ dependencies = [ [[package]] name = "penumbra-proof-setup" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ec", @@ -5290,7 +5290,7 @@ dependencies = [ [[package]] name = "penumbra-proto" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "async-trait", @@ -5324,7 +5324,7 @@ dependencies = [ [[package]] name = "penumbra-sct" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -5360,7 +5360,7 @@ dependencies = [ [[package]] name = "penumbra-shielded-pool" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -5414,7 +5414,7 @@ dependencies = [ [[package]] name = "penumbra-stake" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -5467,7 +5467,7 @@ dependencies = [ [[package]] name = "penumbra-tct" -version = "0.80.2" +version = "0.80.3" dependencies = [ "ark-ed-on-bls12-377", "ark-ff", @@ -5499,7 +5499,7 @@ dependencies = [ [[package]] name = "penumbra-tct-property-test" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "futures", @@ -5511,7 +5511,7 @@ dependencies = [ [[package]] name = "penumbra-tct-visualize" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "axum", @@ -5541,7 +5541,7 @@ dependencies = [ [[package]] name = "penumbra-tendermint-proxy" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "chrono", @@ -5573,7 +5573,7 @@ dependencies = [ [[package]] name = "penumbra-test-subscriber" -version = "0.80.2" +version = "0.80.3" dependencies = [ "tracing", "tracing-subscriber 0.3.18", @@ -5581,7 +5581,7 @@ dependencies = [ [[package]] name = "penumbra-tower-trace" -version = "0.80.2" +version = "0.80.3" dependencies = [ "futures", "hex", @@ -5602,7 +5602,7 @@ dependencies = [ [[package]] name = "penumbra-transaction" -version = 
"0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-ff", @@ -5655,7 +5655,7 @@ dependencies = [ [[package]] name = "penumbra-txhash" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "blake2b_simd 1.0.2", @@ -5668,7 +5668,7 @@ dependencies = [ [[package]] name = "penumbra-view" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-std", @@ -5726,7 +5726,7 @@ dependencies = [ [[package]] name = "penumbra-wallet" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-std", @@ -5812,7 +5812,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pindexer" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "clap", @@ -7662,7 +7662,7 @@ checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = "summonerd" -version = "0.80.2" +version = "0.80.3" dependencies = [ "anyhow", "ark-groth16", diff --git a/Cargo.toml b/Cargo.toml index a6a8408f50..3a68331621 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -104,7 +104,7 @@ push = false [workspace.package] authors = ["Penumbra Labs "] edition = "2021" -version = "0.80.2" +version = "0.80.3" repository = "https://github.com/penumbra-zone/penumbra" homepage = "https://penumbra.zone" license = "MIT OR Apache-2.0" From 7601c8c98f131e9baf65bd295f05fa0b314f6406 Mon Sep 17 00:00:00 2001 From: Conor Schaefer Date: Wed, 28 Aug 2024 16:51:28 -0700 Subject: [PATCH 08/43] chore: update chain id for testnet 80 (#4841) Contains a few small edits: * documents historical testnets * updates base allocations with new addresses * modifies testnet generation logic to match * generates new Testnet 80 config * updates dev env to use gas prices by default Used these to generate local devnets while testing out the new allocation settings. Doing all this in preparation for launching a new public testnet. 
--- crates/bin/pd/src/network/generate.rs | 8 +- deployments/scripts/run-local-devnet.sh | 1 + testnets/074-deimos-8/README.md | 4 + testnets/075-deimos-8/README.md | 4 + testnets/076-deimos-8/README.md | 4 + testnets/077-deimos-8/README.md | 4 + testnets/078-deimos-8/README.md | 4 + testnets/079-deimos-8/README.md | 4 + testnets/080-phobos/allocations.csv | 414 ++++++++++++++++++++++++ testnets/080-phobos/validators.json | 9 + testnets/base_addresses.txt | 8 +- testnets/base_allocations.csv | 53 +-- testnets/new-testnet.sh | 22 +- testnets/small_addresses.txt | 5 + testnets/validators-ci.json | 54 +--- 15 files changed, 513 insertions(+), 85 deletions(-) create mode 100644 testnets/074-deimos-8/README.md create mode 100644 testnets/075-deimos-8/README.md create mode 100644 testnets/076-deimos-8/README.md create mode 100644 testnets/077-deimos-8/README.md create mode 100644 testnets/078-deimos-8/README.md create mode 100644 testnets/079-deimos-8/README.md create mode 100644 testnets/080-phobos/allocations.csv create mode 100644 testnets/080-phobos/validators.json create mode 100644 testnets/small_addresses.txt diff --git a/crates/bin/pd/src/network/generate.rs b/crates/bin/pd/src/network/generate.rs index 9208c20920..437c255e3b 100644 --- a/crates/bin/pd/src/network/generate.rs +++ b/crates/bin/pd/src/network/generate.rs @@ -660,8 +660,12 @@ impl TryFrom for shielded_pool_genesis::Allocation { Ok(shielded_pool_genesis::Allocation { raw_amount: a.amount.into(), raw_denom: a.denom.clone(), - address: Address::from_str(&a.address) - .context("invalid address format in genesis allocations")?, + address: Address::from_str(&a.address).with_context(|| { + format!( + "invalid address format in genesis allocations: {}", + &a.address + ) + })?, }) } } diff --git a/deployments/scripts/run-local-devnet.sh b/deployments/scripts/run-local-devnet.sh index 187b9357b6..78db9d1759 100755 --- a/deployments/scripts/run-local-devnet.sh +++ b/deployments/scripts/run-local-devnet.sh @@ 
-22,6 +22,7 @@ else --unbonding-delay 50 \ --epoch-duration 50 \ --proposal-voting-blocks 50 \ + --gas-price-simple 500 \ --timeout-commit 1s # opt in to cometbft abci indexing to postgres postgresql_db_url="postgresql://penumbra:penumbra@localhost:5432/penumbra_cometbft?sslmode=disable" diff --git a/testnets/074-deimos-8/README.md b/testnets/074-deimos-8/README.md new file mode 100644 index 0000000000..4a864f8da4 --- /dev/null +++ b/testnets/074-deimos-8/README.md @@ -0,0 +1,4 @@ +# Testnet 74 + +Testnet 74 was short-lived due to a deploy mistake, see context > [!IMPORTANT] +https://github.com/penumbra-zone/penumbra/issues/4325. diff --git a/testnets/075-deimos-8/README.md b/testnets/075-deimos-8/README.md new file mode 100644 index 0000000000..070b967064 --- /dev/null +++ b/testnets/075-deimos-8/README.md @@ -0,0 +1,4 @@ +# Testnet 75 + +Testnet 75 involved a chain reset, required due to deploy mistake on Testnet 74 +causing diverged state. See details in https://github.com/penumbra-zone/penumbra/issues/4374. diff --git a/testnets/076-deimos-8/README.md b/testnets/076-deimos-8/README.md new file mode 100644 index 0000000000..cb9a3fa34f --- /dev/null +++ b/testnets/076-deimos-8/README.md @@ -0,0 +1,4 @@ +# Testnet 76 + +Testnet 76 was released as a chain upgrade, reusing the initial state +from Testnet 75. See details in https://github.com/penumbra-zone/penumbra/issues/4402. diff --git a/testnets/077-deimos-8/README.md b/testnets/077-deimos-8/README.md new file mode 100644 index 0000000000..e5add4557c --- /dev/null +++ b/testnets/077-deimos-8/README.md @@ -0,0 +1,4 @@ +# Testnet 77 + +Testnet 77 was released as a chain upgrade, carrying over the allocations +first used in Testnet 75. See details in https://github.com/penumbra-zone/penumbra/issues/4497. 
diff --git a/testnets/078-deimos-8/README.md b/testnets/078-deimos-8/README.md new file mode 100644 index 0000000000..e18f18aee3 --- /dev/null +++ b/testnets/078-deimos-8/README.md @@ -0,0 +1,4 @@ +# Testnet 78 + +Testnet 78 was released as a chain upgrade, carrying over the allocations first used +Testnet 75. See details in https://github.com/penumbra-zone/penumbra/issues/4582. diff --git a/testnets/079-deimos-8/README.md b/testnets/079-deimos-8/README.md new file mode 100644 index 0000000000..82783adb81 --- /dev/null +++ b/testnets/079-deimos-8/README.md @@ -0,0 +1,4 @@ +# Testnet 79 + +There was no Testnet 79: at the time v0.79.0 was released, the pre-existing testnet +infrastructure was destroyed, pending a redesign. diff --git a/testnets/080-phobos/allocations.csv b/testnets/080-phobos/allocations.csv new file mode 100644 index 0000000000..f4d069a50f --- /dev/null +++ b/testnets/080-phobos/allocations.csv @@ -0,0 +1,414 @@ +amount,denom,address +1_000_000__000_000,upenumbra,penumbra1tj9s0dh8ymphnw2qgrva57wpcmpr38e9a8w05t5c8dyynr7dt58hgnare7tunqyuryc7yag084rqxrsrj9yx8uk9yhs8lmg64hl993wn3rphlvan3paxdd6suyhwnpkd6cwzes +20_000,gm,penumbra1tj9s0dh8ymphnw2qgrva57wpcmpr38e9a8w05t5c8dyynr7dt58hgnare7tunqyuryc7yag084rqxrsrj9yx8uk9yhs8lmg64hl993wn3rphlvan3paxdd6suyhwnpkd6cwzes +20_000,gn,penumbra1tj9s0dh8ymphnw2qgrva57wpcmpr38e9a8w05t5c8dyynr7dt58hgnare7tunqyuryc7yag084rqxrsrj9yx8uk9yhs8lmg64hl993wn3rphlvan3paxdd6suyhwnpkd6cwzes +10_000,pizza,penumbra1tj9s0dh8ymphnw2qgrva57wpcmpr38e9a8w05t5c8dyynr7dt58hgnare7tunqyuryc7yag084rqxrsrj9yx8uk9yhs8lmg64hl993wn3rphlvan3paxdd6suyhwnpkd6cwzes +100,cube,penumbra1tj9s0dh8ymphnw2qgrva57wpcmpr38e9a8w05t5c8dyynr7dt58hgnare7tunqyuryc7yag084rqxrsrj9yx8uk9yhs8lmg64hl993wn3rphlvan3paxdd6suyhwnpkd6cwzes +500_000,test_usd,penumbra1tj9s0dh8ymphnw2qgrva57wpcmpr38e9a8w05t5c8dyynr7dt58hgnare7tunqyuryc7yag084rqxrsrj9yx8uk9yhs8lmg64hl993wn3rphlvan3paxdd6suyhwnpkd6cwzes 
+1_000_000__000_000,upenumbra,penumbra12ts5g469dpjdcn2wmdkz25nng6snxkuxumfnmygtrp2fm6776zjw7r35y2j7s07h8368a4kr3c904w8cjgpm6v3ysaj6gh930z7fwlqq9kvjewsy9fgamzxyj874kgu5uvcshg +20_000,gm,penumbra12ts5g469dpjdcn2wmdkz25nng6snxkuxumfnmygtrp2fm6776zjw7r35y2j7s07h8368a4kr3c904w8cjgpm6v3ysaj6gh930z7fwlqq9kvjewsy9fgamzxyj874kgu5uvcshg +20_000,gn,penumbra12ts5g469dpjdcn2wmdkz25nng6snxkuxumfnmygtrp2fm6776zjw7r35y2j7s07h8368a4kr3c904w8cjgpm6v3ysaj6gh930z7fwlqq9kvjewsy9fgamzxyj874kgu5uvcshg +10_000,pizza,penumbra12ts5g469dpjdcn2wmdkz25nng6snxkuxumfnmygtrp2fm6776zjw7r35y2j7s07h8368a4kr3c904w8cjgpm6v3ysaj6gh930z7fwlqq9kvjewsy9fgamzxyj874kgu5uvcshg +100,cube,penumbra12ts5g469dpjdcn2wmdkz25nng6snxkuxumfnmygtrp2fm6776zjw7r35y2j7s07h8368a4kr3c904w8cjgpm6v3ysaj6gh930z7fwlqq9kvjewsy9fgamzxyj874kgu5uvcshg +500_000,test_usd,penumbra12ts5g469dpjdcn2wmdkz25nng6snxkuxumfnmygtrp2fm6776zjw7r35y2j7s07h8368a4kr3c904w8cjgpm6v3ysaj6gh930z7fwlqq9kvjewsy9fgamzxyj874kgu5uvcshg +1_000_000__000_000,upenumbra,penumbra19zz058ttl8vhsypztc0gyl9yfs7jcn3906kgd3pzeh944klh8vf2ttx7qvscxwtuecw92cy6n55ttjn482q7ufpzwj5yem9xcvecrd2zc6vgctxzc3k7mnpg0lk8vved00e3g0 +20_000,gm,penumbra19zz058ttl8vhsypztc0gyl9yfs7jcn3906kgd3pzeh944klh8vf2ttx7qvscxwtuecw92cy6n55ttjn482q7ufpzwj5yem9xcvecrd2zc6vgctxzc3k7mnpg0lk8vved00e3g0 +20_000,gn,penumbra19zz058ttl8vhsypztc0gyl9yfs7jcn3906kgd3pzeh944klh8vf2ttx7qvscxwtuecw92cy6n55ttjn482q7ufpzwj5yem9xcvecrd2zc6vgctxzc3k7mnpg0lk8vved00e3g0 +10_000,pizza,penumbra19zz058ttl8vhsypztc0gyl9yfs7jcn3906kgd3pzeh944klh8vf2ttx7qvscxwtuecw92cy6n55ttjn482q7ufpzwj5yem9xcvecrd2zc6vgctxzc3k7mnpg0lk8vved00e3g0 +100,cube,penumbra19zz058ttl8vhsypztc0gyl9yfs7jcn3906kgd3pzeh944klh8vf2ttx7qvscxwtuecw92cy6n55ttjn482q7ufpzwj5yem9xcvecrd2zc6vgctxzc3k7mnpg0lk8vved00e3g0 +500_000,test_usd,penumbra19zz058ttl8vhsypztc0gyl9yfs7jcn3906kgd3pzeh944klh8vf2ttx7qvscxwtuecw92cy6n55ttjn482q7ufpzwj5yem9xcvecrd2zc6vgctxzc3k7mnpg0lk8vved00e3g0 
+1_000_000__000_000,upenumbra,penumbra1hckq024z3fd6wl29kk4rwfnykgyxyawpuw8zc04npc7s40hey0xghltdcsk7q5k0cq77qjtg8gt0cnvqff94s8j5tvx94ssyjyr35c9rqx08lkwxfqgr6dt3vu99wg8dg0c7jz +20_000,gm,penumbra1hckq024z3fd6wl29kk4rwfnykgyxyawpuw8zc04npc7s40hey0xghltdcsk7q5k0cq77qjtg8gt0cnvqff94s8j5tvx94ssyjyr35c9rqx08lkwxfqgr6dt3vu99wg8dg0c7jz +20_000,gn,penumbra1hckq024z3fd6wl29kk4rwfnykgyxyawpuw8zc04npc7s40hey0xghltdcsk7q5k0cq77qjtg8gt0cnvqff94s8j5tvx94ssyjyr35c9rqx08lkwxfqgr6dt3vu99wg8dg0c7jz +10_000,pizza,penumbra1hckq024z3fd6wl29kk4rwfnykgyxyawpuw8zc04npc7s40hey0xghltdcsk7q5k0cq77qjtg8gt0cnvqff94s8j5tvx94ssyjyr35c9rqx08lkwxfqgr6dt3vu99wg8dg0c7jz +100,cube,penumbra1hckq024z3fd6wl29kk4rwfnykgyxyawpuw8zc04npc7s40hey0xghltdcsk7q5k0cq77qjtg8gt0cnvqff94s8j5tvx94ssyjyr35c9rqx08lkwxfqgr6dt3vu99wg8dg0c7jz +500_000,test_usd,penumbra1hckq024z3fd6wl29kk4rwfnykgyxyawpuw8zc04npc7s40hey0xghltdcsk7q5k0cq77qjtg8gt0cnvqff94s8j5tvx94ssyjyr35c9rqx08lkwxfqgr6dt3vu99wg8dg0c7jz +1_000_000__000_000,upenumbra,penumbra1f5qra6dk8pmp0m2s42sq4qsxthjp6mmcpj3rjwrn74getpmwhrgzhe545fhz6eew2gdcpn2ee0j7fdm4whxl8ux6p5jz6g5fz72u4pzd4z26jnxeasvsvwapx2lvhxw8uy7dvw +20_000,gm,penumbra1f5qra6dk8pmp0m2s42sq4qsxthjp6mmcpj3rjwrn74getpmwhrgzhe545fhz6eew2gdcpn2ee0j7fdm4whxl8ux6p5jz6g5fz72u4pzd4z26jnxeasvsvwapx2lvhxw8uy7dvw +20_000,gn,penumbra1f5qra6dk8pmp0m2s42sq4qsxthjp6mmcpj3rjwrn74getpmwhrgzhe545fhz6eew2gdcpn2ee0j7fdm4whxl8ux6p5jz6g5fz72u4pzd4z26jnxeasvsvwapx2lvhxw8uy7dvw +10_000,pizza,penumbra1f5qra6dk8pmp0m2s42sq4qsxthjp6mmcpj3rjwrn74getpmwhrgzhe545fhz6eew2gdcpn2ee0j7fdm4whxl8ux6p5jz6g5fz72u4pzd4z26jnxeasvsvwapx2lvhxw8uy7dvw +100,cube,penumbra1f5qra6dk8pmp0m2s42sq4qsxthjp6mmcpj3rjwrn74getpmwhrgzhe545fhz6eew2gdcpn2ee0j7fdm4whxl8ux6p5jz6g5fz72u4pzd4z26jnxeasvsvwapx2lvhxw8uy7dvw +500_000,test_usd,penumbra1f5qra6dk8pmp0m2s42sq4qsxthjp6mmcpj3rjwrn74getpmwhrgzhe545fhz6eew2gdcpn2ee0j7fdm4whxl8ux6p5jz6g5fz72u4pzd4z26jnxeasvsvwapx2lvhxw8uy7dvw 
+1_000_000__000_000,upenumbra,penumbra18dms48wvudc7qljl6zjq48hyuvxxvhm2se2zltpjndpnn5c092mwych7uvs0xztpc7q0e2j7s89nyfxyj2pmu50zafvhwq5x4prm49ax6c7txmvjjdkgu9c8u0nhyqgrsmfn2c +20_000,gm,penumbra18dms48wvudc7qljl6zjq48hyuvxxvhm2se2zltpjndpnn5c092mwych7uvs0xztpc7q0e2j7s89nyfxyj2pmu50zafvhwq5x4prm49ax6c7txmvjjdkgu9c8u0nhyqgrsmfn2c +20_000,gn,penumbra18dms48wvudc7qljl6zjq48hyuvxxvhm2se2zltpjndpnn5c092mwych7uvs0xztpc7q0e2j7s89nyfxyj2pmu50zafvhwq5x4prm49ax6c7txmvjjdkgu9c8u0nhyqgrsmfn2c +10_000,pizza,penumbra18dms48wvudc7qljl6zjq48hyuvxxvhm2se2zltpjndpnn5c092mwych7uvs0xztpc7q0e2j7s89nyfxyj2pmu50zafvhwq5x4prm49ax6c7txmvjjdkgu9c8u0nhyqgrsmfn2c +100,cube,penumbra18dms48wvudc7qljl6zjq48hyuvxxvhm2se2zltpjndpnn5c092mwych7uvs0xztpc7q0e2j7s89nyfxyj2pmu50zafvhwq5x4prm49ax6c7txmvjjdkgu9c8u0nhyqgrsmfn2c +500_000,test_usd,penumbra18dms48wvudc7qljl6zjq48hyuvxxvhm2se2zltpjndpnn5c092mwych7uvs0xztpc7q0e2j7s89nyfxyj2pmu50zafvhwq5x4prm49ax6c7txmvjjdkgu9c8u0nhyqgrsmfn2c +1_000_000__000_000,upenumbra,penumbra169w7gexlfa9m7gsa3vmwyfpu92j0yxawmlkt06g3qg5ta29czwzepzqp0d0nl324jgcjrc6fy0kmqhjlureeyjulejug8e2h6zj0kqf0vapwszahqm8swg634extm3837kl59v +20_000,gm,penumbra169w7gexlfa9m7gsa3vmwyfpu92j0yxawmlkt06g3qg5ta29czwzepzqp0d0nl324jgcjrc6fy0kmqhjlureeyjulejug8e2h6zj0kqf0vapwszahqm8swg634extm3837kl59v +20_000,gn,penumbra169w7gexlfa9m7gsa3vmwyfpu92j0yxawmlkt06g3qg5ta29czwzepzqp0d0nl324jgcjrc6fy0kmqhjlureeyjulejug8e2h6zj0kqf0vapwszahqm8swg634extm3837kl59v +10_000,pizza,penumbra169w7gexlfa9m7gsa3vmwyfpu92j0yxawmlkt06g3qg5ta29czwzepzqp0d0nl324jgcjrc6fy0kmqhjlureeyjulejug8e2h6zj0kqf0vapwszahqm8swg634extm3837kl59v +100,cube,penumbra169w7gexlfa9m7gsa3vmwyfpu92j0yxawmlkt06g3qg5ta29czwzepzqp0d0nl324jgcjrc6fy0kmqhjlureeyjulejug8e2h6zj0kqf0vapwszahqm8swg634extm3837kl59v +500_000,test_usd,penumbra169w7gexlfa9m7gsa3vmwyfpu92j0yxawmlkt06g3qg5ta29czwzepzqp0d0nl324jgcjrc6fy0kmqhjlureeyjulejug8e2h6zj0kqf0vapwszahqm8swg634extm3837kl59v 
+1_000_000__000_000,upenumbra,penumbra1e7erpuxqa7u0zcvdnqm0e5w6dvj2arksj0q99gmtlzj4mk5xnddkrnsjl4e28ughgjxm3jaendsrmc3shn7ewwxav6lvewqxqz8gs0zypnnzys27jgnvmcfxpgd4y5sg2gdeyd +20_000,gm,penumbra1e7erpuxqa7u0zcvdnqm0e5w6dvj2arksj0q99gmtlzj4mk5xnddkrnsjl4e28ughgjxm3jaendsrmc3shn7ewwxav6lvewqxqz8gs0zypnnzys27jgnvmcfxpgd4y5sg2gdeyd +20_000,gn,penumbra1e7erpuxqa7u0zcvdnqm0e5w6dvj2arksj0q99gmtlzj4mk5xnddkrnsjl4e28ughgjxm3jaendsrmc3shn7ewwxav6lvewqxqz8gs0zypnnzys27jgnvmcfxpgd4y5sg2gdeyd +10_000,pizza,penumbra1e7erpuxqa7u0zcvdnqm0e5w6dvj2arksj0q99gmtlzj4mk5xnddkrnsjl4e28ughgjxm3jaendsrmc3shn7ewwxav6lvewqxqz8gs0zypnnzys27jgnvmcfxpgd4y5sg2gdeyd +100,cube,penumbra1e7erpuxqa7u0zcvdnqm0e5w6dvj2arksj0q99gmtlzj4mk5xnddkrnsjl4e28ughgjxm3jaendsrmc3shn7ewwxav6lvewqxqz8gs0zypnnzys27jgnvmcfxpgd4y5sg2gdeyd +500_000,test_usd,penumbra1e7erpuxqa7u0zcvdnqm0e5w6dvj2arksj0q99gmtlzj4mk5xnddkrnsjl4e28ughgjxm3jaendsrmc3shn7ewwxav6lvewqxqz8gs0zypnnzys27jgnvmcfxpgd4y5sg2gdeyd +1_000_000__000_000,upenumbra,penumbra1ks9t4vrp9alvk9yfapyu75hjf7p3wva76t0dz9a902zlp8vuyn0q9j2a72pu9j0uxmg356xstqpmafkj28ktx37l4lzq25nmgdeay5ls00yky2pgladnr5z7u3ftav7lw9vnre +20_000,gm,penumbra1ks9t4vrp9alvk9yfapyu75hjf7p3wva76t0dz9a902zlp8vuyn0q9j2a72pu9j0uxmg356xstqpmafkj28ktx37l4lzq25nmgdeay5ls00yky2pgladnr5z7u3ftav7lw9vnre +20_000,gn,penumbra1ks9t4vrp9alvk9yfapyu75hjf7p3wva76t0dz9a902zlp8vuyn0q9j2a72pu9j0uxmg356xstqpmafkj28ktx37l4lzq25nmgdeay5ls00yky2pgladnr5z7u3ftav7lw9vnre +10_000,pizza,penumbra1ks9t4vrp9alvk9yfapyu75hjf7p3wva76t0dz9a902zlp8vuyn0q9j2a72pu9j0uxmg356xstqpmafkj28ktx37l4lzq25nmgdeay5ls00yky2pgladnr5z7u3ftav7lw9vnre +100,cube,penumbra1ks9t4vrp9alvk9yfapyu75hjf7p3wva76t0dz9a902zlp8vuyn0q9j2a72pu9j0uxmg356xstqpmafkj28ktx37l4lzq25nmgdeay5ls00yky2pgladnr5z7u3ftav7lw9vnre +500_000,test_usd,penumbra1ks9t4vrp9alvk9yfapyu75hjf7p3wva76t0dz9a902zlp8vuyn0q9j2a72pu9j0uxmg356xstqpmafkj28ktx37l4lzq25nmgdeay5ls00yky2pgladnr5z7u3ftav7lw9vnre 
+1_000_000__000_000,upenumbra,penumbra183uj3sh6d22j6mguu3vlpfcp87lh93jfzn3ucehfk2j4ek07jnwelznq2k8jk200gwrnyyvttexay2u638l7s6dhnh7vec3wscyefslswnhwd44ywqfxzxd4wdt8eqxy8st7k0 +20_000,gm,penumbra183uj3sh6d22j6mguu3vlpfcp87lh93jfzn3ucehfk2j4ek07jnwelznq2k8jk200gwrnyyvttexay2u638l7s6dhnh7vec3wscyefslswnhwd44ywqfxzxd4wdt8eqxy8st7k0 +20_000,gn,penumbra183uj3sh6d22j6mguu3vlpfcp87lh93jfzn3ucehfk2j4ek07jnwelznq2k8jk200gwrnyyvttexay2u638l7s6dhnh7vec3wscyefslswnhwd44ywqfxzxd4wdt8eqxy8st7k0 +10_000,pizza,penumbra183uj3sh6d22j6mguu3vlpfcp87lh93jfzn3ucehfk2j4ek07jnwelznq2k8jk200gwrnyyvttexay2u638l7s6dhnh7vec3wscyefslswnhwd44ywqfxzxd4wdt8eqxy8st7k0 +100,cube,penumbra183uj3sh6d22j6mguu3vlpfcp87lh93jfzn3ucehfk2j4ek07jnwelznq2k8jk200gwrnyyvttexay2u638l7s6dhnh7vec3wscyefslswnhwd44ywqfxzxd4wdt8eqxy8st7k0 +500_000,test_usd,penumbra183uj3sh6d22j6mguu3vlpfcp87lh93jfzn3ucehfk2j4ek07jnwelznq2k8jk200gwrnyyvttexay2u638l7s6dhnh7vec3wscyefslswnhwd44ywqfxzxd4wdt8eqxy8st7k0 +1_000_000__000_000,upenumbra,penumbra1r70z64vqnmv28rttpv60hppd7gca9w5a3zwctx4c6yrtfd4mlvjrevkx3l2swdnw8vq7amylgfwaq2x97dlxmmh0flpxetmd8gjf0rpmg083ms94psmvpgy7hgdhwwn6nr92sc +20_000,gm,penumbra1r70z64vqnmv28rttpv60hppd7gca9w5a3zwctx4c6yrtfd4mlvjrevkx3l2swdnw8vq7amylgfwaq2x97dlxmmh0flpxetmd8gjf0rpmg083ms94psmvpgy7hgdhwwn6nr92sc +20_000,gn,penumbra1r70z64vqnmv28rttpv60hppd7gca9w5a3zwctx4c6yrtfd4mlvjrevkx3l2swdnw8vq7amylgfwaq2x97dlxmmh0flpxetmd8gjf0rpmg083ms94psmvpgy7hgdhwwn6nr92sc +10_000,pizza,penumbra1r70z64vqnmv28rttpv60hppd7gca9w5a3zwctx4c6yrtfd4mlvjrevkx3l2swdnw8vq7amylgfwaq2x97dlxmmh0flpxetmd8gjf0rpmg083ms94psmvpgy7hgdhwwn6nr92sc +100,cube,penumbra1r70z64vqnmv28rttpv60hppd7gca9w5a3zwctx4c6yrtfd4mlvjrevkx3l2swdnw8vq7amylgfwaq2x97dlxmmh0flpxetmd8gjf0rpmg083ms94psmvpgy7hgdhwwn6nr92sc +500_000,test_usd,penumbra1r70z64vqnmv28rttpv60hppd7gca9w5a3zwctx4c6yrtfd4mlvjrevkx3l2swdnw8vq7amylgfwaq2x97dlxmmh0flpxetmd8gjf0rpmg083ms94psmvpgy7hgdhwwn6nr92sc 
+1_000_000__000_000,upenumbra,penumbra18z69jxec5008krvlhw84459ecermc2x5r06flunlz6m45ftmphp2djp9cpxy4n4m2tykz00jjd3pgm5n3etpaq43ypt4xffcz8ag0ay3hutt7hdfevmjfft237j2x2vae99v3f +20_000,gm,penumbra18z69jxec5008krvlhw84459ecermc2x5r06flunlz6m45ftmphp2djp9cpxy4n4m2tykz00jjd3pgm5n3etpaq43ypt4xffcz8ag0ay3hutt7hdfevmjfft237j2x2vae99v3f +20_000,gn,penumbra18z69jxec5008krvlhw84459ecermc2x5r06flunlz6m45ftmphp2djp9cpxy4n4m2tykz00jjd3pgm5n3etpaq43ypt4xffcz8ag0ay3hutt7hdfevmjfft237j2x2vae99v3f +10_000,pizza,penumbra18z69jxec5008krvlhw84459ecermc2x5r06flunlz6m45ftmphp2djp9cpxy4n4m2tykz00jjd3pgm5n3etpaq43ypt4xffcz8ag0ay3hutt7hdfevmjfft237j2x2vae99v3f +100,cube,penumbra18z69jxec5008krvlhw84459ecermc2x5r06flunlz6m45ftmphp2djp9cpxy4n4m2tykz00jjd3pgm5n3etpaq43ypt4xffcz8ag0ay3hutt7hdfevmjfft237j2x2vae99v3f +500_000,test_usd,penumbra18z69jxec5008krvlhw84459ecermc2x5r06flunlz6m45ftmphp2djp9cpxy4n4m2tykz00jjd3pgm5n3etpaq43ypt4xffcz8ag0ay3hutt7hdfevmjfft237j2x2vae99v3f +1_000_000__000_000,upenumbra,penumbra1q87euedu4yw3f8c025zulgu6tgc8frmhak7ge6xwyauuptlp4z25mn92z6qpa9nhdp08rfs3lakxqpgpd58m8u02cjcrpec0xdt6t9zlgq7fkxmklvtpktykwn0rdlxeqyny2n +20_000,gm,penumbra1q87euedu4yw3f8c025zulgu6tgc8frmhak7ge6xwyauuptlp4z25mn92z6qpa9nhdp08rfs3lakxqpgpd58m8u02cjcrpec0xdt6t9zlgq7fkxmklvtpktykwn0rdlxeqyny2n +20_000,gn,penumbra1q87euedu4yw3f8c025zulgu6tgc8frmhak7ge6xwyauuptlp4z25mn92z6qpa9nhdp08rfs3lakxqpgpd58m8u02cjcrpec0xdt6t9zlgq7fkxmklvtpktykwn0rdlxeqyny2n +10_000,pizza,penumbra1q87euedu4yw3f8c025zulgu6tgc8frmhak7ge6xwyauuptlp4z25mn92z6qpa9nhdp08rfs3lakxqpgpd58m8u02cjcrpec0xdt6t9zlgq7fkxmklvtpktykwn0rdlxeqyny2n +100,cube,penumbra1q87euedu4yw3f8c025zulgu6tgc8frmhak7ge6xwyauuptlp4z25mn92z6qpa9nhdp08rfs3lakxqpgpd58m8u02cjcrpec0xdt6t9zlgq7fkxmklvtpktykwn0rdlxeqyny2n +500_000,test_usd,penumbra1q87euedu4yw3f8c025zulgu6tgc8frmhak7ge6xwyauuptlp4z25mn92z6qpa9nhdp08rfs3lakxqpgpd58m8u02cjcrpec0xdt6t9zlgq7fkxmklvtpktykwn0rdlxeqyny2n 
+1_000_000__000_000,upenumbra,penumbra10kqp4qk220hc2flys45czh2vsc30jwl4mfy8styvjnz0zlhtw6gef74k0v2r7nqghlkuauxd9wafpz2nh8rjfqp747hc3hxakml793rjau6ljnm46vlmnpmczt4jkq8xcdakgg +20_000,gm,penumbra10kqp4qk220hc2flys45czh2vsc30jwl4mfy8styvjnz0zlhtw6gef74k0v2r7nqghlkuauxd9wafpz2nh8rjfqp747hc3hxakml793rjau6ljnm46vlmnpmczt4jkq8xcdakgg +20_000,gn,penumbra10kqp4qk220hc2flys45czh2vsc30jwl4mfy8styvjnz0zlhtw6gef74k0v2r7nqghlkuauxd9wafpz2nh8rjfqp747hc3hxakml793rjau6ljnm46vlmnpmczt4jkq8xcdakgg +10_000,pizza,penumbra10kqp4qk220hc2flys45czh2vsc30jwl4mfy8styvjnz0zlhtw6gef74k0v2r7nqghlkuauxd9wafpz2nh8rjfqp747hc3hxakml793rjau6ljnm46vlmnpmczt4jkq8xcdakgg +100,cube,penumbra10kqp4qk220hc2flys45czh2vsc30jwl4mfy8styvjnz0zlhtw6gef74k0v2r7nqghlkuauxd9wafpz2nh8rjfqp747hc3hxakml793rjau6ljnm46vlmnpmczt4jkq8xcdakgg +500_000,test_usd,penumbra10kqp4qk220hc2flys45czh2vsc30jwl4mfy8styvjnz0zlhtw6gef74k0v2r7nqghlkuauxd9wafpz2nh8rjfqp747hc3hxakml793rjau6ljnm46vlmnpmczt4jkq8xcdakgg +1_000_000__000_000,upenumbra,penumbra17p75gersqlwg2cqyn8yn47f5eyl4wk36g9g9spq75gpnfd5s4wu740djd757f5e9afv2whtx844lpqr440qnwzanum34y05j5fz3uxjsdz6zpkrej7qlp6pr50v6l74jan2rzt +20_000,gm,penumbra17p75gersqlwg2cqyn8yn47f5eyl4wk36g9g9spq75gpnfd5s4wu740djd757f5e9afv2whtx844lpqr440qnwzanum34y05j5fz3uxjsdz6zpkrej7qlp6pr50v6l74jan2rzt +20_000,gn,penumbra17p75gersqlwg2cqyn8yn47f5eyl4wk36g9g9spq75gpnfd5s4wu740djd757f5e9afv2whtx844lpqr440qnwzanum34y05j5fz3uxjsdz6zpkrej7qlp6pr50v6l74jan2rzt +10_000,pizza,penumbra17p75gersqlwg2cqyn8yn47f5eyl4wk36g9g9spq75gpnfd5s4wu740djd757f5e9afv2whtx844lpqr440qnwzanum34y05j5fz3uxjsdz6zpkrej7qlp6pr50v6l74jan2rzt +100,cube,penumbra17p75gersqlwg2cqyn8yn47f5eyl4wk36g9g9spq75gpnfd5s4wu740djd757f5e9afv2whtx844lpqr440qnwzanum34y05j5fz3uxjsdz6zpkrej7qlp6pr50v6l74jan2rzt +500_000,test_usd,penumbra17p75gersqlwg2cqyn8yn47f5eyl4wk36g9g9spq75gpnfd5s4wu740djd757f5e9afv2whtx844lpqr440qnwzanum34y05j5fz3uxjsdz6zpkrej7qlp6pr50v6l74jan2rzt 
+1_000_000__000_000,upenumbra,penumbra1d6rgvasgspaj8uhzsmfcvu2t96m4m94jgpxeapvjhpzq4h8v2yv307zxx83cfv3vhuyxyw7qu3cqk94yssj5zenhxlshu2y06yayfsh82zhryh8pc550lcvs9xe7nanzkuvlgz +20_000,gm,penumbra1d6rgvasgspaj8uhzsmfcvu2t96m4m94jgpxeapvjhpzq4h8v2yv307zxx83cfv3vhuyxyw7qu3cqk94yssj5zenhxlshu2y06yayfsh82zhryh8pc550lcvs9xe7nanzkuvlgz +20_000,gn,penumbra1d6rgvasgspaj8uhzsmfcvu2t96m4m94jgpxeapvjhpzq4h8v2yv307zxx83cfv3vhuyxyw7qu3cqk94yssj5zenhxlshu2y06yayfsh82zhryh8pc550lcvs9xe7nanzkuvlgz +10_000,pizza,penumbra1d6rgvasgspaj8uhzsmfcvu2t96m4m94jgpxeapvjhpzq4h8v2yv307zxx83cfv3vhuyxyw7qu3cqk94yssj5zenhxlshu2y06yayfsh82zhryh8pc550lcvs9xe7nanzkuvlgz +100,cube,penumbra1d6rgvasgspaj8uhzsmfcvu2t96m4m94jgpxeapvjhpzq4h8v2yv307zxx83cfv3vhuyxyw7qu3cqk94yssj5zenhxlshu2y06yayfsh82zhryh8pc550lcvs9xe7nanzkuvlgz +500_000,test_usd,penumbra1d6rgvasgspaj8uhzsmfcvu2t96m4m94jgpxeapvjhpzq4h8v2yv307zxx83cfv3vhuyxyw7qu3cqk94yssj5zenhxlshu2y06yayfsh82zhryh8pc550lcvs9xe7nanzkuvlgz +1_000_000__000_000,upenumbra,penumbra1ky24hfyrqxwl9vwlqnpsm4q4ryc5e8nlaxu3efw9mnl25nqjj47g7kluzpfu6w2a5w0ymzm07uwt0zska8v99qsu2qkuwnmgjfhf7qhea6aqqt9yevxjze2sml7hux7px2wp5m +20_000,gm,penumbra1ky24hfyrqxwl9vwlqnpsm4q4ryc5e8nlaxu3efw9mnl25nqjj47g7kluzpfu6w2a5w0ymzm07uwt0zska8v99qsu2qkuwnmgjfhf7qhea6aqqt9yevxjze2sml7hux7px2wp5m +20_000,gn,penumbra1ky24hfyrqxwl9vwlqnpsm4q4ryc5e8nlaxu3efw9mnl25nqjj47g7kluzpfu6w2a5w0ymzm07uwt0zska8v99qsu2qkuwnmgjfhf7qhea6aqqt9yevxjze2sml7hux7px2wp5m +10_000,pizza,penumbra1ky24hfyrqxwl9vwlqnpsm4q4ryc5e8nlaxu3efw9mnl25nqjj47g7kluzpfu6w2a5w0ymzm07uwt0zska8v99qsu2qkuwnmgjfhf7qhea6aqqt9yevxjze2sml7hux7px2wp5m +100,cube,penumbra1ky24hfyrqxwl9vwlqnpsm4q4ryc5e8nlaxu3efw9mnl25nqjj47g7kluzpfu6w2a5w0ymzm07uwt0zska8v99qsu2qkuwnmgjfhf7qhea6aqqt9yevxjze2sml7hux7px2wp5m +500_000,test_usd,penumbra1ky24hfyrqxwl9vwlqnpsm4q4ryc5e8nlaxu3efw9mnl25nqjj47g7kluzpfu6w2a5w0ymzm07uwt0zska8v99qsu2qkuwnmgjfhf7qhea6aqqt9yevxjze2sml7hux7px2wp5m 
+5_000_000__000_000,upenumbra,penumbra1xq2e9x7uhfzezwunvazdamlxepf4jr5htsuqnzlsahuayyqxjjwg9lk0aytwm6wfj3jy29rv2kdpen57903s8wxv3jmqwj6m6v5jgn6y2cypfd03rke652k8wmavxra7e9wkrg +50_000,gm,penumbra1xq2e9x7uhfzezwunvazdamlxepf4jr5htsuqnzlsahuayyqxjjwg9lk0aytwm6wfj3jy29rv2kdpen57903s8wxv3jmqwj6m6v5jgn6y2cypfd03rke652k8wmavxra7e9wkrg +50_000,gn,penumbra1xq2e9x7uhfzezwunvazdamlxepf4jr5htsuqnzlsahuayyqxjjwg9lk0aytwm6wfj3jy29rv2kdpen57903s8wxv3jmqwj6m6v5jgn6y2cypfd03rke652k8wmavxra7e9wkrg +25_000,pizza,penumbra1xq2e9x7uhfzezwunvazdamlxepf4jr5htsuqnzlsahuayyqxjjwg9lk0aytwm6wfj3jy29rv2kdpen57903s8wxv3jmqwj6m6v5jgn6y2cypfd03rke652k8wmavxra7e9wkrg +250,cube,penumbra1xq2e9x7uhfzezwunvazdamlxepf4jr5htsuqnzlsahuayyqxjjwg9lk0aytwm6wfj3jy29rv2kdpen57903s8wxv3jmqwj6m6v5jgn6y2cypfd03rke652k8wmavxra7e9wkrg +1_000_000,test_usd,penumbra1xq2e9x7uhfzezwunvazdamlxepf4jr5htsuqnzlsahuayyqxjjwg9lk0aytwm6wfj3jy29rv2kdpen57903s8wxv3jmqwj6m6v5jgn6y2cypfd03rke652k8wmavxra7e9wkrg +10_000,nala,penumbra1xq2e9x7uhfzezwunvazdamlxepf4jr5htsuqnzlsahuayyqxjjwg9lk0aytwm6wfj3jy29rv2kdpen57903s8wxv3jmqwj6m6v5jgn6y2cypfd03rke652k8wmavxra7e9wkrg +5_000_000__000_000,upenumbra,penumbra1mflr95lhj0vjr86uyffkmxg6hjamjs7rrcykwgnyt8lcz0a6xqzh0xz78hpnq3x70s8cnu4cnyqq2wgkt9nthcc3k8scf4wlraccdmkr6wyzsap88t0du2dlv5xclgkmwmwnct +50_000,gm,penumbra1mflr95lhj0vjr86uyffkmxg6hjamjs7rrcykwgnyt8lcz0a6xqzh0xz78hpnq3x70s8cnu4cnyqq2wgkt9nthcc3k8scf4wlraccdmkr6wyzsap88t0du2dlv5xclgkmwmwnct +50_000,gn,penumbra1mflr95lhj0vjr86uyffkmxg6hjamjs7rrcykwgnyt8lcz0a6xqzh0xz78hpnq3x70s8cnu4cnyqq2wgkt9nthcc3k8scf4wlraccdmkr6wyzsap88t0du2dlv5xclgkmwmwnct +25_000,pizza,penumbra1mflr95lhj0vjr86uyffkmxg6hjamjs7rrcykwgnyt8lcz0a6xqzh0xz78hpnq3x70s8cnu4cnyqq2wgkt9nthcc3k8scf4wlraccdmkr6wyzsap88t0du2dlv5xclgkmwmwnct +250,cube,penumbra1mflr95lhj0vjr86uyffkmxg6hjamjs7rrcykwgnyt8lcz0a6xqzh0xz78hpnq3x70s8cnu4cnyqq2wgkt9nthcc3k8scf4wlraccdmkr6wyzsap88t0du2dlv5xclgkmwmwnct 
+1_000_000,test_usd,penumbra1mflr95lhj0vjr86uyffkmxg6hjamjs7rrcykwgnyt8lcz0a6xqzh0xz78hpnq3x70s8cnu4cnyqq2wgkt9nthcc3k8scf4wlraccdmkr6wyzsap88t0du2dlv5xclgkmwmwnct +10_000,nala,penumbra1mflr95lhj0vjr86uyffkmxg6hjamjs7rrcykwgnyt8lcz0a6xqzh0xz78hpnq3x70s8cnu4cnyqq2wgkt9nthcc3k8scf4wlraccdmkr6wyzsap88t0du2dlv5xclgkmwmwnct +5_000_000__000_000,upenumbra,penumbra1jdgsqxnqqhqwjc6j3lhm4qnqz4gu02j87p4v0uj95ur9xdtym9acnj6p6ykqx89crmj0gw8r6fl4xh4la2yxyks9czl4lvqsxm9e5u86gdus0fh9hqxvg8aq47cw9fv8l2vh34 +50_000,gm,penumbra1jdgsqxnqqhqwjc6j3lhm4qnqz4gu02j87p4v0uj95ur9xdtym9acnj6p6ykqx89crmj0gw8r6fl4xh4la2yxyks9czl4lvqsxm9e5u86gdus0fh9hqxvg8aq47cw9fv8l2vh34 +50_000,gn,penumbra1jdgsqxnqqhqwjc6j3lhm4qnqz4gu02j87p4v0uj95ur9xdtym9acnj6p6ykqx89crmj0gw8r6fl4xh4la2yxyks9czl4lvqsxm9e5u86gdus0fh9hqxvg8aq47cw9fv8l2vh34 +25_000,pizza,penumbra1jdgsqxnqqhqwjc6j3lhm4qnqz4gu02j87p4v0uj95ur9xdtym9acnj6p6ykqx89crmj0gw8r6fl4xh4la2yxyks9czl4lvqsxm9e5u86gdus0fh9hqxvg8aq47cw9fv8l2vh34 +250,cube,penumbra1jdgsqxnqqhqwjc6j3lhm4qnqz4gu02j87p4v0uj95ur9xdtym9acnj6p6ykqx89crmj0gw8r6fl4xh4la2yxyks9czl4lvqsxm9e5u86gdus0fh9hqxvg8aq47cw9fv8l2vh34 +1_000_000,test_usd,penumbra1jdgsqxnqqhqwjc6j3lhm4qnqz4gu02j87p4v0uj95ur9xdtym9acnj6p6ykqx89crmj0gw8r6fl4xh4la2yxyks9czl4lvqsxm9e5u86gdus0fh9hqxvg8aq47cw9fv8l2vh34 +10_000,nala,penumbra1jdgsqxnqqhqwjc6j3lhm4qnqz4gu02j87p4v0uj95ur9xdtym9acnj6p6ykqx89crmj0gw8r6fl4xh4la2yxyks9czl4lvqsxm9e5u86gdus0fh9hqxvg8aq47cw9fv8l2vh34 +5_000_000__000_000,upenumbra,penumbra1k20526f5je6c3mftpuu7p7mn3tr3p84x4qgm2nla9czwae9awse6mqsvn8qgr83y3cnhh7wkwv0s29ycs9r4p6lr5eeee0emquun8v7qlt7lt8w7ck3ukl5ctnufhzwmj00utn +50_000,gm,penumbra1k20526f5je6c3mftpuu7p7mn3tr3p84x4qgm2nla9czwae9awse6mqsvn8qgr83y3cnhh7wkwv0s29ycs9r4p6lr5eeee0emquun8v7qlt7lt8w7ck3ukl5ctnufhzwmj00utn +50_000,gn,penumbra1k20526f5je6c3mftpuu7p7mn3tr3p84x4qgm2nla9czwae9awse6mqsvn8qgr83y3cnhh7wkwv0s29ycs9r4p6lr5eeee0emquun8v7qlt7lt8w7ck3ukl5ctnufhzwmj00utn 
+25_000,pizza,penumbra1k20526f5je6c3mftpuu7p7mn3tr3p84x4qgm2nla9czwae9awse6mqsvn8qgr83y3cnhh7wkwv0s29ycs9r4p6lr5eeee0emquun8v7qlt7lt8w7ck3ukl5ctnufhzwmj00utn +250,cube,penumbra1k20526f5je6c3mftpuu7p7mn3tr3p84x4qgm2nla9czwae9awse6mqsvn8qgr83y3cnhh7wkwv0s29ycs9r4p6lr5eeee0emquun8v7qlt7lt8w7ck3ukl5ctnufhzwmj00utn +1_000_000,test_usd,penumbra1k20526f5je6c3mftpuu7p7mn3tr3p84x4qgm2nla9czwae9awse6mqsvn8qgr83y3cnhh7wkwv0s29ycs9r4p6lr5eeee0emquun8v7qlt7lt8w7ck3ukl5ctnufhzwmj00utn +10_000,nala,penumbra1k20526f5je6c3mftpuu7p7mn3tr3p84x4qgm2nla9czwae9awse6mqsvn8qgr83y3cnhh7wkwv0s29ycs9r4p6lr5eeee0emquun8v7qlt7lt8w7ck3ukl5ctnufhzwmj00utn +5_000_000__000_000,upenumbra,penumbra1s03zdrdjy4uc68rwtp2mz94lgg0k8fuyma4pphk6v2a66dap2d8xfd0l8rmtnh8l2pxdxyyy6gn9rqyzuga36sjnd30w6t0prq82wxve0tlx3veme9qpjlq82uvh6eytujzefr +50_000,gm,penumbra1s03zdrdjy4uc68rwtp2mz94lgg0k8fuyma4pphk6v2a66dap2d8xfd0l8rmtnh8l2pxdxyyy6gn9rqyzuga36sjnd30w6t0prq82wxve0tlx3veme9qpjlq82uvh6eytujzefr +50_000,gn,penumbra1s03zdrdjy4uc68rwtp2mz94lgg0k8fuyma4pphk6v2a66dap2d8xfd0l8rmtnh8l2pxdxyyy6gn9rqyzuga36sjnd30w6t0prq82wxve0tlx3veme9qpjlq82uvh6eytujzefr +25_000,pizza,penumbra1s03zdrdjy4uc68rwtp2mz94lgg0k8fuyma4pphk6v2a66dap2d8xfd0l8rmtnh8l2pxdxyyy6gn9rqyzuga36sjnd30w6t0prq82wxve0tlx3veme9qpjlq82uvh6eytujzefr +250,cube,penumbra1s03zdrdjy4uc68rwtp2mz94lgg0k8fuyma4pphk6v2a66dap2d8xfd0l8rmtnh8l2pxdxyyy6gn9rqyzuga36sjnd30w6t0prq82wxve0tlx3veme9qpjlq82uvh6eytujzefr +1_000_000,test_usd,penumbra1s03zdrdjy4uc68rwtp2mz94lgg0k8fuyma4pphk6v2a66dap2d8xfd0l8rmtnh8l2pxdxyyy6gn9rqyzuga36sjnd30w6t0prq82wxve0tlx3veme9qpjlq82uvh6eytujzefr +10_000,nala,penumbra1s03zdrdjy4uc68rwtp2mz94lgg0k8fuyma4pphk6v2a66dap2d8xfd0l8rmtnh8l2pxdxyyy6gn9rqyzuga36sjnd30w6t0prq82wxve0tlx3veme9qpjlq82uvh6eytujzefr +5_000_000__000_000,upenumbra,penumbra1ydlrv3mu25v5sz4fy0729lktq2xfwdhe4sfpe8886zmh82qn8j49ysnjc7cp3j4jzyp55m0a8dtr38uetzx97wjvcce5a9w42n5r7k5xzsypp59k0wkay5mq030lgdhz9pnf5w 
+50_000,gm,penumbra1ydlrv3mu25v5sz4fy0729lktq2xfwdhe4sfpe8886zmh82qn8j49ysnjc7cp3j4jzyp55m0a8dtr38uetzx97wjvcce5a9w42n5r7k5xzsypp59k0wkay5mq030lgdhz9pnf5w +50_000,gn,penumbra1ydlrv3mu25v5sz4fy0729lktq2xfwdhe4sfpe8886zmh82qn8j49ysnjc7cp3j4jzyp55m0a8dtr38uetzx97wjvcce5a9w42n5r7k5xzsypp59k0wkay5mq030lgdhz9pnf5w +25_000,pizza,penumbra1ydlrv3mu25v5sz4fy0729lktq2xfwdhe4sfpe8886zmh82qn8j49ysnjc7cp3j4jzyp55m0a8dtr38uetzx97wjvcce5a9w42n5r7k5xzsypp59k0wkay5mq030lgdhz9pnf5w +250,cube,penumbra1ydlrv3mu25v5sz4fy0729lktq2xfwdhe4sfpe8886zmh82qn8j49ysnjc7cp3j4jzyp55m0a8dtr38uetzx97wjvcce5a9w42n5r7k5xzsypp59k0wkay5mq030lgdhz9pnf5w +1_000_000,test_usd,penumbra1ydlrv3mu25v5sz4fy0729lktq2xfwdhe4sfpe8886zmh82qn8j49ysnjc7cp3j4jzyp55m0a8dtr38uetzx97wjvcce5a9w42n5r7k5xzsypp59k0wkay5mq030lgdhz9pnf5w +10_000,nala,penumbra1ydlrv3mu25v5sz4fy0729lktq2xfwdhe4sfpe8886zmh82qn8j49ysnjc7cp3j4jzyp55m0a8dtr38uetzx97wjvcce5a9w42n5r7k5xzsypp59k0wkay5mq030lgdhz9pnf5w +5_000_000__000_000,upenumbra,penumbra1csu9gn9x9pt237s3jmc9sen6d0th582623e737s9hymqp55hwnfp3j3mhjcksktzqpx0qls6n4anc9nl82w7y5zpeztg4dxma6lmgw6dwj76v7drqn8tznl86jnrpvm3zfg3y4 +50_000,gm,penumbra1csu9gn9x9pt237s3jmc9sen6d0th582623e737s9hymqp55hwnfp3j3mhjcksktzqpx0qls6n4anc9nl82w7y5zpeztg4dxma6lmgw6dwj76v7drqn8tznl86jnrpvm3zfg3y4 +50_000,gn,penumbra1csu9gn9x9pt237s3jmc9sen6d0th582623e737s9hymqp55hwnfp3j3mhjcksktzqpx0qls6n4anc9nl82w7y5zpeztg4dxma6lmgw6dwj76v7drqn8tznl86jnrpvm3zfg3y4 +25_000,pizza,penumbra1csu9gn9x9pt237s3jmc9sen6d0th582623e737s9hymqp55hwnfp3j3mhjcksktzqpx0qls6n4anc9nl82w7y5zpeztg4dxma6lmgw6dwj76v7drqn8tznl86jnrpvm3zfg3y4 +250,cube,penumbra1csu9gn9x9pt237s3jmc9sen6d0th582623e737s9hymqp55hwnfp3j3mhjcksktzqpx0qls6n4anc9nl82w7y5zpeztg4dxma6lmgw6dwj76v7drqn8tznl86jnrpvm3zfg3y4 +1_000_000,test_usd,penumbra1csu9gn9x9pt237s3jmc9sen6d0th582623e737s9hymqp55hwnfp3j3mhjcksktzqpx0qls6n4anc9nl82w7y5zpeztg4dxma6lmgw6dwj76v7drqn8tznl86jnrpvm3zfg3y4 
+10_000,nala,penumbra1csu9gn9x9pt237s3jmc9sen6d0th582623e737s9hymqp55hwnfp3j3mhjcksktzqpx0qls6n4anc9nl82w7y5zpeztg4dxma6lmgw6dwj76v7drqn8tznl86jnrpvm3zfg3y4 +5_000_000__000_000,upenumbra,penumbra1ufmv2ztuts8uuwa0lhn5v7fyp7tkrhseddkx54m538qknjrdyexj9kc5lez8s925vrqlkn724elqfq657gdpfr3z2qn3krhxx3zfpgnq43mct9zwzjjqscueml8w349php0aup +50_000,gm,penumbra1ufmv2ztuts8uuwa0lhn5v7fyp7tkrhseddkx54m538qknjrdyexj9kc5lez8s925vrqlkn724elqfq657gdpfr3z2qn3krhxx3zfpgnq43mct9zwzjjqscueml8w349php0aup +50_000,gn,penumbra1ufmv2ztuts8uuwa0lhn5v7fyp7tkrhseddkx54m538qknjrdyexj9kc5lez8s925vrqlkn724elqfq657gdpfr3z2qn3krhxx3zfpgnq43mct9zwzjjqscueml8w349php0aup +25_000,pizza,penumbra1ufmv2ztuts8uuwa0lhn5v7fyp7tkrhseddkx54m538qknjrdyexj9kc5lez8s925vrqlkn724elqfq657gdpfr3z2qn3krhxx3zfpgnq43mct9zwzjjqscueml8w349php0aup +250,cube,penumbra1ufmv2ztuts8uuwa0lhn5v7fyp7tkrhseddkx54m538qknjrdyexj9kc5lez8s925vrqlkn724elqfq657gdpfr3z2qn3krhxx3zfpgnq43mct9zwzjjqscueml8w349php0aup +1_000_000,test_usd,penumbra1ufmv2ztuts8uuwa0lhn5v7fyp7tkrhseddkx54m538qknjrdyexj9kc5lez8s925vrqlkn724elqfq657gdpfr3z2qn3krhxx3zfpgnq43mct9zwzjjqscueml8w349php0aup +10_000,nala,penumbra1ufmv2ztuts8uuwa0lhn5v7fyp7tkrhseddkx54m538qknjrdyexj9kc5lez8s925vrqlkn724elqfq657gdpfr3z2qn3krhxx3zfpgnq43mct9zwzjjqscueml8w349php0aup +5_000_000__000_000,upenumbra,penumbra1qu6svkqwyw5wu3huzuxcygurtvr7fek96akrwaypqq3sqggghaq6xdykx4gh8tvtgegqfyllrpmx4vhfpgq2sxmrfpv9zww5ugjxpfac7304hwhc3v7x0kw2wn9ng68nsejhz8 +50_000,gm,penumbra1qu6svkqwyw5wu3huzuxcygurtvr7fek96akrwaypqq3sqggghaq6xdykx4gh8tvtgegqfyllrpmx4vhfpgq2sxmrfpv9zww5ugjxpfac7304hwhc3v7x0kw2wn9ng68nsejhz8 +50_000,gn,penumbra1qu6svkqwyw5wu3huzuxcygurtvr7fek96akrwaypqq3sqggghaq6xdykx4gh8tvtgegqfyllrpmx4vhfpgq2sxmrfpv9zww5ugjxpfac7304hwhc3v7x0kw2wn9ng68nsejhz8 +25_000,pizza,penumbra1qu6svkqwyw5wu3huzuxcygurtvr7fek96akrwaypqq3sqggghaq6xdykx4gh8tvtgegqfyllrpmx4vhfpgq2sxmrfpv9zww5ugjxpfac7304hwhc3v7x0kw2wn9ng68nsejhz8 
+250,cube,penumbra1qu6svkqwyw5wu3huzuxcygurtvr7fek96akrwaypqq3sqggghaq6xdykx4gh8tvtgegqfyllrpmx4vhfpgq2sxmrfpv9zww5ugjxpfac7304hwhc3v7x0kw2wn9ng68nsejhz8 +1_000_000,test_usd,penumbra1qu6svkqwyw5wu3huzuxcygurtvr7fek96akrwaypqq3sqggghaq6xdykx4gh8tvtgegqfyllrpmx4vhfpgq2sxmrfpv9zww5ugjxpfac7304hwhc3v7x0kw2wn9ng68nsejhz8 +10_000,nala,penumbra1qu6svkqwyw5wu3huzuxcygurtvr7fek96akrwaypqq3sqggghaq6xdykx4gh8tvtgegqfyllrpmx4vhfpgq2sxmrfpv9zww5ugjxpfac7304hwhc3v7x0kw2wn9ng68nsejhz8 +5_000_000__000_000,upenumbra,penumbra1ayr3xctcv7l8hqj3fq6fg9m5xnvpx5257dnc8axqeeaw7da8llenv53f4au2vyr8kzu7hh6ehlup5kxcnfqr7sgxzmtmu02qt9sxw2k97txa4x6crprlllvd0ly5ku5nd88cgg +50_000,gm,penumbra1ayr3xctcv7l8hqj3fq6fg9m5xnvpx5257dnc8axqeeaw7da8llenv53f4au2vyr8kzu7hh6ehlup5kxcnfqr7sgxzmtmu02qt9sxw2k97txa4x6crprlllvd0ly5ku5nd88cgg +50_000,gn,penumbra1ayr3xctcv7l8hqj3fq6fg9m5xnvpx5257dnc8axqeeaw7da8llenv53f4au2vyr8kzu7hh6ehlup5kxcnfqr7sgxzmtmu02qt9sxw2k97txa4x6crprlllvd0ly5ku5nd88cgg +25_000,pizza,penumbra1ayr3xctcv7l8hqj3fq6fg9m5xnvpx5257dnc8axqeeaw7da8llenv53f4au2vyr8kzu7hh6ehlup5kxcnfqr7sgxzmtmu02qt9sxw2k97txa4x6crprlllvd0ly5ku5nd88cgg +250,cube,penumbra1ayr3xctcv7l8hqj3fq6fg9m5xnvpx5257dnc8axqeeaw7da8llenv53f4au2vyr8kzu7hh6ehlup5kxcnfqr7sgxzmtmu02qt9sxw2k97txa4x6crprlllvd0ly5ku5nd88cgg +1_000_000,test_usd,penumbra1ayr3xctcv7l8hqj3fq6fg9m5xnvpx5257dnc8axqeeaw7da8llenv53f4au2vyr8kzu7hh6ehlup5kxcnfqr7sgxzmtmu02qt9sxw2k97txa4x6crprlllvd0ly5ku5nd88cgg +10_000,nala,penumbra1ayr3xctcv7l8hqj3fq6fg9m5xnvpx5257dnc8axqeeaw7da8llenv53f4au2vyr8kzu7hh6ehlup5kxcnfqr7sgxzmtmu02qt9sxw2k97txa4x6crprlllvd0ly5ku5nd88cgg +5_000_000__000_000,upenumbra,penumbra1ptthvmv380xzsph57ly92ctr88x285rce9adggsxgmwtve7n2q80lgea7q33gpxyfzevu36jk45sxh9muyj4kx7dhsasnzx8kass5p77j2vg5yp4gzpez3e6w0tqmpmwmr4s0w +50_000,gm,penumbra1ptthvmv380xzsph57ly92ctr88x285rce9adggsxgmwtve7n2q80lgea7q33gpxyfzevu36jk45sxh9muyj4kx7dhsasnzx8kass5p77j2vg5yp4gzpez3e6w0tqmpmwmr4s0w 
+50_000,gn,penumbra1ptthvmv380xzsph57ly92ctr88x285rce9adggsxgmwtve7n2q80lgea7q33gpxyfzevu36jk45sxh9muyj4kx7dhsasnzx8kass5p77j2vg5yp4gzpez3e6w0tqmpmwmr4s0w +25_000,pizza,penumbra1ptthvmv380xzsph57ly92ctr88x285rce9adggsxgmwtve7n2q80lgea7q33gpxyfzevu36jk45sxh9muyj4kx7dhsasnzx8kass5p77j2vg5yp4gzpez3e6w0tqmpmwmr4s0w +250,cube,penumbra1ptthvmv380xzsph57ly92ctr88x285rce9adggsxgmwtve7n2q80lgea7q33gpxyfzevu36jk45sxh9muyj4kx7dhsasnzx8kass5p77j2vg5yp4gzpez3e6w0tqmpmwmr4s0w +1_000_000,test_usd,penumbra1ptthvmv380xzsph57ly92ctr88x285rce9adggsxgmwtve7n2q80lgea7q33gpxyfzevu36jk45sxh9muyj4kx7dhsasnzx8kass5p77j2vg5yp4gzpez3e6w0tqmpmwmr4s0w +10_000,nala,penumbra1ptthvmv380xzsph57ly92ctr88x285rce9adggsxgmwtve7n2q80lgea7q33gpxyfzevu36jk45sxh9muyj4kx7dhsasnzx8kass5p77j2vg5yp4gzpez3e6w0tqmpmwmr4s0w +5_000_000__000_000,upenumbra,penumbra1jf62rvcyl7pz4373a52r7wyey4tanjwvstgnh8t6a9vatut6pg5xz89mvegjxr6jjxyzdvtfxlenalwcx2pc0pzv93czarx86wvtgs6k7xrjy3ra07wwkgjqzckxsx00g8kawf +50_000,gm,penumbra1jf62rvcyl7pz4373a52r7wyey4tanjwvstgnh8t6a9vatut6pg5xz89mvegjxr6jjxyzdvtfxlenalwcx2pc0pzv93czarx86wvtgs6k7xrjy3ra07wwkgjqzckxsx00g8kawf +50_000,gn,penumbra1jf62rvcyl7pz4373a52r7wyey4tanjwvstgnh8t6a9vatut6pg5xz89mvegjxr6jjxyzdvtfxlenalwcx2pc0pzv93czarx86wvtgs6k7xrjy3ra07wwkgjqzckxsx00g8kawf +25_000,pizza,penumbra1jf62rvcyl7pz4373a52r7wyey4tanjwvstgnh8t6a9vatut6pg5xz89mvegjxr6jjxyzdvtfxlenalwcx2pc0pzv93czarx86wvtgs6k7xrjy3ra07wwkgjqzckxsx00g8kawf +250,cube,penumbra1jf62rvcyl7pz4373a52r7wyey4tanjwvstgnh8t6a9vatut6pg5xz89mvegjxr6jjxyzdvtfxlenalwcx2pc0pzv93czarx86wvtgs6k7xrjy3ra07wwkgjqzckxsx00g8kawf +1_000_000,test_usd,penumbra1jf62rvcyl7pz4373a52r7wyey4tanjwvstgnh8t6a9vatut6pg5xz89mvegjxr6jjxyzdvtfxlenalwcx2pc0pzv93czarx86wvtgs6k7xrjy3ra07wwkgjqzckxsx00g8kawf +10_000,nala,penumbra1jf62rvcyl7pz4373a52r7wyey4tanjwvstgnh8t6a9vatut6pg5xz89mvegjxr6jjxyzdvtfxlenalwcx2pc0pzv93czarx86wvtgs6k7xrjy3ra07wwkgjqzckxsx00g8kawf 
+5_000_000__000_000,upenumbra,penumbra1grp3dk8vze54vq529lw8p5ankpk795hu5pg4zzjqaefggskqsehjaqsugh9uqavzytpmz8maxf4t3w49jl3ltaqakjejr85hw8qxnv8wlve0zhyhxk904q4yagjxgn9368ak8c +50_000,gm,penumbra1grp3dk8vze54vq529lw8p5ankpk795hu5pg4zzjqaefggskqsehjaqsugh9uqavzytpmz8maxf4t3w49jl3ltaqakjejr85hw8qxnv8wlve0zhyhxk904q4yagjxgn9368ak8c +50_000,gn,penumbra1grp3dk8vze54vq529lw8p5ankpk795hu5pg4zzjqaefggskqsehjaqsugh9uqavzytpmz8maxf4t3w49jl3ltaqakjejr85hw8qxnv8wlve0zhyhxk904q4yagjxgn9368ak8c +25_000,pizza,penumbra1grp3dk8vze54vq529lw8p5ankpk795hu5pg4zzjqaefggskqsehjaqsugh9uqavzytpmz8maxf4t3w49jl3ltaqakjejr85hw8qxnv8wlve0zhyhxk904q4yagjxgn9368ak8c +250,cube,penumbra1grp3dk8vze54vq529lw8p5ankpk795hu5pg4zzjqaefggskqsehjaqsugh9uqavzytpmz8maxf4t3w49jl3ltaqakjejr85hw8qxnv8wlve0zhyhxk904q4yagjxgn9368ak8c +1_000_000,test_usd,penumbra1grp3dk8vze54vq529lw8p5ankpk795hu5pg4zzjqaefggskqsehjaqsugh9uqavzytpmz8maxf4t3w49jl3ltaqakjejr85hw8qxnv8wlve0zhyhxk904q4yagjxgn9368ak8c +10_000,nala,penumbra1grp3dk8vze54vq529lw8p5ankpk795hu5pg4zzjqaefggskqsehjaqsugh9uqavzytpmz8maxf4t3w49jl3ltaqakjejr85hw8qxnv8wlve0zhyhxk904q4yagjxgn9368ak8c +5_000_000__000_000,upenumbra,penumbra18vddyvr33t8ukka0c94spdg63fh4yycwdtl9h3gpf4wfmnn5t2wp64xtmmlexlrjn90um2l6ne75evw8cvu27eufstsm5m7dfs740cw04grtmdsvqyc2vmqz3h4wu6ecg6ejrf +50_000,gm,penumbra18vddyvr33t8ukka0c94spdg63fh4yycwdtl9h3gpf4wfmnn5t2wp64xtmmlexlrjn90um2l6ne75evw8cvu27eufstsm5m7dfs740cw04grtmdsvqyc2vmqz3h4wu6ecg6ejrf +50_000,gn,penumbra18vddyvr33t8ukka0c94spdg63fh4yycwdtl9h3gpf4wfmnn5t2wp64xtmmlexlrjn90um2l6ne75evw8cvu27eufstsm5m7dfs740cw04grtmdsvqyc2vmqz3h4wu6ecg6ejrf +25_000,pizza,penumbra18vddyvr33t8ukka0c94spdg63fh4yycwdtl9h3gpf4wfmnn5t2wp64xtmmlexlrjn90um2l6ne75evw8cvu27eufstsm5m7dfs740cw04grtmdsvqyc2vmqz3h4wu6ecg6ejrf +250,cube,penumbra18vddyvr33t8ukka0c94spdg63fh4yycwdtl9h3gpf4wfmnn5t2wp64xtmmlexlrjn90um2l6ne75evw8cvu27eufstsm5m7dfs740cw04grtmdsvqyc2vmqz3h4wu6ecg6ejrf 
+1_000_000,test_usd,penumbra18vddyvr33t8ukka0c94spdg63fh4yycwdtl9h3gpf4wfmnn5t2wp64xtmmlexlrjn90um2l6ne75evw8cvu27eufstsm5m7dfs740cw04grtmdsvqyc2vmqz3h4wu6ecg6ejrf +10_000,nala,penumbra18vddyvr33t8ukka0c94spdg63fh4yycwdtl9h3gpf4wfmnn5t2wp64xtmmlexlrjn90um2l6ne75evw8cvu27eufstsm5m7dfs740cw04grtmdsvqyc2vmqz3h4wu6ecg6ejrf +5_000_000__000_000,upenumbra,penumbra1zx97wldrv7fku6p0mnuzr2kx4w2zurg869jvjh369vx5tf46krxh6nep0fc4mr82mmphq9w5jk69dav07ywym4ndndgszskfxkvzs6jke483fuqdr06n67kp86dj3er97tgs2p +50_000,gm,penumbra1zx97wldrv7fku6p0mnuzr2kx4w2zurg869jvjh369vx5tf46krxh6nep0fc4mr82mmphq9w5jk69dav07ywym4ndndgszskfxkvzs6jke483fuqdr06n67kp86dj3er97tgs2p +50_000,gn,penumbra1zx97wldrv7fku6p0mnuzr2kx4w2zurg869jvjh369vx5tf46krxh6nep0fc4mr82mmphq9w5jk69dav07ywym4ndndgszskfxkvzs6jke483fuqdr06n67kp86dj3er97tgs2p +25_000,pizza,penumbra1zx97wldrv7fku6p0mnuzr2kx4w2zurg869jvjh369vx5tf46krxh6nep0fc4mr82mmphq9w5jk69dav07ywym4ndndgszskfxkvzs6jke483fuqdr06n67kp86dj3er97tgs2p +250,cube,penumbra1zx97wldrv7fku6p0mnuzr2kx4w2zurg869jvjh369vx5tf46krxh6nep0fc4mr82mmphq9w5jk69dav07ywym4ndndgszskfxkvzs6jke483fuqdr06n67kp86dj3er97tgs2p +1_000_000,test_usd,penumbra1zx97wldrv7fku6p0mnuzr2kx4w2zurg869jvjh369vx5tf46krxh6nep0fc4mr82mmphq9w5jk69dav07ywym4ndndgszskfxkvzs6jke483fuqdr06n67kp86dj3er97tgs2p +10_000,nala,penumbra1zx97wldrv7fku6p0mnuzr2kx4w2zurg869jvjh369vx5tf46krxh6nep0fc4mr82mmphq9w5jk69dav07ywym4ndndgszskfxkvzs6jke483fuqdr06n67kp86dj3er97tgs2p +5_000_000__000_000,upenumbra,penumbra1s9lauumt0uceehtw03nvslann73vwtr2ytk2ej4vpdlrvxhmuze48hs08kguyyvj6td2epsr4y65wv7t3f5qwvmnx9tmpvltxck5jzycuvrumpv4dctq8339n85ep3la7kfvzs +50_000,gm,penumbra1s9lauumt0uceehtw03nvslann73vwtr2ytk2ej4vpdlrvxhmuze48hs08kguyyvj6td2epsr4y65wv7t3f5qwvmnx9tmpvltxck5jzycuvrumpv4dctq8339n85ep3la7kfvzs +50_000,gn,penumbra1s9lauumt0uceehtw03nvslann73vwtr2ytk2ej4vpdlrvxhmuze48hs08kguyyvj6td2epsr4y65wv7t3f5qwvmnx9tmpvltxck5jzycuvrumpv4dctq8339n85ep3la7kfvzs 
+25_000,pizza,penumbra1s9lauumt0uceehtw03nvslann73vwtr2ytk2ej4vpdlrvxhmuze48hs08kguyyvj6td2epsr4y65wv7t3f5qwvmnx9tmpvltxck5jzycuvrumpv4dctq8339n85ep3la7kfvzs +250,cube,penumbra1s9lauumt0uceehtw03nvslann73vwtr2ytk2ej4vpdlrvxhmuze48hs08kguyyvj6td2epsr4y65wv7t3f5qwvmnx9tmpvltxck5jzycuvrumpv4dctq8339n85ep3la7kfvzs +1_000_000,test_usd,penumbra1s9lauumt0uceehtw03nvslann73vwtr2ytk2ej4vpdlrvxhmuze48hs08kguyyvj6td2epsr4y65wv7t3f5qwvmnx9tmpvltxck5jzycuvrumpv4dctq8339n85ep3la7kfvzs +10_000,nala,penumbra1s9lauumt0uceehtw03nvslann73vwtr2ytk2ej4vpdlrvxhmuze48hs08kguyyvj6td2epsr4y65wv7t3f5qwvmnx9tmpvltxck5jzycuvrumpv4dctq8339n85ep3la7kfvzs +5_000_000__000_000,upenumbra,penumbra1khrsn8jqw8gxswqesq736zknfmtaeuccqrw4dlspn96hqnq80cxx32deke486tqf0644zxerkarwv8kwruh8yq45tegvavmc7uqj42cz77ecfhxcv0nn79vda60ccwsljxh34r +50_000,gm,penumbra1khrsn8jqw8gxswqesq736zknfmtaeuccqrw4dlspn96hqnq80cxx32deke486tqf0644zxerkarwv8kwruh8yq45tegvavmc7uqj42cz77ecfhxcv0nn79vda60ccwsljxh34r +50_000,gn,penumbra1khrsn8jqw8gxswqesq736zknfmtaeuccqrw4dlspn96hqnq80cxx32deke486tqf0644zxerkarwv8kwruh8yq45tegvavmc7uqj42cz77ecfhxcv0nn79vda60ccwsljxh34r +25_000,pizza,penumbra1khrsn8jqw8gxswqesq736zknfmtaeuccqrw4dlspn96hqnq80cxx32deke486tqf0644zxerkarwv8kwruh8yq45tegvavmc7uqj42cz77ecfhxcv0nn79vda60ccwsljxh34r +250,cube,penumbra1khrsn8jqw8gxswqesq736zknfmtaeuccqrw4dlspn96hqnq80cxx32deke486tqf0644zxerkarwv8kwruh8yq45tegvavmc7uqj42cz77ecfhxcv0nn79vda60ccwsljxh34r +1_000_000,test_usd,penumbra1khrsn8jqw8gxswqesq736zknfmtaeuccqrw4dlspn96hqnq80cxx32deke486tqf0644zxerkarwv8kwruh8yq45tegvavmc7uqj42cz77ecfhxcv0nn79vda60ccwsljxh34r +10_000,nala,penumbra1khrsn8jqw8gxswqesq736zknfmtaeuccqrw4dlspn96hqnq80cxx32deke486tqf0644zxerkarwv8kwruh8yq45tegvavmc7uqj42cz77ecfhxcv0nn79vda60ccwsljxh34r +5_000_000__000_000,upenumbra,penumbra1fuc2w752s2q4jzeayfhvvfusqndwplkz2hshuyr65cr9k33tnk8acc7crlfhkvqss9qan0j3q7cv6pagv44ljcu0jwk8g529n2yg539lnezjxajkmdf3fp2mq9rvjx9vv9es2a 
+50_000,gm,penumbra1fuc2w752s2q4jzeayfhvvfusqndwplkz2hshuyr65cr9k33tnk8acc7crlfhkvqss9qan0j3q7cv6pagv44ljcu0jwk8g529n2yg539lnezjxajkmdf3fp2mq9rvjx9vv9es2a +50_000,gn,penumbra1fuc2w752s2q4jzeayfhvvfusqndwplkz2hshuyr65cr9k33tnk8acc7crlfhkvqss9qan0j3q7cv6pagv44ljcu0jwk8g529n2yg539lnezjxajkmdf3fp2mq9rvjx9vv9es2a +25_000,pizza,penumbra1fuc2w752s2q4jzeayfhvvfusqndwplkz2hshuyr65cr9k33tnk8acc7crlfhkvqss9qan0j3q7cv6pagv44ljcu0jwk8g529n2yg539lnezjxajkmdf3fp2mq9rvjx9vv9es2a +250,cube,penumbra1fuc2w752s2q4jzeayfhvvfusqndwplkz2hshuyr65cr9k33tnk8acc7crlfhkvqss9qan0j3q7cv6pagv44ljcu0jwk8g529n2yg539lnezjxajkmdf3fp2mq9rvjx9vv9es2a +1_000_000,test_usd,penumbra1fuc2w752s2q4jzeayfhvvfusqndwplkz2hshuyr65cr9k33tnk8acc7crlfhkvqss9qan0j3q7cv6pagv44ljcu0jwk8g529n2yg539lnezjxajkmdf3fp2mq9rvjx9vv9es2a +10_000,nala,penumbra1fuc2w752s2q4jzeayfhvvfusqndwplkz2hshuyr65cr9k33tnk8acc7crlfhkvqss9qan0j3q7cv6pagv44ljcu0jwk8g529n2yg539lnezjxajkmdf3fp2mq9rvjx9vv9es2a +5_000_000__000_000,upenumbra,penumbra1n4y4k77hw5hl246mk8u4m7sw3muhgg6m4qmen3dvmz38vctw2htt4y4twwtmgpexcd7us6u7fa3xfmzst7yj3ur85mmmxas0xgj8d0aksqz9he3v4lhll0rmqgvxkgj827vuc0 +50_000,gm,penumbra1n4y4k77hw5hl246mk8u4m7sw3muhgg6m4qmen3dvmz38vctw2htt4y4twwtmgpexcd7us6u7fa3xfmzst7yj3ur85mmmxas0xgj8d0aksqz9he3v4lhll0rmqgvxkgj827vuc0 +50_000,gn,penumbra1n4y4k77hw5hl246mk8u4m7sw3muhgg6m4qmen3dvmz38vctw2htt4y4twwtmgpexcd7us6u7fa3xfmzst7yj3ur85mmmxas0xgj8d0aksqz9he3v4lhll0rmqgvxkgj827vuc0 +25_000,pizza,penumbra1n4y4k77hw5hl246mk8u4m7sw3muhgg6m4qmen3dvmz38vctw2htt4y4twwtmgpexcd7us6u7fa3xfmzst7yj3ur85mmmxas0xgj8d0aksqz9he3v4lhll0rmqgvxkgj827vuc0 +250,cube,penumbra1n4y4k77hw5hl246mk8u4m7sw3muhgg6m4qmen3dvmz38vctw2htt4y4twwtmgpexcd7us6u7fa3xfmzst7yj3ur85mmmxas0xgj8d0aksqz9he3v4lhll0rmqgvxkgj827vuc0 +1_000_000,test_usd,penumbra1n4y4k77hw5hl246mk8u4m7sw3muhgg6m4qmen3dvmz38vctw2htt4y4twwtmgpexcd7us6u7fa3xfmzst7yj3ur85mmmxas0xgj8d0aksqz9he3v4lhll0rmqgvxkgj827vuc0 
+10_000,nala,penumbra1n4y4k77hw5hl246mk8u4m7sw3muhgg6m4qmen3dvmz38vctw2htt4y4twwtmgpexcd7us6u7fa3xfmzst7yj3ur85mmmxas0xgj8d0aksqz9he3v4lhll0rmqgvxkgj827vuc0 +5_000_000__000_000,upenumbra,penumbra1lr0ns0jwkdkzzthplehh4wga8qnwcys8gxn9wrgvxvg4ypxfd04s3y4kw0nz8apw99ny84yd4wcks0z6vt7djdrh3hnsxgewn3d354s757jxj6c47vufr023wlgy8zsphy2669 +50_000,gm,penumbra1lr0ns0jwkdkzzthplehh4wga8qnwcys8gxn9wrgvxvg4ypxfd04s3y4kw0nz8apw99ny84yd4wcks0z6vt7djdrh3hnsxgewn3d354s757jxj6c47vufr023wlgy8zsphy2669 +50_000,gn,penumbra1lr0ns0jwkdkzzthplehh4wga8qnwcys8gxn9wrgvxvg4ypxfd04s3y4kw0nz8apw99ny84yd4wcks0z6vt7djdrh3hnsxgewn3d354s757jxj6c47vufr023wlgy8zsphy2669 +25_000,pizza,penumbra1lr0ns0jwkdkzzthplehh4wga8qnwcys8gxn9wrgvxvg4ypxfd04s3y4kw0nz8apw99ny84yd4wcks0z6vt7djdrh3hnsxgewn3d354s757jxj6c47vufr023wlgy8zsphy2669 +250,cube,penumbra1lr0ns0jwkdkzzthplehh4wga8qnwcys8gxn9wrgvxvg4ypxfd04s3y4kw0nz8apw99ny84yd4wcks0z6vt7djdrh3hnsxgewn3d354s757jxj6c47vufr023wlgy8zsphy2669 +1_000_000,test_usd,penumbra1lr0ns0jwkdkzzthplehh4wga8qnwcys8gxn9wrgvxvg4ypxfd04s3y4kw0nz8apw99ny84yd4wcks0z6vt7djdrh3hnsxgewn3d354s757jxj6c47vufr023wlgy8zsphy2669 +10_000,nala,penumbra1lr0ns0jwkdkzzthplehh4wga8qnwcys8gxn9wrgvxvg4ypxfd04s3y4kw0nz8apw99ny84yd4wcks0z6vt7djdrh3hnsxgewn3d354s757jxj6c47vufr023wlgy8zsphy2669 +5_000_000__000_000,upenumbra,penumbra1uhe5hvhdf583s0ldyrurtatj78pa2tmzt0lcf9awgu4fezkrdaxsllmy39l59xqwxaqhx88ganfgkdnjqp8wmvf0s5uy4r9nmglyd497wsu7ny49jfth3dcjhhgs6d6mjc0fxq +50_000,gm,penumbra1uhe5hvhdf583s0ldyrurtatj78pa2tmzt0lcf9awgu4fezkrdaxsllmy39l59xqwxaqhx88ganfgkdnjqp8wmvf0s5uy4r9nmglyd497wsu7ny49jfth3dcjhhgs6d6mjc0fxq +50_000,gn,penumbra1uhe5hvhdf583s0ldyrurtatj78pa2tmzt0lcf9awgu4fezkrdaxsllmy39l59xqwxaqhx88ganfgkdnjqp8wmvf0s5uy4r9nmglyd497wsu7ny49jfth3dcjhhgs6d6mjc0fxq +25_000,pizza,penumbra1uhe5hvhdf583s0ldyrurtatj78pa2tmzt0lcf9awgu4fezkrdaxsllmy39l59xqwxaqhx88ganfgkdnjqp8wmvf0s5uy4r9nmglyd497wsu7ny49jfth3dcjhhgs6d6mjc0fxq 
+250,cube,penumbra1uhe5hvhdf583s0ldyrurtatj78pa2tmzt0lcf9awgu4fezkrdaxsllmy39l59xqwxaqhx88ganfgkdnjqp8wmvf0s5uy4r9nmglyd497wsu7ny49jfth3dcjhhgs6d6mjc0fxq +1_000_000,test_usd,penumbra1uhe5hvhdf583s0ldyrurtatj78pa2tmzt0lcf9awgu4fezkrdaxsllmy39l59xqwxaqhx88ganfgkdnjqp8wmvf0s5uy4r9nmglyd497wsu7ny49jfth3dcjhhgs6d6mjc0fxq +10_000,nala,penumbra1uhe5hvhdf583s0ldyrurtatj78pa2tmzt0lcf9awgu4fezkrdaxsllmy39l59xqwxaqhx88ganfgkdnjqp8wmvf0s5uy4r9nmglyd497wsu7ny49jfth3dcjhhgs6d6mjc0fxq +5_000_000__000_000,upenumbra,penumbra1fljn093ptt5vxyyd0pgxkqjdu7sqhutsz9q6r0h3fhedw25eta7plthmz80yyvprf4yue64htzts20l9398m6w0uqqe42cn5jefupqa4q5lcez27zwwv3z8aq87pl34r3dddkq +50_000,gm,penumbra1fljn093ptt5vxyyd0pgxkqjdu7sqhutsz9q6r0h3fhedw25eta7plthmz80yyvprf4yue64htzts20l9398m6w0uqqe42cn5jefupqa4q5lcez27zwwv3z8aq87pl34r3dddkq +50_000,gn,penumbra1fljn093ptt5vxyyd0pgxkqjdu7sqhutsz9q6r0h3fhedw25eta7plthmz80yyvprf4yue64htzts20l9398m6w0uqqe42cn5jefupqa4q5lcez27zwwv3z8aq87pl34r3dddkq +25_000,pizza,penumbra1fljn093ptt5vxyyd0pgxkqjdu7sqhutsz9q6r0h3fhedw25eta7plthmz80yyvprf4yue64htzts20l9398m6w0uqqe42cn5jefupqa4q5lcez27zwwv3z8aq87pl34r3dddkq +250,cube,penumbra1fljn093ptt5vxyyd0pgxkqjdu7sqhutsz9q6r0h3fhedw25eta7plthmz80yyvprf4yue64htzts20l9398m6w0uqqe42cn5jefupqa4q5lcez27zwwv3z8aq87pl34r3dddkq +1_000_000,test_usd,penumbra1fljn093ptt5vxyyd0pgxkqjdu7sqhutsz9q6r0h3fhedw25eta7plthmz80yyvprf4yue64htzts20l9398m6w0uqqe42cn5jefupqa4q5lcez27zwwv3z8aq87pl34r3dddkq +10_000,nala,penumbra1fljn093ptt5vxyyd0pgxkqjdu7sqhutsz9q6r0h3fhedw25eta7plthmz80yyvprf4yue64htzts20l9398m6w0uqqe42cn5jefupqa4q5lcez27zwwv3z8aq87pl34r3dddkq +5_000_000__000_000,upenumbra,penumbra13ftasxxac2c3288dsud8decvk98gql0lapgcelj4zd4nx6h9086zlt5pdlk7wnh799fr8e8wqyh082uf4x5jhk5wpl6p9smgwp8dkrvryha5cx2psq6s03utsu7ce3qyzl30at +50_000,gm,penumbra13ftasxxac2c3288dsud8decvk98gql0lapgcelj4zd4nx6h9086zlt5pdlk7wnh799fr8e8wqyh082uf4x5jhk5wpl6p9smgwp8dkrvryha5cx2psq6s03utsu7ce3qyzl30at 
+50_000,gn,penumbra13ftasxxac2c3288dsud8decvk98gql0lapgcelj4zd4nx6h9086zlt5pdlk7wnh799fr8e8wqyh082uf4x5jhk5wpl6p9smgwp8dkrvryha5cx2psq6s03utsu7ce3qyzl30at +25_000,pizza,penumbra13ftasxxac2c3288dsud8decvk98gql0lapgcelj4zd4nx6h9086zlt5pdlk7wnh799fr8e8wqyh082uf4x5jhk5wpl6p9smgwp8dkrvryha5cx2psq6s03utsu7ce3qyzl30at +250,cube,penumbra13ftasxxac2c3288dsud8decvk98gql0lapgcelj4zd4nx6h9086zlt5pdlk7wnh799fr8e8wqyh082uf4x5jhk5wpl6p9smgwp8dkrvryha5cx2psq6s03utsu7ce3qyzl30at +1_000_000,test_usd,penumbra13ftasxxac2c3288dsud8decvk98gql0lapgcelj4zd4nx6h9086zlt5pdlk7wnh799fr8e8wqyh082uf4x5jhk5wpl6p9smgwp8dkrvryha5cx2psq6s03utsu7ce3qyzl30at +10_000,nala,penumbra13ftasxxac2c3288dsud8decvk98gql0lapgcelj4zd4nx6h9086zlt5pdlk7wnh799fr8e8wqyh082uf4x5jhk5wpl6p9smgwp8dkrvryha5cx2psq6s03utsu7ce3qyzl30at +5_000_000__000_000,upenumbra,penumbra1mjnf0wpx7slv5ztzhk5fztg7xauack82w8ey6yqna5aqpygjap350qnrelpnlz5dv9zzczate02s4uu89tf87c6d85yatauhzg6rk08u25epzeu7efks704uqn4glx5dsqscer +50_000,gm,penumbra1mjnf0wpx7slv5ztzhk5fztg7xauack82w8ey6yqna5aqpygjap350qnrelpnlz5dv9zzczate02s4uu89tf87c6d85yatauhzg6rk08u25epzeu7efks704uqn4glx5dsqscer +50_000,gn,penumbra1mjnf0wpx7slv5ztzhk5fztg7xauack82w8ey6yqna5aqpygjap350qnrelpnlz5dv9zzczate02s4uu89tf87c6d85yatauhzg6rk08u25epzeu7efks704uqn4glx5dsqscer +25_000,pizza,penumbra1mjnf0wpx7slv5ztzhk5fztg7xauack82w8ey6yqna5aqpygjap350qnrelpnlz5dv9zzczate02s4uu89tf87c6d85yatauhzg6rk08u25epzeu7efks704uqn4glx5dsqscer +250,cube,penumbra1mjnf0wpx7slv5ztzhk5fztg7xauack82w8ey6yqna5aqpygjap350qnrelpnlz5dv9zzczate02s4uu89tf87c6d85yatauhzg6rk08u25epzeu7efks704uqn4glx5dsqscer +1_000_000,test_usd,penumbra1mjnf0wpx7slv5ztzhk5fztg7xauack82w8ey6yqna5aqpygjap350qnrelpnlz5dv9zzczate02s4uu89tf87c6d85yatauhzg6rk08u25epzeu7efks704uqn4glx5dsqscer +10_000,nala,penumbra1mjnf0wpx7slv5ztzhk5fztg7xauack82w8ey6yqna5aqpygjap350qnrelpnlz5dv9zzczate02s4uu89tf87c6d85yatauhzg6rk08u25epzeu7efks704uqn4glx5dsqscer 
+5_000_000__000_000,upenumbra,penumbra13fwnptqqpvq8r8v90phns04sewtdls8mtcce35sqffypdl8r8az0ayw926lnef508sk4wa9njxxfyp84tcexljgv0mq8vcz2fesrrth8wrkmwcj3q7k7jyk6lcet9pfwyvj5yp +50_000,gm,penumbra13fwnptqqpvq8r8v90phns04sewtdls8mtcce35sqffypdl8r8az0ayw926lnef508sk4wa9njxxfyp84tcexljgv0mq8vcz2fesrrth8wrkmwcj3q7k7jyk6lcet9pfwyvj5yp +50_000,gn,penumbra13fwnptqqpvq8r8v90phns04sewtdls8mtcce35sqffypdl8r8az0ayw926lnef508sk4wa9njxxfyp84tcexljgv0mq8vcz2fesrrth8wrkmwcj3q7k7jyk6lcet9pfwyvj5yp +25_000,pizza,penumbra13fwnptqqpvq8r8v90phns04sewtdls8mtcce35sqffypdl8r8az0ayw926lnef508sk4wa9njxxfyp84tcexljgv0mq8vcz2fesrrth8wrkmwcj3q7k7jyk6lcet9pfwyvj5yp +250,cube,penumbra13fwnptqqpvq8r8v90phns04sewtdls8mtcce35sqffypdl8r8az0ayw926lnef508sk4wa9njxxfyp84tcexljgv0mq8vcz2fesrrth8wrkmwcj3q7k7jyk6lcet9pfwyvj5yp +1_000_000,test_usd,penumbra13fwnptqqpvq8r8v90phns04sewtdls8mtcce35sqffypdl8r8az0ayw926lnef508sk4wa9njxxfyp84tcexljgv0mq8vcz2fesrrth8wrkmwcj3q7k7jyk6lcet9pfwyvj5yp +10_000,nala,penumbra13fwnptqqpvq8r8v90phns04sewtdls8mtcce35sqffypdl8r8az0ayw926lnef508sk4wa9njxxfyp84tcexljgv0mq8vcz2fesrrth8wrkmwcj3q7k7jyk6lcet9pfwyvj5yp +5_000_000__000_000,upenumbra,penumbra1zy3rrk6dk0r25kq6kfaecs55rlh8egdx2crs54anz70qtlx6jyyywv4g2ah8hh6pjftzr9w296zrjcagys4ztdhwfnjuvdj3js6ltmld48wwl4m0kgmr3asmdxw85ty79lddv0 +50_000,gm,penumbra1zy3rrk6dk0r25kq6kfaecs55rlh8egdx2crs54anz70qtlx6jyyywv4g2ah8hh6pjftzr9w296zrjcagys4ztdhwfnjuvdj3js6ltmld48wwl4m0kgmr3asmdxw85ty79lddv0 +50_000,gn,penumbra1zy3rrk6dk0r25kq6kfaecs55rlh8egdx2crs54anz70qtlx6jyyywv4g2ah8hh6pjftzr9w296zrjcagys4ztdhwfnjuvdj3js6ltmld48wwl4m0kgmr3asmdxw85ty79lddv0 +25_000,pizza,penumbra1zy3rrk6dk0r25kq6kfaecs55rlh8egdx2crs54anz70qtlx6jyyywv4g2ah8hh6pjftzr9w296zrjcagys4ztdhwfnjuvdj3js6ltmld48wwl4m0kgmr3asmdxw85ty79lddv0 +250,cube,penumbra1zy3rrk6dk0r25kq6kfaecs55rlh8egdx2crs54anz70qtlx6jyyywv4g2ah8hh6pjftzr9w296zrjcagys4ztdhwfnjuvdj3js6ltmld48wwl4m0kgmr3asmdxw85ty79lddv0 
+1_000_000,test_usd,penumbra1zy3rrk6dk0r25kq6kfaecs55rlh8egdx2crs54anz70qtlx6jyyywv4g2ah8hh6pjftzr9w296zrjcagys4ztdhwfnjuvdj3js6ltmld48wwl4m0kgmr3asmdxw85ty79lddv0 +10_000,nala,penumbra1zy3rrk6dk0r25kq6kfaecs55rlh8egdx2crs54anz70qtlx6jyyywv4g2ah8hh6pjftzr9w296zrjcagys4ztdhwfnjuvdj3js6ltmld48wwl4m0kgmr3asmdxw85ty79lddv0 +5_000_000__000_000,upenumbra,penumbra1xuxvkfwszgu4s6s3c44kcpr40lawrddaknsqk705tfcea2fxvmk87kkurj7my4w92jraxrmfjlyvp0fqpyttpp5tqt96pw54nnndjglgmqjnjev9drtrmrx2x60ejr3sf08r82 +50_000,gm,penumbra1xuxvkfwszgu4s6s3c44kcpr40lawrddaknsqk705tfcea2fxvmk87kkurj7my4w92jraxrmfjlyvp0fqpyttpp5tqt96pw54nnndjglgmqjnjev9drtrmrx2x60ejr3sf08r82 +50_000,gn,penumbra1xuxvkfwszgu4s6s3c44kcpr40lawrddaknsqk705tfcea2fxvmk87kkurj7my4w92jraxrmfjlyvp0fqpyttpp5tqt96pw54nnndjglgmqjnjev9drtrmrx2x60ejr3sf08r82 +25_000,pizza,penumbra1xuxvkfwszgu4s6s3c44kcpr40lawrddaknsqk705tfcea2fxvmk87kkurj7my4w92jraxrmfjlyvp0fqpyttpp5tqt96pw54nnndjglgmqjnjev9drtrmrx2x60ejr3sf08r82 +250,cube,penumbra1xuxvkfwszgu4s6s3c44kcpr40lawrddaknsqk705tfcea2fxvmk87kkurj7my4w92jraxrmfjlyvp0fqpyttpp5tqt96pw54nnndjglgmqjnjev9drtrmrx2x60ejr3sf08r82 +1_000_000,test_usd,penumbra1xuxvkfwszgu4s6s3c44kcpr40lawrddaknsqk705tfcea2fxvmk87kkurj7my4w92jraxrmfjlyvp0fqpyttpp5tqt96pw54nnndjglgmqjnjev9drtrmrx2x60ejr3sf08r82 +10_000,nala,penumbra1xuxvkfwszgu4s6s3c44kcpr40lawrddaknsqk705tfcea2fxvmk87kkurj7my4w92jraxrmfjlyvp0fqpyttpp5tqt96pw54nnndjglgmqjnjev9drtrmrx2x60ejr3sf08r82 +5_000_000__000_000,upenumbra,penumbra10r8mu4dzf8v3a4k0q30h300e7y9pns0u76xhagqfwqr2wt6pqdgwjf4q3uq6wwn8mhas0w2a9qm3quqjk09yljetuwaf29x2r8jq69l93euhjhhfwkdrnk7x7zn7cfa2xjuhk5 +50_000,gm,penumbra10r8mu4dzf8v3a4k0q30h300e7y9pns0u76xhagqfwqr2wt6pqdgwjf4q3uq6wwn8mhas0w2a9qm3quqjk09yljetuwaf29x2r8jq69l93euhjhhfwkdrnk7x7zn7cfa2xjuhk5 +50_000,gn,penumbra10r8mu4dzf8v3a4k0q30h300e7y9pns0u76xhagqfwqr2wt6pqdgwjf4q3uq6wwn8mhas0w2a9qm3quqjk09yljetuwaf29x2r8jq69l93euhjhhfwkdrnk7x7zn7cfa2xjuhk5 
+25_000,pizza,penumbra10r8mu4dzf8v3a4k0q30h300e7y9pns0u76xhagqfwqr2wt6pqdgwjf4q3uq6wwn8mhas0w2a9qm3quqjk09yljetuwaf29x2r8jq69l93euhjhhfwkdrnk7x7zn7cfa2xjuhk5 +250,cube,penumbra10r8mu4dzf8v3a4k0q30h300e7y9pns0u76xhagqfwqr2wt6pqdgwjf4q3uq6wwn8mhas0w2a9qm3quqjk09yljetuwaf29x2r8jq69l93euhjhhfwkdrnk7x7zn7cfa2xjuhk5 +1_000_000,test_usd,penumbra10r8mu4dzf8v3a4k0q30h300e7y9pns0u76xhagqfwqr2wt6pqdgwjf4q3uq6wwn8mhas0w2a9qm3quqjk09yljetuwaf29x2r8jq69l93euhjhhfwkdrnk7x7zn7cfa2xjuhk5 +10_000,nala,penumbra10r8mu4dzf8v3a4k0q30h300e7y9pns0u76xhagqfwqr2wt6pqdgwjf4q3uq6wwn8mhas0w2a9qm3quqjk09yljetuwaf29x2r8jq69l93euhjhhfwkdrnk7x7zn7cfa2xjuhk5 +5_000_000__000_000,upenumbra,penumbra1yw9pvm5ujry02yv7f67czu2pgqf36akpmvq90tk6f3h7pwn6y5vqnsg90ejn7zvu7rvmxd6stz4cqdazk0fvyurkvrkzau2wn275xaxfszsrd0mxqpegprdpu8wtjrk3snc2vx +50_000,gm,penumbra1yw9pvm5ujry02yv7f67czu2pgqf36akpmvq90tk6f3h7pwn6y5vqnsg90ejn7zvu7rvmxd6stz4cqdazk0fvyurkvrkzau2wn275xaxfszsrd0mxqpegprdpu8wtjrk3snc2vx +50_000,gn,penumbra1yw9pvm5ujry02yv7f67czu2pgqf36akpmvq90tk6f3h7pwn6y5vqnsg90ejn7zvu7rvmxd6stz4cqdazk0fvyurkvrkzau2wn275xaxfszsrd0mxqpegprdpu8wtjrk3snc2vx +25_000,pizza,penumbra1yw9pvm5ujry02yv7f67czu2pgqf36akpmvq90tk6f3h7pwn6y5vqnsg90ejn7zvu7rvmxd6stz4cqdazk0fvyurkvrkzau2wn275xaxfszsrd0mxqpegprdpu8wtjrk3snc2vx +250,cube,penumbra1yw9pvm5ujry02yv7f67czu2pgqf36akpmvq90tk6f3h7pwn6y5vqnsg90ejn7zvu7rvmxd6stz4cqdazk0fvyurkvrkzau2wn275xaxfszsrd0mxqpegprdpu8wtjrk3snc2vx +1_000_000,test_usd,penumbra1yw9pvm5ujry02yv7f67czu2pgqf36akpmvq90tk6f3h7pwn6y5vqnsg90ejn7zvu7rvmxd6stz4cqdazk0fvyurkvrkzau2wn275xaxfszsrd0mxqpegprdpu8wtjrk3snc2vx +10_000,nala,penumbra1yw9pvm5ujry02yv7f67czu2pgqf36akpmvq90tk6f3h7pwn6y5vqnsg90ejn7zvu7rvmxd6stz4cqdazk0fvyurkvrkzau2wn275xaxfszsrd0mxqpegprdpu8wtjrk3snc2vx +5_000_000__000_000,upenumbra,penumbra19ps33tjkfgntse69wewdgk2axx0tj5h23mvsw436h7pnnr3jnfxfksh0jw8hy0mssp4hpwkkh6pje5jfm4kkj2j59s3tdaweka9sywktqfkxkha6nzj36vatvfjzpglvxyh4xz 
+50_000,gm,penumbra19ps33tjkfgntse69wewdgk2axx0tj5h23mvsw436h7pnnr3jnfxfksh0jw8hy0mssp4hpwkkh6pje5jfm4kkj2j59s3tdaweka9sywktqfkxkha6nzj36vatvfjzpglvxyh4xz +50_000,gn,penumbra19ps33tjkfgntse69wewdgk2axx0tj5h23mvsw436h7pnnr3jnfxfksh0jw8hy0mssp4hpwkkh6pje5jfm4kkj2j59s3tdaweka9sywktqfkxkha6nzj36vatvfjzpglvxyh4xz +25_000,pizza,penumbra19ps33tjkfgntse69wewdgk2axx0tj5h23mvsw436h7pnnr3jnfxfksh0jw8hy0mssp4hpwkkh6pje5jfm4kkj2j59s3tdaweka9sywktqfkxkha6nzj36vatvfjzpglvxyh4xz +250,cube,penumbra19ps33tjkfgntse69wewdgk2axx0tj5h23mvsw436h7pnnr3jnfxfksh0jw8hy0mssp4hpwkkh6pje5jfm4kkj2j59s3tdaweka9sywktqfkxkha6nzj36vatvfjzpglvxyh4xz +1_000_000,test_usd,penumbra19ps33tjkfgntse69wewdgk2axx0tj5h23mvsw436h7pnnr3jnfxfksh0jw8hy0mssp4hpwkkh6pje5jfm4kkj2j59s3tdaweka9sywktqfkxkha6nzj36vatvfjzpglvxyh4xz +10_000,nala,penumbra19ps33tjkfgntse69wewdgk2axx0tj5h23mvsw436h7pnnr3jnfxfksh0jw8hy0mssp4hpwkkh6pje5jfm4kkj2j59s3tdaweka9sywktqfkxkha6nzj36vatvfjzpglvxyh4xz +5_000_000__000_000,upenumbra,penumbra1d3au0cyjjzhmlr9umplm8s5v8hmw9242r8ve2j9rsd7m72npw6snw8s4r0j4cur8n9c7l6mwf9884arxd5cmw9c8htmym46nap3v8vpmnf0kcyrj9q44wcgvcjyjaegvptwvk5 +50_000,gm,penumbra1d3au0cyjjzhmlr9umplm8s5v8hmw9242r8ve2j9rsd7m72npw6snw8s4r0j4cur8n9c7l6mwf9884arxd5cmw9c8htmym46nap3v8vpmnf0kcyrj9q44wcgvcjyjaegvptwvk5 +50_000,gn,penumbra1d3au0cyjjzhmlr9umplm8s5v8hmw9242r8ve2j9rsd7m72npw6snw8s4r0j4cur8n9c7l6mwf9884arxd5cmw9c8htmym46nap3v8vpmnf0kcyrj9q44wcgvcjyjaegvptwvk5 +25_000,pizza,penumbra1d3au0cyjjzhmlr9umplm8s5v8hmw9242r8ve2j9rsd7m72npw6snw8s4r0j4cur8n9c7l6mwf9884arxd5cmw9c8htmym46nap3v8vpmnf0kcyrj9q44wcgvcjyjaegvptwvk5 +250,cube,penumbra1d3au0cyjjzhmlr9umplm8s5v8hmw9242r8ve2j9rsd7m72npw6snw8s4r0j4cur8n9c7l6mwf9884arxd5cmw9c8htmym46nap3v8vpmnf0kcyrj9q44wcgvcjyjaegvptwvk5 +1_000_000,test_usd,penumbra1d3au0cyjjzhmlr9umplm8s5v8hmw9242r8ve2j9rsd7m72npw6snw8s4r0j4cur8n9c7l6mwf9884arxd5cmw9c8htmym46nap3v8vpmnf0kcyrj9q44wcgvcjyjaegvptwvk5 
+10_000,nala,penumbra1d3au0cyjjzhmlr9umplm8s5v8hmw9242r8ve2j9rsd7m72npw6snw8s4r0j4cur8n9c7l6mwf9884arxd5cmw9c8htmym46nap3v8vpmnf0kcyrj9q44wcgvcjyjaegvptwvk5 +5_000_000__000_000,upenumbra,penumbra1r3jn4v5jgl4ed3a9wg065zvajzu7kgcpdmtejjj9a0shtu4wy507vfcpdyx5t8pevs2k99a86kd4r4rghu5f7kd0h8n8p389cp973w82n5ylamhr95d4qz9wftrn3cwu2pfgzu +50_000,gm,penumbra1r3jn4v5jgl4ed3a9wg065zvajzu7kgcpdmtejjj9a0shtu4wy507vfcpdyx5t8pevs2k99a86kd4r4rghu5f7kd0h8n8p389cp973w82n5ylamhr95d4qz9wftrn3cwu2pfgzu +50_000,gn,penumbra1r3jn4v5jgl4ed3a9wg065zvajzu7kgcpdmtejjj9a0shtu4wy507vfcpdyx5t8pevs2k99a86kd4r4rghu5f7kd0h8n8p389cp973w82n5ylamhr95d4qz9wftrn3cwu2pfgzu +25_000,pizza,penumbra1r3jn4v5jgl4ed3a9wg065zvajzu7kgcpdmtejjj9a0shtu4wy507vfcpdyx5t8pevs2k99a86kd4r4rghu5f7kd0h8n8p389cp973w82n5ylamhr95d4qz9wftrn3cwu2pfgzu +250,cube,penumbra1r3jn4v5jgl4ed3a9wg065zvajzu7kgcpdmtejjj9a0shtu4wy507vfcpdyx5t8pevs2k99a86kd4r4rghu5f7kd0h8n8p389cp973w82n5ylamhr95d4qz9wftrn3cwu2pfgzu +1_000_000,test_usd,penumbra1r3jn4v5jgl4ed3a9wg065zvajzu7kgcpdmtejjj9a0shtu4wy507vfcpdyx5t8pevs2k99a86kd4r4rghu5f7kd0h8n8p389cp973w82n5ylamhr95d4qz9wftrn3cwu2pfgzu +10_000,nala,penumbra1r3jn4v5jgl4ed3a9wg065zvajzu7kgcpdmtejjj9a0shtu4wy507vfcpdyx5t8pevs2k99a86kd4r4rghu5f7kd0h8n8p389cp973w82n5ylamhr95d4qz9wftrn3cwu2pfgzu +5_000_000__000_000,upenumbra,penumbra1zearguc6jdx29yevaxacyfvhwmzvah72lwfw8ejtjezcs7u7vxpz7etwujqq0zmh0aqlkcfx2nednv57vpzj5gc7dg6wcjshdr05rdfza4uzp6tp0lyj2xafd6msp3p62u3h0w +50_000,gm,penumbra1zearguc6jdx29yevaxacyfvhwmzvah72lwfw8ejtjezcs7u7vxpz7etwujqq0zmh0aqlkcfx2nednv57vpzj5gc7dg6wcjshdr05rdfza4uzp6tp0lyj2xafd6msp3p62u3h0w +50_000,gn,penumbra1zearguc6jdx29yevaxacyfvhwmzvah72lwfw8ejtjezcs7u7vxpz7etwujqq0zmh0aqlkcfx2nednv57vpzj5gc7dg6wcjshdr05rdfza4uzp6tp0lyj2xafd6msp3p62u3h0w +25_000,pizza,penumbra1zearguc6jdx29yevaxacyfvhwmzvah72lwfw8ejtjezcs7u7vxpz7etwujqq0zmh0aqlkcfx2nednv57vpzj5gc7dg6wcjshdr05rdfza4uzp6tp0lyj2xafd6msp3p62u3h0w 
+250,cube,penumbra1zearguc6jdx29yevaxacyfvhwmzvah72lwfw8ejtjezcs7u7vxpz7etwujqq0zmh0aqlkcfx2nednv57vpzj5gc7dg6wcjshdr05rdfza4uzp6tp0lyj2xafd6msp3p62u3h0w +1_000_000,test_usd,penumbra1zearguc6jdx29yevaxacyfvhwmzvah72lwfw8ejtjezcs7u7vxpz7etwujqq0zmh0aqlkcfx2nednv57vpzj5gc7dg6wcjshdr05rdfza4uzp6tp0lyj2xafd6msp3p62u3h0w +10_000,nala,penumbra1zearguc6jdx29yevaxacyfvhwmzvah72lwfw8ejtjezcs7u7vxpz7etwujqq0zmh0aqlkcfx2nednv57vpzj5gc7dg6wcjshdr05rdfza4uzp6tp0lyj2xafd6msp3p62u3h0w +5_000_000__000_000,upenumbra,penumbra1vw6j4uankvltjwz08khc7tx0u7fsgjc4x58skfvxfda079rqys73y88kwank6mncr5xewz4ahy7863s0flnshvqyaapxtcjv0wrmtpgdue9f8v9hp3m9k85rhhd59mzta53xvm +50_000,gm,penumbra1vw6j4uankvltjwz08khc7tx0u7fsgjc4x58skfvxfda079rqys73y88kwank6mncr5xewz4ahy7863s0flnshvqyaapxtcjv0wrmtpgdue9f8v9hp3m9k85rhhd59mzta53xvm +50_000,gn,penumbra1vw6j4uankvltjwz08khc7tx0u7fsgjc4x58skfvxfda079rqys73y88kwank6mncr5xewz4ahy7863s0flnshvqyaapxtcjv0wrmtpgdue9f8v9hp3m9k85rhhd59mzta53xvm +25_000,pizza,penumbra1vw6j4uankvltjwz08khc7tx0u7fsgjc4x58skfvxfda079rqys73y88kwank6mncr5xewz4ahy7863s0flnshvqyaapxtcjv0wrmtpgdue9f8v9hp3m9k85rhhd59mzta53xvm +250,cube,penumbra1vw6j4uankvltjwz08khc7tx0u7fsgjc4x58skfvxfda079rqys73y88kwank6mncr5xewz4ahy7863s0flnshvqyaapxtcjv0wrmtpgdue9f8v9hp3m9k85rhhd59mzta53xvm +1_000_000,test_usd,penumbra1vw6j4uankvltjwz08khc7tx0u7fsgjc4x58skfvxfda079rqys73y88kwank6mncr5xewz4ahy7863s0flnshvqyaapxtcjv0wrmtpgdue9f8v9hp3m9k85rhhd59mzta53xvm +10_000,nala,penumbra1vw6j4uankvltjwz08khc7tx0u7fsgjc4x58skfvxfda079rqys73y88kwank6mncr5xewz4ahy7863s0flnshvqyaapxtcjv0wrmtpgdue9f8v9hp3m9k85rhhd59mzta53xvm +5_000_000__000_000,upenumbra,penumbra1e2tn2yzapw8mxncn3uwjqe2k8fsdvdxu3pcswlwl80a8gf87glrq3mqstc7rlcxa7snk6vke8rrwzxytpf4w3xzhy2edhhuxde4u2z48w7a2f97xea3z5da9pk3h2j0axn294m +50_000,gm,penumbra1e2tn2yzapw8mxncn3uwjqe2k8fsdvdxu3pcswlwl80a8gf87glrq3mqstc7rlcxa7snk6vke8rrwzxytpf4w3xzhy2edhhuxde4u2z48w7a2f97xea3z5da9pk3h2j0axn294m 
+50_000,gn,penumbra1e2tn2yzapw8mxncn3uwjqe2k8fsdvdxu3pcswlwl80a8gf87glrq3mqstc7rlcxa7snk6vke8rrwzxytpf4w3xzhy2edhhuxde4u2z48w7a2f97xea3z5da9pk3h2j0axn294m +25_000,pizza,penumbra1e2tn2yzapw8mxncn3uwjqe2k8fsdvdxu3pcswlwl80a8gf87glrq3mqstc7rlcxa7snk6vke8rrwzxytpf4w3xzhy2edhhuxde4u2z48w7a2f97xea3z5da9pk3h2j0axn294m +250,cube,penumbra1e2tn2yzapw8mxncn3uwjqe2k8fsdvdxu3pcswlwl80a8gf87glrq3mqstc7rlcxa7snk6vke8rrwzxytpf4w3xzhy2edhhuxde4u2z48w7a2f97xea3z5da9pk3h2j0axn294m +1_000_000,test_usd,penumbra1e2tn2yzapw8mxncn3uwjqe2k8fsdvdxu3pcswlwl80a8gf87glrq3mqstc7rlcxa7snk6vke8rrwzxytpf4w3xzhy2edhhuxde4u2z48w7a2f97xea3z5da9pk3h2j0axn294m +10_000,nala,penumbra1e2tn2yzapw8mxncn3uwjqe2k8fsdvdxu3pcswlwl80a8gf87glrq3mqstc7rlcxa7snk6vke8rrwzxytpf4w3xzhy2edhhuxde4u2z48w7a2f97xea3z5da9pk3h2j0axn294m +5_000_000__000_000,upenumbra,penumbra1u3x4wdz7j8qfx7q5hte40u9gq788qz5zn4aqlr5kfzw3dxzxyktxjq2y0ma3tnd6ufwzszm25spg45my74fqn3qmeq9laj3cz3pr7eqym6f4gldsngjrwm22gf4damepgkqegk +50_000,gm,penumbra1u3x4wdz7j8qfx7q5hte40u9gq788qz5zn4aqlr5kfzw3dxzxyktxjq2y0ma3tnd6ufwzszm25spg45my74fqn3qmeq9laj3cz3pr7eqym6f4gldsngjrwm22gf4damepgkqegk +50_000,gn,penumbra1u3x4wdz7j8qfx7q5hte40u9gq788qz5zn4aqlr5kfzw3dxzxyktxjq2y0ma3tnd6ufwzszm25spg45my74fqn3qmeq9laj3cz3pr7eqym6f4gldsngjrwm22gf4damepgkqegk +25_000,pizza,penumbra1u3x4wdz7j8qfx7q5hte40u9gq788qz5zn4aqlr5kfzw3dxzxyktxjq2y0ma3tnd6ufwzszm25spg45my74fqn3qmeq9laj3cz3pr7eqym6f4gldsngjrwm22gf4damepgkqegk +250,cube,penumbra1u3x4wdz7j8qfx7q5hte40u9gq788qz5zn4aqlr5kfzw3dxzxyktxjq2y0ma3tnd6ufwzszm25spg45my74fqn3qmeq9laj3cz3pr7eqym6f4gldsngjrwm22gf4damepgkqegk +1_000_000,test_usd,penumbra1u3x4wdz7j8qfx7q5hte40u9gq788qz5zn4aqlr5kfzw3dxzxyktxjq2y0ma3tnd6ufwzszm25spg45my74fqn3qmeq9laj3cz3pr7eqym6f4gldsngjrwm22gf4damepgkqegk +10_000,nala,penumbra1u3x4wdz7j8qfx7q5hte40u9gq788qz5zn4aqlr5kfzw3dxzxyktxjq2y0ma3tnd6ufwzszm25spg45my74fqn3qmeq9laj3cz3pr7eqym6f4gldsngjrwm22gf4damepgkqegk 
+5_000_000__000_000,upenumbra,penumbra1l6kpj6e5d7ph0nnggxwgwhk5wumm7k2vjulq0jysdvh7y2p0r0qe5dr7jntl8wz6xjt08dqvu8dnn49fnxhta5mfcdl0kjpruhj76umulw2dssslansy7qv6k3ajeh72qz2all +50_000,gm,penumbra1l6kpj6e5d7ph0nnggxwgwhk5wumm7k2vjulq0jysdvh7y2p0r0qe5dr7jntl8wz6xjt08dqvu8dnn49fnxhta5mfcdl0kjpruhj76umulw2dssslansy7qv6k3ajeh72qz2all +50_000,gn,penumbra1l6kpj6e5d7ph0nnggxwgwhk5wumm7k2vjulq0jysdvh7y2p0r0qe5dr7jntl8wz6xjt08dqvu8dnn49fnxhta5mfcdl0kjpruhj76umulw2dssslansy7qv6k3ajeh72qz2all +25_000,pizza,penumbra1l6kpj6e5d7ph0nnggxwgwhk5wumm7k2vjulq0jysdvh7y2p0r0qe5dr7jntl8wz6xjt08dqvu8dnn49fnxhta5mfcdl0kjpruhj76umulw2dssslansy7qv6k3ajeh72qz2all +250,cube,penumbra1l6kpj6e5d7ph0nnggxwgwhk5wumm7k2vjulq0jysdvh7y2p0r0qe5dr7jntl8wz6xjt08dqvu8dnn49fnxhta5mfcdl0kjpruhj76umulw2dssslansy7qv6k3ajeh72qz2all +1_000_000,test_usd,penumbra1l6kpj6e5d7ph0nnggxwgwhk5wumm7k2vjulq0jysdvh7y2p0r0qe5dr7jntl8wz6xjt08dqvu8dnn49fnxhta5mfcdl0kjpruhj76umulw2dssslansy7qv6k3ajeh72qz2all +10_000,nala,penumbra1l6kpj6e5d7ph0nnggxwgwhk5wumm7k2vjulq0jysdvh7y2p0r0qe5dr7jntl8wz6xjt08dqvu8dnn49fnxhta5mfcdl0kjpruhj76umulw2dssslansy7qv6k3ajeh72qz2all +5_000_000__000_000,upenumbra,penumbra12hcj6z3xxk4an08msjf2fu0qk90nv6582lqzlmjwnnd75xz6u928wrcw8vpqxd489d6nqyvf7raq7jv75hg3qsa0ut9j2sxhfqfwrh56v064xh7hemdp898mls38v8ca76et3x +50_000,gm,penumbra12hcj6z3xxk4an08msjf2fu0qk90nv6582lqzlmjwnnd75xz6u928wrcw8vpqxd489d6nqyvf7raq7jv75hg3qsa0ut9j2sxhfqfwrh56v064xh7hemdp898mls38v8ca76et3x +50_000,gn,penumbra12hcj6z3xxk4an08msjf2fu0qk90nv6582lqzlmjwnnd75xz6u928wrcw8vpqxd489d6nqyvf7raq7jv75hg3qsa0ut9j2sxhfqfwrh56v064xh7hemdp898mls38v8ca76et3x +25_000,pizza,penumbra12hcj6z3xxk4an08msjf2fu0qk90nv6582lqzlmjwnnd75xz6u928wrcw8vpqxd489d6nqyvf7raq7jv75hg3qsa0ut9j2sxhfqfwrh56v064xh7hemdp898mls38v8ca76et3x +250,cube,penumbra12hcj6z3xxk4an08msjf2fu0qk90nv6582lqzlmjwnnd75xz6u928wrcw8vpqxd489d6nqyvf7raq7jv75hg3qsa0ut9j2sxhfqfwrh56v064xh7hemdp898mls38v8ca76et3x 
+1_000_000,test_usd,penumbra12hcj6z3xxk4an08msjf2fu0qk90nv6582lqzlmjwnnd75xz6u928wrcw8vpqxd489d6nqyvf7raq7jv75hg3qsa0ut9j2sxhfqfwrh56v064xh7hemdp898mls38v8ca76et3x +10_000,nala,penumbra12hcj6z3xxk4an08msjf2fu0qk90nv6582lqzlmjwnnd75xz6u928wrcw8vpqxd489d6nqyvf7raq7jv75hg3qsa0ut9j2sxhfqfwrh56v064xh7hemdp898mls38v8ca76et3x +5_000_000__000_000,upenumbra,penumbra1nzyf0qh28727tv56wcyj4v5c8ur9vfel52uxmpmypzs7dyn5986ng3j9l3lnuyj73cls5dgrhn7exe7j9cp2kx6rnf6ggus6s3w2kkd8kf6ydr5wfplw34vzxrk9e8n20ckm70 +50_000,gm,penumbra1nzyf0qh28727tv56wcyj4v5c8ur9vfel52uxmpmypzs7dyn5986ng3j9l3lnuyj73cls5dgrhn7exe7j9cp2kx6rnf6ggus6s3w2kkd8kf6ydr5wfplw34vzxrk9e8n20ckm70 +50_000,gn,penumbra1nzyf0qh28727tv56wcyj4v5c8ur9vfel52uxmpmypzs7dyn5986ng3j9l3lnuyj73cls5dgrhn7exe7j9cp2kx6rnf6ggus6s3w2kkd8kf6ydr5wfplw34vzxrk9e8n20ckm70 +25_000,pizza,penumbra1nzyf0qh28727tv56wcyj4v5c8ur9vfel52uxmpmypzs7dyn5986ng3j9l3lnuyj73cls5dgrhn7exe7j9cp2kx6rnf6ggus6s3w2kkd8kf6ydr5wfplw34vzxrk9e8n20ckm70 +250,cube,penumbra1nzyf0qh28727tv56wcyj4v5c8ur9vfel52uxmpmypzs7dyn5986ng3j9l3lnuyj73cls5dgrhn7exe7j9cp2kx6rnf6ggus6s3w2kkd8kf6ydr5wfplw34vzxrk9e8n20ckm70 +1_000_000,test_usd,penumbra1nzyf0qh28727tv56wcyj4v5c8ur9vfel52uxmpmypzs7dyn5986ng3j9l3lnuyj73cls5dgrhn7exe7j9cp2kx6rnf6ggus6s3w2kkd8kf6ydr5wfplw34vzxrk9e8n20ckm70 +10_000,nala,penumbra1nzyf0qh28727tv56wcyj4v5c8ur9vfel52uxmpmypzs7dyn5986ng3j9l3lnuyj73cls5dgrhn7exe7j9cp2kx6rnf6ggus6s3w2kkd8kf6ydr5wfplw34vzxrk9e8n20ckm70 +5_000_000__000_000,upenumbra,penumbra1kllhj0jprucna84r8pnwsngccr0dl2erdra3sa75mhvprr688e98j2cl9j7ls9njvufcwhg03jzm8zea3rmgaevk2y42yy2gzxq4qgpm4cnx5xe4wz2qj4f3ramhxrpqspju04 +50_000,gm,penumbra1kllhj0jprucna84r8pnwsngccr0dl2erdra3sa75mhvprr688e98j2cl9j7ls9njvufcwhg03jzm8zea3rmgaevk2y42yy2gzxq4qgpm4cnx5xe4wz2qj4f3ramhxrpqspju04 +50_000,gn,penumbra1kllhj0jprucna84r8pnwsngccr0dl2erdra3sa75mhvprr688e98j2cl9j7ls9njvufcwhg03jzm8zea3rmgaevk2y42yy2gzxq4qgpm4cnx5xe4wz2qj4f3ramhxrpqspju04 
+25_000,pizza,penumbra1kllhj0jprucna84r8pnwsngccr0dl2erdra3sa75mhvprr688e98j2cl9j7ls9njvufcwhg03jzm8zea3rmgaevk2y42yy2gzxq4qgpm4cnx5xe4wz2qj4f3ramhxrpqspju04 +250,cube,penumbra1kllhj0jprucna84r8pnwsngccr0dl2erdra3sa75mhvprr688e98j2cl9j7ls9njvufcwhg03jzm8zea3rmgaevk2y42yy2gzxq4qgpm4cnx5xe4wz2qj4f3ramhxrpqspju04 +1_000_000,test_usd,penumbra1kllhj0jprucna84r8pnwsngccr0dl2erdra3sa75mhvprr688e98j2cl9j7ls9njvufcwhg03jzm8zea3rmgaevk2y42yy2gzxq4qgpm4cnx5xe4wz2qj4f3ramhxrpqspju04 +10_000,nala,penumbra1kllhj0jprucna84r8pnwsngccr0dl2erdra3sa75mhvprr688e98j2cl9j7ls9njvufcwhg03jzm8zea3rmgaevk2y42yy2gzxq4qgpm4cnx5xe4wz2qj4f3ramhxrpqspju04 +10_000_000__000_000,upenumbra,penumbra1rqcd3hfvkvc04c4c9vc0ac87lh4y0z8l28k4xp6d0cnd5jc6f6k0neuzp6zdwtpwyfpswtdzv9jzqtpjn5t6wh96pfx3flq2dhqgc42u7c06kj57dl39w2xm6tg0wh4zc8kjjk +10_000_000,gm,penumbra1rqcd3hfvkvc04c4c9vc0ac87lh4y0z8l28k4xp6d0cnd5jc6f6k0neuzp6zdwtpwyfpswtdzv9jzqtpjn5t6wh96pfx3flq2dhqgc42u7c06kj57dl39w2xm6tg0wh4zc8kjjk +10_000_000,gn,penumbra1rqcd3hfvkvc04c4c9vc0ac87lh4y0z8l28k4xp6d0cnd5jc6f6k0neuzp6zdwtpwyfpswtdzv9jzqtpjn5t6wh96pfx3flq2dhqgc42u7c06kj57dl39w2xm6tg0wh4zc8kjjk +10_000_000,pizza,penumbra1rqcd3hfvkvc04c4c9vc0ac87lh4y0z8l28k4xp6d0cnd5jc6f6k0neuzp6zdwtpwyfpswtdzv9jzqtpjn5t6wh96pfx3flq2dhqgc42u7c06kj57dl39w2xm6tg0wh4zc8kjjk +10_000_000,cube,penumbra1rqcd3hfvkvc04c4c9vc0ac87lh4y0z8l28k4xp6d0cnd5jc6f6k0neuzp6zdwtpwyfpswtdzv9jzqtpjn5t6wh96pfx3flq2dhqgc42u7c06kj57dl39w2xm6tg0wh4zc8kjjk +10_000_000,test_usd,penumbra1rqcd3hfvkvc04c4c9vc0ac87lh4y0z8l28k4xp6d0cnd5jc6f6k0neuzp6zdwtpwyfpswtdzv9jzqtpjn5t6wh96pfx3flq2dhqgc42u7c06kj57dl39w2xm6tg0wh4zc8kjjk +10_000_000,test_btc,penumbra1rqcd3hfvkvc04c4c9vc0ac87lh4y0z8l28k4xp6d0cnd5jc6f6k0neuzp6zdwtpwyfpswtdzv9jzqtpjn5t6wh96pfx3flq2dhqgc42u7c06kj57dl39w2xm6tg0wh4zc8kjjk +10_000_000,test_eth,penumbra1rqcd3hfvkvc04c4c9vc0ac87lh4y0z8l28k4xp6d0cnd5jc6f6k0neuzp6zdwtpwyfpswtdzv9jzqtpjn5t6wh96pfx3flq2dhqgc42u7c06kj57dl39w2xm6tg0wh4zc8kjjk 
+10_000_000,test_atom,penumbra1rqcd3hfvkvc04c4c9vc0ac87lh4y0z8l28k4xp6d0cnd5jc6f6k0neuzp6zdwtpwyfpswtdzv9jzqtpjn5t6wh96pfx3flq2dhqgc42u7c06kj57dl39w2xm6tg0wh4zc8kjjk +10_000_000,test_osmo,penumbra1rqcd3hfvkvc04c4c9vc0ac87lh4y0z8l28k4xp6d0cnd5jc6f6k0neuzp6zdwtpwyfpswtdzv9jzqtpjn5t6wh96pfx3flq2dhqgc42u7c06kj57dl39w2xm6tg0wh4zc8kjjk +10_000_000__000_000,upenumbra,penumbra105shuajmml02qgfwmxz7rqqcjwu8tzjdyd3u75za35cj0se3pv5ssmxlv9qc86hawtae35whfst78g30z9xhyxvh8vrz433rlvslzg5675rwr8rcz7g6pszmc5tkm63u6kqa3f +10_000_000,gm,penumbra105shuajmml02qgfwmxz7rqqcjwu8tzjdyd3u75za35cj0se3pv5ssmxlv9qc86hawtae35whfst78g30z9xhyxvh8vrz433rlvslzg5675rwr8rcz7g6pszmc5tkm63u6kqa3f +10_000_000,gn,penumbra105shuajmml02qgfwmxz7rqqcjwu8tzjdyd3u75za35cj0se3pv5ssmxlv9qc86hawtae35whfst78g30z9xhyxvh8vrz433rlvslzg5675rwr8rcz7g6pszmc5tkm63u6kqa3f +10_000_000,pizza,penumbra105shuajmml02qgfwmxz7rqqcjwu8tzjdyd3u75za35cj0se3pv5ssmxlv9qc86hawtae35whfst78g30z9xhyxvh8vrz433rlvslzg5675rwr8rcz7g6pszmc5tkm63u6kqa3f +10_000_000,cube,penumbra105shuajmml02qgfwmxz7rqqcjwu8tzjdyd3u75za35cj0se3pv5ssmxlv9qc86hawtae35whfst78g30z9xhyxvh8vrz433rlvslzg5675rwr8rcz7g6pszmc5tkm63u6kqa3f +10_000_000,test_usd,penumbra105shuajmml02qgfwmxz7rqqcjwu8tzjdyd3u75za35cj0se3pv5ssmxlv9qc86hawtae35whfst78g30z9xhyxvh8vrz433rlvslzg5675rwr8rcz7g6pszmc5tkm63u6kqa3f +10_000_000,test_btc,penumbra105shuajmml02qgfwmxz7rqqcjwu8tzjdyd3u75za35cj0se3pv5ssmxlv9qc86hawtae35whfst78g30z9xhyxvh8vrz433rlvslzg5675rwr8rcz7g6pszmc5tkm63u6kqa3f +10_000_000,test_eth,penumbra105shuajmml02qgfwmxz7rqqcjwu8tzjdyd3u75za35cj0se3pv5ssmxlv9qc86hawtae35whfst78g30z9xhyxvh8vrz433rlvslzg5675rwr8rcz7g6pszmc5tkm63u6kqa3f +10_000_000,test_atom,penumbra105shuajmml02qgfwmxz7rqqcjwu8tzjdyd3u75za35cj0se3pv5ssmxlv9qc86hawtae35whfst78g30z9xhyxvh8vrz433rlvslzg5675rwr8rcz7g6pszmc5tkm63u6kqa3f +10_000_000,test_osmo,penumbra105shuajmml02qgfwmxz7rqqcjwu8tzjdyd3u75za35cj0se3pv5ssmxlv9qc86hawtae35whfst78g30z9xhyxvh8vrz433rlvslzg5675rwr8rcz7g6pszmc5tkm63u6kqa3f 
+100,gm,penumbra147mfall0zr6am5r45qkwht7xqqrdsp50czde7empv7yq2nk3z8yyfh9k9520ddgswkmzar22vhz9dwtuem7uxw0qytfpv7lk3q9dp8ccaw2fn5c838rfackazmgf3ahh09cxmz +5001,test_usd,penumbra147mfall0zr6am5r45qkwht7xqqrdsp50czde7empv7yq2nk3z8yyfh9k9520ddgswkmzar22vhz9dwtuem7uxw0qytfpv7lk3q9dp8ccaw2fn5c838rfackazmgf3ahh09cxmz +1,cube,penumbra147mfall0zr6am5r45qkwht7xqqrdsp50czde7empv7yq2nk3z8yyfh9k9520ddgswkmzar22vhz9dwtuem7uxw0qytfpv7lk3q9dp8ccaw2fn5c838rfackazmgf3ahh09cxmz +2_000__000_000,upenumbra,penumbra147mfall0zr6am5r45qkwht7xqqrdsp50czde7empv7yq2nk3z8yyfh9k9520ddgswkmzar22vhz9dwtuem7uxw0qytfpv7lk3q9dp8ccaw2fn5c838rfackazmgf3ahh09cxmz +1_000,test_usd,penumbra1vmmz304hjlkjq6xv4al5dqumvgk3ek82rneagj07vdqkudjvl6y7zxzr5k6qq24yc7yyyekpu9qm7ef3acg2u8p950hs6hu3e73guq5pfmmvm63qudfx4qmg8h7fdweyw3ektn +1_000__000_000,upenumbra,penumbra1vmmz304hjlkjq6xv4al5dqumvgk3ek82rneagj07vdqkudjvl6y7zxzr5k6qq24yc7yyyekpu9qm7ef3acg2u8p950hs6hu3e73guq5pfmmvm63qudfx4qmg8h7fdweyw3ektn +200__000_000,upenumbra,penumbra1k8ek3e8f953u080n06rph4va80zlnmy30gg39dpx4gf4hcf2z7fujq6gm2cjwgruzksh2zv0zzuvwa8fgy5a2szdrgsuvq3tpgcs4yt2s9se6p966dgdf2khepnsqejg3fw3gm +200__000_000,upenumbra,penumbra1pvm0wztw203f6363va6zfp6se3pcl7mty44en34mjc60hwtsrxuz30mfnyasjdvg3725vyy6gwr2cg30kua9thssu3ew56l4wf46pld3qmeatjclsmcmqmy72kjh5cu2lf2jy0 +200__000_000,upenumbra,penumbra1gzngu24e2crknq8mea437el5cx5scr8w34eucl5jp70jguxt660ta2vwzu4dkc2wlmhdxncdd4r7v4fcyq7r7px57kwy5w0zx8euzj3ck2l9nqmlng3flvzsrztz7sxdg0e27d +200__000_000,upenumbra,penumbra1tz0yc6eqr0dkhywvz5ufwghhrycp2ze9lez0xj6r7zfv7s7yyu2gen9uspy7ydp7fglpgnuu967flp49zvkzmht9my653wqhykgpnhww258eh6f6fluzq0vqsl2hlan5lqqrxq +200__000_000,upenumbra,penumbra1z4mqp3dtnkum03rwahq7xan37467pkgclen5p7wlfpexqjhpwmuwc8gzgxyqrqqw2zg8rsa8t3hvdaw7ucfwzez9whwquzglrajlygrh62jj9lfnp9gmpr4jwgzx4503c8mh9z diff --git a/testnets/080-phobos/validators.json b/testnets/080-phobos/validators.json new file mode 100644 index 0000000000..1dbe6ac9f6 --- /dev/null +++ b/testnets/080-phobos/validators.json @@ 
-0,0 +1,9 @@ +[ + { + "name": "Penumbra Labs CI 1", + "website": "https://penumbra.zone", + "description": "This is a validator run by Penumbra Labs, using testnets as a public CI", + "funding_streams": [], + "sequence_number": 0 + } +] diff --git a/testnets/base_addresses.txt b/testnets/base_addresses.txt index 94f0b24164..58b12d3015 100644 --- a/testnets/base_addresses.txt +++ b/testnets/base_addresses.txt @@ -2,16 +2,16 @@ penumbra1tj9s0dh8ymphnw2qgrva57wpcmpr38e9a8w05t5c8dyynr7dt58hgnare7tunqyuryc7yag penumbra12ts5g469dpjdcn2wmdkz25nng6snxkuxumfnmygtrp2fm6776zjw7r35y2j7s07h8368a4kr3c904w8cjgpm6v3ysaj6gh930z7fwlqq9kvjewsy9fgamzxyj874kgu5uvcshg penumbra19zz058ttl8vhsypztc0gyl9yfs7jcn3906kgd3pzeh944klh8vf2ttx7qvscxwtuecw92cy6n55ttjn482q7ufpzwj5yem9xcvecrd2zc6vgctxzc3k7mnpg0lk8vved00e3g0 penumbra1hckq024z3fd6wl29kk4rwfnykgyxyawpuw8zc04npc7s40hey0xghltdcsk7q5k0cq77qjtg8gt0cnvqff94s8j5tvx94ssyjyr35c9rqx08lkwxfqgr6dt3vu99wg8dg0c7jz -penumbra1djxk3jzq5d3ga63vut39gdr9lh57yp34pu79jnjmjna3mmxuqaf476e7ar9je0hfn35cpuu4s7l9vae4gfd4773zepygwed5pfgdad26pvzd6qqaur4czes0cl5ku7jjvhx0e0 penumbra1f5qra6dk8pmp0m2s42sq4qsxthjp6mmcpj3rjwrn74getpmwhrgzhe545fhz6eew2gdcpn2ee0j7fdm4whxl8ux6p5jz6g5fz72u4pzd4z26jnxeasvsvwapx2lvhxw8uy7dvw penumbra18dms48wvudc7qljl6zjq48hyuvxxvhm2se2zltpjndpnn5c092mwych7uvs0xztpc7q0e2j7s89nyfxyj2pmu50zafvhwq5x4prm49ax6c7txmvjjdkgu9c8u0nhyqgrsmfn2c penumbra169w7gexlfa9m7gsa3vmwyfpu92j0yxawmlkt06g3qg5ta29czwzepzqp0d0nl324jgcjrc6fy0kmqhjlureeyjulejug8e2h6zj0kqf0vapwszahqm8swg634extm3837kl59v -penumbra10as9rd2ny8w6v70sx4cetrph9twy5vu4ystjz6ae0uuutjamfztl2649vdufmya02a6r0907ywuxkhz3uve6thaf5et2jhnx97k0h0t07ad9pdhawls7wzklrktefzwwvrxtsn +penumbra1e7erpuxqa7u0zcvdnqm0e5w6dvj2arksj0q99gmtlzj4mk5xnddkrnsjl4e28ughgjxm3jaendsrmc3shn7ewwxav6lvewqxqz8gs0zypnnzys27jgnvmcfxpgd4y5sg2gdeyd penumbra1ks9t4vrp9alvk9yfapyu75hjf7p3wva76t0dz9a902zlp8vuyn0q9j2a72pu9j0uxmg356xstqpmafkj28ktx37l4lzq25nmgdeay5ls00yky2pgladnr5z7u3ftav7lw9vnre 
penumbra183uj3sh6d22j6mguu3vlpfcp87lh93jfzn3ucehfk2j4ek07jnwelznq2k8jk200gwrnyyvttexay2u638l7s6dhnh7vec3wscyefslswnhwd44ywqfxzxd4wdt8eqxy8st7k0 -penumbra1u8eg9v2dlge3yd7gjkdmnzj3quvgwy204ueryl0uzxpjjpan4fwkjwd8mlrkpfffe6vuweeq0x7wn3rhljj7qaedye4kuhpj9fmcu4zl0cn6up0syg60hrc70x9edf2gy0shft penumbra1r70z64vqnmv28rttpv60hppd7gca9w5a3zwctx4c6yrtfd4mlvjrevkx3l2swdnw8vq7amylgfwaq2x97dlxmmh0flpxetmd8gjf0rpmg083ms94psmvpgy7hgdhwwn6nr92sc penumbra18z69jxec5008krvlhw84459ecermc2x5r06flunlz6m45ftmphp2djp9cpxy4n4m2tykz00jjd3pgm5n3etpaq43ypt4xffcz8ag0ay3hutt7hdfevmjfft237j2x2vae99v3f penumbra1q87euedu4yw3f8c025zulgu6tgc8frmhak7ge6xwyauuptlp4z25mn92z6qpa9nhdp08rfs3lakxqpgpd58m8u02cjcrpec0xdt6t9zlgq7fkxmklvtpktykwn0rdlxeqyny2n -penumbra1zn54t9n55da28vkytffkkrlfxcwutn4nlj05xwfw4jmgzr60jfqazvahzwz2ts9j0juru8r8ghwr47klmcvuhny6jfcunahs84z7fmgkk54xvfmvveqh4yjc69sg0jrf92r6kn penumbra10kqp4qk220hc2flys45czh2vsc30jwl4mfy8styvjnz0zlhtw6gef74k0v2r7nqghlkuauxd9wafpz2nh8rjfqp747hc3hxakml793rjau6ljnm46vlmnpmczt4jkq8xcdakgg +penumbra17p75gersqlwg2cqyn8yn47f5eyl4wk36g9g9spq75gpnfd5s4wu740djd757f5e9afv2whtx844lpqr440qnwzanum34y05j5fz3uxjsdz6zpkrej7qlp6pr50v6l74jan2rzt +penumbra1d6rgvasgspaj8uhzsmfcvu2t96m4m94jgpxeapvjhpzq4h8v2yv307zxx83cfv3vhuyxyw7qu3cqk94yssj5zenhxlshu2y06yayfsh82zhryh8pc550lcvs9xe7nanzkuvlgz +penumbra1ky24hfyrqxwl9vwlqnpsm4q4ryc5e8nlaxu3efw9mnl25nqjj47g7kluzpfu6w2a5w0ymzm07uwt0zska8v99qsu2qkuwnmgjfhf7qhea6aqqt9yevxjze2sml7hux7px2wp5m diff --git a/testnets/base_allocations.csv b/testnets/base_allocations.csv index ca26d74b6c..f4d069a50f 100644 --- a/testnets/base_allocations.csv +++ b/testnets/base_allocations.csv @@ -23,12 +23,6 @@ amount,denom,address 10_000,pizza,penumbra1hckq024z3fd6wl29kk4rwfnykgyxyawpuw8zc04npc7s40hey0xghltdcsk7q5k0cq77qjtg8gt0cnvqff94s8j5tvx94ssyjyr35c9rqx08lkwxfqgr6dt3vu99wg8dg0c7jz 100,cube,penumbra1hckq024z3fd6wl29kk4rwfnykgyxyawpuw8zc04npc7s40hey0xghltdcsk7q5k0cq77qjtg8gt0cnvqff94s8j5tvx94ssyjyr35c9rqx08lkwxfqgr6dt3vu99wg8dg0c7jz 
500_000,test_usd,penumbra1hckq024z3fd6wl29kk4rwfnykgyxyawpuw8zc04npc7s40hey0xghltdcsk7q5k0cq77qjtg8gt0cnvqff94s8j5tvx94ssyjyr35c9rqx08lkwxfqgr6dt3vu99wg8dg0c7jz -1_000_000__000_000,upenumbra,penumbra1djxk3jzq5d3ga63vut39gdr9lh57yp34pu79jnjmjna3mmxuqaf476e7ar9je0hfn35cpuu4s7l9vae4gfd4773zepygwed5pfgdad26pvzd6qqaur4czes0cl5ku7jjvhx0e0 -20_000,gm,penumbra1djxk3jzq5d3ga63vut39gdr9lh57yp34pu79jnjmjna3mmxuqaf476e7ar9je0hfn35cpuu4s7l9vae4gfd4773zepygwed5pfgdad26pvzd6qqaur4czes0cl5ku7jjvhx0e0 -20_000,gn,penumbra1djxk3jzq5d3ga63vut39gdr9lh57yp34pu79jnjmjna3mmxuqaf476e7ar9je0hfn35cpuu4s7l9vae4gfd4773zepygwed5pfgdad26pvzd6qqaur4czes0cl5ku7jjvhx0e0 -10_000,pizza,penumbra1djxk3jzq5d3ga63vut39gdr9lh57yp34pu79jnjmjna3mmxuqaf476e7ar9je0hfn35cpuu4s7l9vae4gfd4773zepygwed5pfgdad26pvzd6qqaur4czes0cl5ku7jjvhx0e0 -100,cube,penumbra1djxk3jzq5d3ga63vut39gdr9lh57yp34pu79jnjmjna3mmxuqaf476e7ar9je0hfn35cpuu4s7l9vae4gfd4773zepygwed5pfgdad26pvzd6qqaur4czes0cl5ku7jjvhx0e0 -500_000,test_usd,penumbra1djxk3jzq5d3ga63vut39gdr9lh57yp34pu79jnjmjna3mmxuqaf476e7ar9je0hfn35cpuu4s7l9vae4gfd4773zepygwed5pfgdad26pvzd6qqaur4czes0cl5ku7jjvhx0e0 1_000_000__000_000,upenumbra,penumbra1f5qra6dk8pmp0m2s42sq4qsxthjp6mmcpj3rjwrn74getpmwhrgzhe545fhz6eew2gdcpn2ee0j7fdm4whxl8ux6p5jz6g5fz72u4pzd4z26jnxeasvsvwapx2lvhxw8uy7dvw 20_000,gm,penumbra1f5qra6dk8pmp0m2s42sq4qsxthjp6mmcpj3rjwrn74getpmwhrgzhe545fhz6eew2gdcpn2ee0j7fdm4whxl8ux6p5jz6g5fz72u4pzd4z26jnxeasvsvwapx2lvhxw8uy7dvw 20_000,gn,penumbra1f5qra6dk8pmp0m2s42sq4qsxthjp6mmcpj3rjwrn74getpmwhrgzhe545fhz6eew2gdcpn2ee0j7fdm4whxl8ux6p5jz6g5fz72u4pzd4z26jnxeasvsvwapx2lvhxw8uy7dvw @@ -47,12 +41,12 @@ amount,denom,address 10_000,pizza,penumbra169w7gexlfa9m7gsa3vmwyfpu92j0yxawmlkt06g3qg5ta29czwzepzqp0d0nl324jgcjrc6fy0kmqhjlureeyjulejug8e2h6zj0kqf0vapwszahqm8swg634extm3837kl59v 100,cube,penumbra169w7gexlfa9m7gsa3vmwyfpu92j0yxawmlkt06g3qg5ta29czwzepzqp0d0nl324jgcjrc6fy0kmqhjlureeyjulejug8e2h6zj0kqf0vapwszahqm8swg634extm3837kl59v 
500_000,test_usd,penumbra169w7gexlfa9m7gsa3vmwyfpu92j0yxawmlkt06g3qg5ta29czwzepzqp0d0nl324jgcjrc6fy0kmqhjlureeyjulejug8e2h6zj0kqf0vapwszahqm8swg634extm3837kl59v -1_000_000__000_000,upenumbra,penumbra10as9rd2ny8w6v70sx4cetrph9twy5vu4ystjz6ae0uuutjamfztl2649vdufmya02a6r0907ywuxkhz3uve6thaf5et2jhnx97k0h0t07ad9pdhawls7wzklrktefzwwvrxtsn -20_000,gm,penumbra10as9rd2ny8w6v70sx4cetrph9twy5vu4ystjz6ae0uuutjamfztl2649vdufmya02a6r0907ywuxkhz3uve6thaf5et2jhnx97k0h0t07ad9pdhawls7wzklrktefzwwvrxtsn -20_000,gn,penumbra10as9rd2ny8w6v70sx4cetrph9twy5vu4ystjz6ae0uuutjamfztl2649vdufmya02a6r0907ywuxkhz3uve6thaf5et2jhnx97k0h0t07ad9pdhawls7wzklrktefzwwvrxtsn -10_000,pizza,penumbra10as9rd2ny8w6v70sx4cetrph9twy5vu4ystjz6ae0uuutjamfztl2649vdufmya02a6r0907ywuxkhz3uve6thaf5et2jhnx97k0h0t07ad9pdhawls7wzklrktefzwwvrxtsn -100,cube,penumbra10as9rd2ny8w6v70sx4cetrph9twy5vu4ystjz6ae0uuutjamfztl2649vdufmya02a6r0907ywuxkhz3uve6thaf5et2jhnx97k0h0t07ad9pdhawls7wzklrktefzwwvrxtsn -500_000,test_usd,penumbra10as9rd2ny8w6v70sx4cetrph9twy5vu4ystjz6ae0uuutjamfztl2649vdufmya02a6r0907ywuxkhz3uve6thaf5et2jhnx97k0h0t07ad9pdhawls7wzklrktefzwwvrxtsn +1_000_000__000_000,upenumbra,penumbra1e7erpuxqa7u0zcvdnqm0e5w6dvj2arksj0q99gmtlzj4mk5xnddkrnsjl4e28ughgjxm3jaendsrmc3shn7ewwxav6lvewqxqz8gs0zypnnzys27jgnvmcfxpgd4y5sg2gdeyd +20_000,gm,penumbra1e7erpuxqa7u0zcvdnqm0e5w6dvj2arksj0q99gmtlzj4mk5xnddkrnsjl4e28ughgjxm3jaendsrmc3shn7ewwxav6lvewqxqz8gs0zypnnzys27jgnvmcfxpgd4y5sg2gdeyd +20_000,gn,penumbra1e7erpuxqa7u0zcvdnqm0e5w6dvj2arksj0q99gmtlzj4mk5xnddkrnsjl4e28ughgjxm3jaendsrmc3shn7ewwxav6lvewqxqz8gs0zypnnzys27jgnvmcfxpgd4y5sg2gdeyd +10_000,pizza,penumbra1e7erpuxqa7u0zcvdnqm0e5w6dvj2arksj0q99gmtlzj4mk5xnddkrnsjl4e28ughgjxm3jaendsrmc3shn7ewwxav6lvewqxqz8gs0zypnnzys27jgnvmcfxpgd4y5sg2gdeyd +100,cube,penumbra1e7erpuxqa7u0zcvdnqm0e5w6dvj2arksj0q99gmtlzj4mk5xnddkrnsjl4e28ughgjxm3jaendsrmc3shn7ewwxav6lvewqxqz8gs0zypnnzys27jgnvmcfxpgd4y5sg2gdeyd 
+500_000,test_usd,penumbra1e7erpuxqa7u0zcvdnqm0e5w6dvj2arksj0q99gmtlzj4mk5xnddkrnsjl4e28ughgjxm3jaendsrmc3shn7ewwxav6lvewqxqz8gs0zypnnzys27jgnvmcfxpgd4y5sg2gdeyd 1_000_000__000_000,upenumbra,penumbra1ks9t4vrp9alvk9yfapyu75hjf7p3wva76t0dz9a902zlp8vuyn0q9j2a72pu9j0uxmg356xstqpmafkj28ktx37l4lzq25nmgdeay5ls00yky2pgladnr5z7u3ftav7lw9vnre 20_000,gm,penumbra1ks9t4vrp9alvk9yfapyu75hjf7p3wva76t0dz9a902zlp8vuyn0q9j2a72pu9j0uxmg356xstqpmafkj28ktx37l4lzq25nmgdeay5ls00yky2pgladnr5z7u3ftav7lw9vnre 20_000,gn,penumbra1ks9t4vrp9alvk9yfapyu75hjf7p3wva76t0dz9a902zlp8vuyn0q9j2a72pu9j0uxmg356xstqpmafkj28ktx37l4lzq25nmgdeay5ls00yky2pgladnr5z7u3ftav7lw9vnre @@ -65,12 +59,6 @@ amount,denom,address 10_000,pizza,penumbra183uj3sh6d22j6mguu3vlpfcp87lh93jfzn3ucehfk2j4ek07jnwelznq2k8jk200gwrnyyvttexay2u638l7s6dhnh7vec3wscyefslswnhwd44ywqfxzxd4wdt8eqxy8st7k0 100,cube,penumbra183uj3sh6d22j6mguu3vlpfcp87lh93jfzn3ucehfk2j4ek07jnwelznq2k8jk200gwrnyyvttexay2u638l7s6dhnh7vec3wscyefslswnhwd44ywqfxzxd4wdt8eqxy8st7k0 500_000,test_usd,penumbra183uj3sh6d22j6mguu3vlpfcp87lh93jfzn3ucehfk2j4ek07jnwelznq2k8jk200gwrnyyvttexay2u638l7s6dhnh7vec3wscyefslswnhwd44ywqfxzxd4wdt8eqxy8st7k0 -1_000_000__000_000,upenumbra,penumbra1u8eg9v2dlge3yd7gjkdmnzj3quvgwy204ueryl0uzxpjjpan4fwkjwd8mlrkpfffe6vuweeq0x7wn3rhljj7qaedye4kuhpj9fmcu4zl0cn6up0syg60hrc70x9edf2gy0shft -20_000,gm,penumbra1u8eg9v2dlge3yd7gjkdmnzj3quvgwy204ueryl0uzxpjjpan4fwkjwd8mlrkpfffe6vuweeq0x7wn3rhljj7qaedye4kuhpj9fmcu4zl0cn6up0syg60hrc70x9edf2gy0shft -20_000,gn,penumbra1u8eg9v2dlge3yd7gjkdmnzj3quvgwy204ueryl0uzxpjjpan4fwkjwd8mlrkpfffe6vuweeq0x7wn3rhljj7qaedye4kuhpj9fmcu4zl0cn6up0syg60hrc70x9edf2gy0shft -10_000,pizza,penumbra1u8eg9v2dlge3yd7gjkdmnzj3quvgwy204ueryl0uzxpjjpan4fwkjwd8mlrkpfffe6vuweeq0x7wn3rhljj7qaedye4kuhpj9fmcu4zl0cn6up0syg60hrc70x9edf2gy0shft -100,cube,penumbra1u8eg9v2dlge3yd7gjkdmnzj3quvgwy204ueryl0uzxpjjpan4fwkjwd8mlrkpfffe6vuweeq0x7wn3rhljj7qaedye4kuhpj9fmcu4zl0cn6up0syg60hrc70x9edf2gy0shft 
-500_000,test_usd,penumbra1u8eg9v2dlge3yd7gjkdmnzj3quvgwy204ueryl0uzxpjjpan4fwkjwd8mlrkpfffe6vuweeq0x7wn3rhljj7qaedye4kuhpj9fmcu4zl0cn6up0syg60hrc70x9edf2gy0shft 1_000_000__000_000,upenumbra,penumbra1r70z64vqnmv28rttpv60hppd7gca9w5a3zwctx4c6yrtfd4mlvjrevkx3l2swdnw8vq7amylgfwaq2x97dlxmmh0flpxetmd8gjf0rpmg083ms94psmvpgy7hgdhwwn6nr92sc 20_000,gm,penumbra1r70z64vqnmv28rttpv60hppd7gca9w5a3zwctx4c6yrtfd4mlvjrevkx3l2swdnw8vq7amylgfwaq2x97dlxmmh0flpxetmd8gjf0rpmg083ms94psmvpgy7hgdhwwn6nr92sc 20_000,gn,penumbra1r70z64vqnmv28rttpv60hppd7gca9w5a3zwctx4c6yrtfd4mlvjrevkx3l2swdnw8vq7amylgfwaq2x97dlxmmh0flpxetmd8gjf0rpmg083ms94psmvpgy7hgdhwwn6nr92sc @@ -89,18 +77,30 @@ amount,denom,address 10_000,pizza,penumbra1q87euedu4yw3f8c025zulgu6tgc8frmhak7ge6xwyauuptlp4z25mn92z6qpa9nhdp08rfs3lakxqpgpd58m8u02cjcrpec0xdt6t9zlgq7fkxmklvtpktykwn0rdlxeqyny2n 100,cube,penumbra1q87euedu4yw3f8c025zulgu6tgc8frmhak7ge6xwyauuptlp4z25mn92z6qpa9nhdp08rfs3lakxqpgpd58m8u02cjcrpec0xdt6t9zlgq7fkxmklvtpktykwn0rdlxeqyny2n 500_000,test_usd,penumbra1q87euedu4yw3f8c025zulgu6tgc8frmhak7ge6xwyauuptlp4z25mn92z6qpa9nhdp08rfs3lakxqpgpd58m8u02cjcrpec0xdt6t9zlgq7fkxmklvtpktykwn0rdlxeqyny2n -1_000_000__000_000,upenumbra,penumbra1zn54t9n55da28vkytffkkrlfxcwutn4nlj05xwfw4jmgzr60jfqazvahzwz2ts9j0juru8r8ghwr47klmcvuhny6jfcunahs84z7fmgkk54xvfmvveqh4yjc69sg0jrf92r6kn -20_000,gm,penumbra1zn54t9n55da28vkytffkkrlfxcwutn4nlj05xwfw4jmgzr60jfqazvahzwz2ts9j0juru8r8ghwr47klmcvuhny6jfcunahs84z7fmgkk54xvfmvveqh4yjc69sg0jrf92r6kn -20_000,gn,penumbra1zn54t9n55da28vkytffkkrlfxcwutn4nlj05xwfw4jmgzr60jfqazvahzwz2ts9j0juru8r8ghwr47klmcvuhny6jfcunahs84z7fmgkk54xvfmvveqh4yjc69sg0jrf92r6kn -10_000,pizza,penumbra1zn54t9n55da28vkytffkkrlfxcwutn4nlj05xwfw4jmgzr60jfqazvahzwz2ts9j0juru8r8ghwr47klmcvuhny6jfcunahs84z7fmgkk54xvfmvveqh4yjc69sg0jrf92r6kn -100,cube,penumbra1zn54t9n55da28vkytffkkrlfxcwutn4nlj05xwfw4jmgzr60jfqazvahzwz2ts9j0juru8r8ghwr47klmcvuhny6jfcunahs84z7fmgkk54xvfmvveqh4yjc69sg0jrf92r6kn 
-500_000,test_usd,penumbra1zn54t9n55da28vkytffkkrlfxcwutn4nlj05xwfw4jmgzr60jfqazvahzwz2ts9j0juru8r8ghwr47klmcvuhny6jfcunahs84z7fmgkk54xvfmvveqh4yjc69sg0jrf92r6kn 1_000_000__000_000,upenumbra,penumbra10kqp4qk220hc2flys45czh2vsc30jwl4mfy8styvjnz0zlhtw6gef74k0v2r7nqghlkuauxd9wafpz2nh8rjfqp747hc3hxakml793rjau6ljnm46vlmnpmczt4jkq8xcdakgg 20_000,gm,penumbra10kqp4qk220hc2flys45czh2vsc30jwl4mfy8styvjnz0zlhtw6gef74k0v2r7nqghlkuauxd9wafpz2nh8rjfqp747hc3hxakml793rjau6ljnm46vlmnpmczt4jkq8xcdakgg 20_000,gn,penumbra10kqp4qk220hc2flys45czh2vsc30jwl4mfy8styvjnz0zlhtw6gef74k0v2r7nqghlkuauxd9wafpz2nh8rjfqp747hc3hxakml793rjau6ljnm46vlmnpmczt4jkq8xcdakgg 10_000,pizza,penumbra10kqp4qk220hc2flys45czh2vsc30jwl4mfy8styvjnz0zlhtw6gef74k0v2r7nqghlkuauxd9wafpz2nh8rjfqp747hc3hxakml793rjau6ljnm46vlmnpmczt4jkq8xcdakgg 100,cube,penumbra10kqp4qk220hc2flys45czh2vsc30jwl4mfy8styvjnz0zlhtw6gef74k0v2r7nqghlkuauxd9wafpz2nh8rjfqp747hc3hxakml793rjau6ljnm46vlmnpmczt4jkq8xcdakgg 500_000,test_usd,penumbra10kqp4qk220hc2flys45czh2vsc30jwl4mfy8styvjnz0zlhtw6gef74k0v2r7nqghlkuauxd9wafpz2nh8rjfqp747hc3hxakml793rjau6ljnm46vlmnpmczt4jkq8xcdakgg +1_000_000__000_000,upenumbra,penumbra17p75gersqlwg2cqyn8yn47f5eyl4wk36g9g9spq75gpnfd5s4wu740djd757f5e9afv2whtx844lpqr440qnwzanum34y05j5fz3uxjsdz6zpkrej7qlp6pr50v6l74jan2rzt +20_000,gm,penumbra17p75gersqlwg2cqyn8yn47f5eyl4wk36g9g9spq75gpnfd5s4wu740djd757f5e9afv2whtx844lpqr440qnwzanum34y05j5fz3uxjsdz6zpkrej7qlp6pr50v6l74jan2rzt +20_000,gn,penumbra17p75gersqlwg2cqyn8yn47f5eyl4wk36g9g9spq75gpnfd5s4wu740djd757f5e9afv2whtx844lpqr440qnwzanum34y05j5fz3uxjsdz6zpkrej7qlp6pr50v6l74jan2rzt +10_000,pizza,penumbra17p75gersqlwg2cqyn8yn47f5eyl4wk36g9g9spq75gpnfd5s4wu740djd757f5e9afv2whtx844lpqr440qnwzanum34y05j5fz3uxjsdz6zpkrej7qlp6pr50v6l74jan2rzt +100,cube,penumbra17p75gersqlwg2cqyn8yn47f5eyl4wk36g9g9spq75gpnfd5s4wu740djd757f5e9afv2whtx844lpqr440qnwzanum34y05j5fz3uxjsdz6zpkrej7qlp6pr50v6l74jan2rzt 
+500_000,test_usd,penumbra17p75gersqlwg2cqyn8yn47f5eyl4wk36g9g9spq75gpnfd5s4wu740djd757f5e9afv2whtx844lpqr440qnwzanum34y05j5fz3uxjsdz6zpkrej7qlp6pr50v6l74jan2rzt +1_000_000__000_000,upenumbra,penumbra1d6rgvasgspaj8uhzsmfcvu2t96m4m94jgpxeapvjhpzq4h8v2yv307zxx83cfv3vhuyxyw7qu3cqk94yssj5zenhxlshu2y06yayfsh82zhryh8pc550lcvs9xe7nanzkuvlgz +20_000,gm,penumbra1d6rgvasgspaj8uhzsmfcvu2t96m4m94jgpxeapvjhpzq4h8v2yv307zxx83cfv3vhuyxyw7qu3cqk94yssj5zenhxlshu2y06yayfsh82zhryh8pc550lcvs9xe7nanzkuvlgz +20_000,gn,penumbra1d6rgvasgspaj8uhzsmfcvu2t96m4m94jgpxeapvjhpzq4h8v2yv307zxx83cfv3vhuyxyw7qu3cqk94yssj5zenhxlshu2y06yayfsh82zhryh8pc550lcvs9xe7nanzkuvlgz +10_000,pizza,penumbra1d6rgvasgspaj8uhzsmfcvu2t96m4m94jgpxeapvjhpzq4h8v2yv307zxx83cfv3vhuyxyw7qu3cqk94yssj5zenhxlshu2y06yayfsh82zhryh8pc550lcvs9xe7nanzkuvlgz +100,cube,penumbra1d6rgvasgspaj8uhzsmfcvu2t96m4m94jgpxeapvjhpzq4h8v2yv307zxx83cfv3vhuyxyw7qu3cqk94yssj5zenhxlshu2y06yayfsh82zhryh8pc550lcvs9xe7nanzkuvlgz +500_000,test_usd,penumbra1d6rgvasgspaj8uhzsmfcvu2t96m4m94jgpxeapvjhpzq4h8v2yv307zxx83cfv3vhuyxyw7qu3cqk94yssj5zenhxlshu2y06yayfsh82zhryh8pc550lcvs9xe7nanzkuvlgz +1_000_000__000_000,upenumbra,penumbra1ky24hfyrqxwl9vwlqnpsm4q4ryc5e8nlaxu3efw9mnl25nqjj47g7kluzpfu6w2a5w0ymzm07uwt0zska8v99qsu2qkuwnmgjfhf7qhea6aqqt9yevxjze2sml7hux7px2wp5m +20_000,gm,penumbra1ky24hfyrqxwl9vwlqnpsm4q4ryc5e8nlaxu3efw9mnl25nqjj47g7kluzpfu6w2a5w0ymzm07uwt0zska8v99qsu2qkuwnmgjfhf7qhea6aqqt9yevxjze2sml7hux7px2wp5m +20_000,gn,penumbra1ky24hfyrqxwl9vwlqnpsm4q4ryc5e8nlaxu3efw9mnl25nqjj47g7kluzpfu6w2a5w0ymzm07uwt0zska8v99qsu2qkuwnmgjfhf7qhea6aqqt9yevxjze2sml7hux7px2wp5m +10_000,pizza,penumbra1ky24hfyrqxwl9vwlqnpsm4q4ryc5e8nlaxu3efw9mnl25nqjj47g7kluzpfu6w2a5w0ymzm07uwt0zska8v99qsu2qkuwnmgjfhf7qhea6aqqt9yevxjze2sml7hux7px2wp5m +100,cube,penumbra1ky24hfyrqxwl9vwlqnpsm4q4ryc5e8nlaxu3efw9mnl25nqjj47g7kluzpfu6w2a5w0ymzm07uwt0zska8v99qsu2qkuwnmgjfhf7qhea6aqqt9yevxjze2sml7hux7px2wp5m 
+500_000,test_usd,penumbra1ky24hfyrqxwl9vwlqnpsm4q4ryc5e8nlaxu3efw9mnl25nqjj47g7kluzpfu6w2a5w0ymzm07uwt0zska8v99qsu2qkuwnmgjfhf7qhea6aqqt9yevxjze2sml7hux7px2wp5m 5_000_000__000_000,upenumbra,penumbra1xq2e9x7uhfzezwunvazdamlxepf4jr5htsuqnzlsahuayyqxjjwg9lk0aytwm6wfj3jy29rv2kdpen57903s8wxv3jmqwj6m6v5jgn6y2cypfd03rke652k8wmavxra7e9wkrg 50_000,gm,penumbra1xq2e9x7uhfzezwunvazdamlxepf4jr5htsuqnzlsahuayyqxjjwg9lk0aytwm6wfj3jy29rv2kdpen57903s8wxv3jmqwj6m6v5jgn6y2cypfd03rke652k8wmavxra7e9wkrg 50_000,gn,penumbra1xq2e9x7uhfzezwunvazdamlxepf4jr5htsuqnzlsahuayyqxjjwg9lk0aytwm6wfj3jy29rv2kdpen57903s8wxv3jmqwj6m6v5jgn6y2cypfd03rke652k8wmavxra7e9wkrg @@ -407,3 +407,8 @@ amount,denom,address 2_000__000_000,upenumbra,penumbra147mfall0zr6am5r45qkwht7xqqrdsp50czde7empv7yq2nk3z8yyfh9k9520ddgswkmzar22vhz9dwtuem7uxw0qytfpv7lk3q9dp8ccaw2fn5c838rfackazmgf3ahh09cxmz 1_000,test_usd,penumbra1vmmz304hjlkjq6xv4al5dqumvgk3ek82rneagj07vdqkudjvl6y7zxzr5k6qq24yc7yyyekpu9qm7ef3acg2u8p950hs6hu3e73guq5pfmmvm63qudfx4qmg8h7fdweyw3ektn 1_000__000_000,upenumbra,penumbra1vmmz304hjlkjq6xv4al5dqumvgk3ek82rneagj07vdqkudjvl6y7zxzr5k6qq24yc7yyyekpu9qm7ef3acg2u8p950hs6hu3e73guq5pfmmvm63qudfx4qmg8h7fdweyw3ektn +200__000_000,upenumbra,penumbra1k8ek3e8f953u080n06rph4va80zlnmy30gg39dpx4gf4hcf2z7fujq6gm2cjwgruzksh2zv0zzuvwa8fgy5a2szdrgsuvq3tpgcs4yt2s9se6p966dgdf2khepnsqejg3fw3gm +200__000_000,upenumbra,penumbra1pvm0wztw203f6363va6zfp6se3pcl7mty44en34mjc60hwtsrxuz30mfnyasjdvg3725vyy6gwr2cg30kua9thssu3ew56l4wf46pld3qmeatjclsmcmqmy72kjh5cu2lf2jy0 +200__000_000,upenumbra,penumbra1gzngu24e2crknq8mea437el5cx5scr8w34eucl5jp70jguxt660ta2vwzu4dkc2wlmhdxncdd4r7v4fcyq7r7px57kwy5w0zx8euzj3ck2l9nqmlng3flvzsrztz7sxdg0e27d +200__000_000,upenumbra,penumbra1tz0yc6eqr0dkhywvz5ufwghhrycp2ze9lez0xj6r7zfv7s7yyu2gen9uspy7ydp7fglpgnuu967flp49zvkzmht9my653wqhykgpnhww258eh6f6fluzq0vqsl2hlan5lqqrxq 
+200__000_000,upenumbra,penumbra1z4mqp3dtnkum03rwahq7xan37467pkgclen5p7wlfpexqjhpwmuwc8gzgxyqrqqw2zg8rsa8t3hvdaw7ucfwzez9whwquzglrajlygrh62jj9lfnp9gmpr4jwgzx4503c8mh9z diff --git a/testnets/new-testnet.sh b/testnets/new-testnet.sh index 101b99a0f8..2fbfc0ac74 100755 --- a/testnets/new-testnet.sh +++ b/testnets/new-testnet.sh @@ -5,7 +5,7 @@ set -euo pipefail echo "#####################################################" PREVIOUS_TESTNET_DIRECTORY=$(find . -mindepth 1 -type d | sort | tail -n1) echo "previous testnet directory: $PREVIOUS_TESTNET_DIRECTORY" -PREVIOUS_TESTNET_NUMBER=$(find . -mindepth 1 -type d | wc -l) +PREVIOUS_TESTNET_NUMBER="$(find . -mindepth 1 -type d -exec basename {} \; | tail -n 1 | grep -Po '^\d+')" echo "previous testnet number: $PREVIOUS_TESTNET_NUMBER" NEW_TESTNET_NUMBER="0$(echo "1 + $PREVIOUS_TESTNET_NUMBER" | bc)" echo "new testnet number: $NEW_TESTNET_NUMBER" @@ -15,8 +15,15 @@ echo "#####################################################" echo "Creating new testnet directory $NEW_TESTNET_DIRECTORY..." mkdir "$NEW_TESTNET_DIRECTORY" -echo "Copying validators from $PREVIOUS_TESTNET_DIRECTORY to $NEW_TESTNET_DIRECTORY" -cp "$PREVIOUS_TESTNET_DIRECTORY/validators.json" "$NEW_TESTNET_DIRECTORY/validators.json" +if [[ -e "$PREVIOUS_TESTNET_DIRECTORY/validators.json" ]]; then + echo "Copying validators from $PREVIOUS_TESTNET_DIRECTORY to $NEW_TESTNET_DIRECTORY" + cp -v "$PREVIOUS_TESTNET_DIRECTORY/validators.json" "$NEW_TESTNET_DIRECTORY/validators.json" +else + echo "Using default CI validator config" + # We inspect the validators config and pluck the first entry out, for a solo-validator setup. + # TODO: update pd to take an `--n-validators` arg so this is dynamic. + jq '.[0]' "validators-ci.json" | jq -s > "$NEW_TESTNET_DIRECTORY/validators.json" +fi echo "Setting up allocations for new testnet..." # Truncate file, set CSV headers. 
@@ -90,6 +97,15 @@ while read -r a; do EOM done < <(cut -d' ' -f1 "test_address_1.txt") + +# Miscellaneous "small" accounts, with just a bit of staking token to pay fees. +# Useful for e.g. bootstrapping relayers on testnets/devnets. +while read -r a; do + cat <<EOM >> base_allocations.csv +200__000_000,upenumbra,$a +EOM +done < <(cut -d' ' -f1 "small_addresses.txt") + # Copy new base allocations file to target testnet dir. cp -v base_allocations.csv "$NEW_TESTNET_DIRECTORY/allocations.csv" diff --git a/testnets/small_addresses.txt b/testnets/small_addresses.txt new file mode 100644 index 0000000000..6514c88c03 --- /dev/null +++ b/testnets/small_addresses.txt @@ -0,0 +1,5 @@ +penumbra1k8ek3e8f953u080n06rph4va80zlnmy30gg39dpx4gf4hcf2z7fujq6gm2cjwgruzksh2zv0zzuvwa8fgy5a2szdrgsuvq3tpgcs4yt2s9se6p966dgdf2khepnsqejg3fw3gm # hermes-osmosis +penumbra1pvm0wztw203f6363va6zfp6se3pcl7mty44en34mjc60hwtsrxuz30mfnyasjdvg3725vyy6gwr2cg30kua9thssu3ew56l4wf46pld3qmeatjclsmcmqmy72kjh5cu2lf2jy0 # hermes-noble +penumbra1gzngu24e2crknq8mea437el5cx5scr8w34eucl5jp70jguxt660ta2vwzu4dkc2wlmhdxncdd4r7v4fcyq7r7px57kwy5w0zx8euzj3ck2l9nqmlng3flvzsrztz7sxdg0e27d # hermes-unused-1 +penumbra1tz0yc6eqr0dkhywvz5ufwghhrycp2ze9lez0xj6r7zfv7s7yyu2gen9uspy7ydp7fglpgnuu967flp49zvkzmht9my653wqhykgpnhww258eh6f6fluzq0vqsl2hlan5lqqrxq # hermes-unused-2 +penumbra1z4mqp3dtnkum03rwahq7xan37467pkgclen5p7wlfpexqjhpwmuwc8gzgxyqrqqw2zg8rsa8t3hvdaw7ucfwzez9whwquzglrajlygrh62jj9lfnp9gmpr4jwgzx4503c8mh9z # hermes-unused-3 diff --git a/testnets/validators-ci.json b/testnets/validators-ci.json index f740f0f63e..b6635b58f9 100644 --- a/testnets/validators-ci.json +++ b/testnets/validators-ci.json @@ -3,64 +3,14 @@ "name": "Penumbra Labs CI 1", "website": "https://penumbra.zone", "description": "This is a validator run by Penumbra Labs, using testnets as a public CI", - "funding_streams": [ - [ - 50, -
"penumbra1fcy6crf6u4r450k8y4nye43puxet2ytfh7s0dzxsxjk68czej9mp37xv49np0clv4dc8cwg4re0xfs79uwlfehnja4p0revmlek0drezxfse8spg3qc6gux6vyuzuulse7xuxv" - ], - [ - 50, - "penumbra13ahs2s8ms6q0utgetty3zflwteepg87gqm88sqqcdj2mjhhydkykwu6n7dk557x84aa9a6cqhdytw0zk33xjgmuedprrlunc86up6zps8juej9rpuuydjtk7jaxpmrw27gfu9x" - ], - [ - 50, - "penumbra1uw03wyt49u7wm5wgu4nvkdt0v48fdaw5y4az4xlgmnp6ucs6th4xd0zg8wqxwndwfv286ktjwgemyhrxqu0d5qjf8dapr57l3k8yqs09vw9m5ywxsx9hjj2dj4qwnrl2rzxdh9" - ], - [ - 50, - "penumbra1w6em8sdx0467ug9kk0s0sng254tqjfk9gglv6ff7dq2v8arwekevkjte9udzmsj9l83mz74747tj0a49w2vhecxj7ac4upr5c5pvjqhsy7dwn422m8dgdekt7y4lmad026njsv" - ], - [ - 50, - "penumbra1jp4pryqqmh65pq8e7zwk6k2674vwhn4qqphxjk0vukxln0crmp2tdld0mhavuyrspwuajnsk5t5t33u2auxvheunr7qde4l068ez0euvtu08z7rwj6shlh64ndz0wvz7cmu29z" - ], - [ - 50, - "penumbra1hum845ches70c8kp8zfx7nerjwfe653hxsrpgwepwtspcp4jy6ytnxhe5kwn56sku684x6zzqcwp5ycrkee5mmg9kdl3jkr5lqn2xq3kqxvp4d7gwqdue5jznk2ter2teg83gu" - ] - ], + "funding_streams": [], "sequence_number": 0 }, { "name": "Penumbra Labs CI 2", "website": "https://penumbra.zone", "description": "This is a validator run by Penumbra Labs, using testnets as a public CI", - "funding_streams": [ - [ - 50, - "penumbra1fcy6crf6u4r450k8y4nye43puxet2ytfh7s0dzxsxjk68czej9mp37xv49np0clv4dc8cwg4re0xfs79uwlfehnja4p0revmlek0drezxfse8spg3qc6gux6vyuzuulse7xuxv" - ], - [ - 50, - "penumbra13ahs2s8ms6q0utgetty3zflwteepg87gqm88sqqcdj2mjhhydkykwu6n7dk557x84aa9a6cqhdytw0zk33xjgmuedprrlunc86up6zps8juej9rpuuydjtk7jaxpmrw27gfu9x" - ], - [ - 50, - "penumbra1uw03wyt49u7wm5wgu4nvkdt0v48fdaw5y4az4xlgmnp6ucs6th4xd0zg8wqxwndwfv286ktjwgemyhrxqu0d5qjf8dapr57l3k8yqs09vw9m5ywxsx9hjj2dj4qwnrl2rzxdh9" - ], - [ - 50, - "penumbra1w6em8sdx0467ug9kk0s0sng254tqjfk9gglv6ff7dq2v8arwekevkjte9udzmsj9l83mz74747tj0a49w2vhecxj7ac4upr5c5pvjqhsy7dwn422m8dgdekt7y4lmad026njsv" - ], - [ - 50, - 
"penumbra1jp4pryqqmh65pq8e7zwk6k2674vwhn4qqphxjk0vukxln0crmp2tdld0mhavuyrspwuajnsk5t5t33u2auxvheunr7qde4l068ez0euvtu08z7rwj6shlh64ndz0wvz7cmu29z" - ], - [ - 50, - "penumbra1hum845ches70c8kp8zfx7nerjwfe653hxsrpgwepwtspcp4jy6ytnxhe5kwn56sku684x6zzqcwp5ycrkee5mmg9kdl3jkr5lqn2xq3kqxvp4d7gwqdue5jznk2ter2teg83gu" - ] - ], + "funding_streams": [], "sequence_number": 0 } ] From 9443d9816e188a8488821f4d901fa7b5e3d61261 Mon Sep 17 00:00:00 2001 From: aubrey <73075391+aubrika@users.noreply.github.com> Date: Thu, 5 Sep 2024 11:10:35 -0700 Subject: [PATCH 09/43] pcli: add balance migration command (#4842) initial attempt at implementing a balance migration function for pcli - does not currently handle non-zero fees correctly, but that seems to be the only remaining thing to correctly implement - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > client/planner only changes Co-authored-by: Lucas Meier Co-authored-by: Conor Schaefer --- crates/bin/pcli/src/command.rs | 18 +++-- crates/bin/pcli/src/command/migrate.rs | 98 ++++++++++++++++++++++++++ crates/bin/pcli/src/main.rs | 1 + 3 files changed, 111 insertions(+), 6 deletions(-) create mode 100644 crates/bin/pcli/src/command/migrate.rs diff --git a/crates/bin/pcli/src/command.rs b/crates/bin/pcli/src/command.rs index c7c787eaf6..420964e6e6 100644 --- a/crates/bin/pcli/src/command.rs +++ b/crates/bin/pcli/src/command.rs @@ -1,5 +1,6 @@ pub use debug::DebugCmd; pub use init::InitCmd; +pub use migrate::MigrateCmd; pub use query::QueryCmd; pub use threshold::ThresholdCmd; pub use tx::TxCmd; @@ -11,6 +12,7 @@ use self::{ceremony::CeremonyCmd, tx::TxCmdWithOptions}; mod ceremony; mod debug; mod init; +mod migrate; mod query; mod threshold; mod tx; @@ -53,18 +55,21 @@ pub enum Command { /// Create and broadcast a transaction. 
#[clap(display_order = 400, visible_alias = "tx")] Transaction(TxCmdWithOptions), + /// Follow the threshold signing protocol. + #[clap(subcommand, display_order = 500)] + Threshold(ThresholdCmd), + /// Migrate your balance to another wallet. + #[clap(subcommand, display_order = 600)] + Migrate(MigrateCmd), /// Manage a validator. #[clap(subcommand, display_order = 900)] Validator(ValidatorCmd), - /// Display information related to diagnosing problems running Penumbra - #[clap(subcommand, display_order = 999)] - Debug(DebugCmd), /// Contribute to the summoning ceremony. #[clap(subcommand, display_order = 990)] Ceremony(CeremonyCmd), - /// Follow the threshold signing protocol. - #[clap(subcommand, display_order = 500)] - Threshold(ThresholdCmd), + /// Display information related to diagnosing problems running Penumbra + #[clap(subcommand, display_order = 999)] + Debug(DebugCmd), } impl Command { @@ -79,6 +84,7 @@ impl Command { Command::Debug(cmd) => cmd.offline(), Command::Ceremony(_) => false, Command::Threshold(cmd) => cmd.offline(), + Command::Migrate(_) => false, } } } diff --git a/crates/bin/pcli/src/command/migrate.rs b/crates/bin/pcli/src/command/migrate.rs new file mode 100644 index 0000000000..df81b13bed --- /dev/null +++ b/crates/bin/pcli/src/command/migrate.rs @@ -0,0 +1,98 @@ +use crate::App; +use anyhow::{Context, Result}; +use penumbra_keys::FullViewingKey; +use penumbra_proto::view::v1::GasPricesRequest; +use penumbra_view::ViewClient; +use penumbra_wallet::plan::Planner; +use rand_core::OsRng; +use std::{io::Write, str::FromStr}; +use termion::input::TermRead; + +#[derive(Debug, clap::Parser)] +pub enum MigrateCmd { + /// Migrate your entire balance to another wallet. + /// + /// All assets from all accounts in the source wallet will be sent to the destination wallet. + /// A FullViewingKey must be provided for the destination wallet. 
+ /// All funds will be deposited in the account 0 of the destination wallet, + /// minus any gas prices for the migration transaction. + #[clap(name = "balance")] + Balance, +} + +impl MigrateCmd { + #[tracing::instrument(skip(self, app))] + pub async fn exec(&self, app: &mut App) -> Result<()> { + let gas_prices = app + .view + .as_mut() + .context("view service must be initialized")? + .gas_prices(GasPricesRequest {}) + .await? + .into_inner() + .gas_prices + .expect("gas prices must be available") + .try_into()?; + + print!("Enter FVK: "); + std::io::stdout().flush()?; + let to: String = std::io::stdin().lock().read_line()?.unwrap_or_default(); + + match self { + MigrateCmd::Balance => { + let source_fvk = app.config.full_viewing_key.clone(); + + let dest_fvk = to.parse::<FullViewingKey>().map_err(|_| { + anyhow::anyhow!("The provided string is not a valid FullViewingKey.") + })?; + + let mut planner = Planner::new(OsRng); + + let (dest_address, _) = FullViewingKey::payment_address( + &FullViewingKey::from_str(&to[..])?, + Default::default(), + ); + + planner + .set_gas_prices(gas_prices) + .set_fee_tier(Default::default()) + .change_address(dest_address); + + // Return all unspent notes from the view service + let notes = app + .view + .as_mut() + .context("view service must be initialized")?
+ .unspent_notes_by_account_and_asset() + .await?; + + for notes in notes.into_values() { + for notes in notes.into_values() { + for note in notes { + planner.spend(note.note, note.position); + } + } + } + + let memo = format!("Migrating balance from {} to {}", source_fvk, dest_fvk); + let plan = planner + .memo(memo) + .plan( + app.view + .as_mut() + .context("view service must be initialized")?, + Default::default(), + ) + .await + .context("can't build send transaction")?; + + if plan.actions.is_empty() { + anyhow::bail!("migration plan contained zero actions: is the source wallet already empty?"); + } + app.build_and_submit_transaction(plan).await?; + + Result::Ok(()) + } + } + } +} diff --git a/crates/bin/pcli/src/main.rs b/crates/bin/pcli/src/main.rs index b308769aac..29f23a61b9 100644 --- a/crates/bin/pcli/src/main.rs +++ b/crates/bin/pcli/src/main.rs @@ -65,6 +65,7 @@ async fn main() -> Result<()> { Command::Query(cmd) => cmd.exec(&mut app).await?, Command::Ceremony(cmd) => cmd.exec(&mut app).await?, Command::Threshold(cmd) => cmd.exec(&mut app).await?, + Command::Migrate(cmd) => cmd.exec(&mut app).await?, } Ok(()) From 685599d74b299a358d0ee378536317251513ee1b Mon Sep 17 00:00:00 2001 From: Lucas Meier Date: Fri, 6 Sep 2024 11:45:27 -0700 Subject: [PATCH 10/43] pcli migrate balance: replicate account structure This changes the migrate balance command to replicate the balances of each asset per account, as required. Previously, the command would move all the values into the 0 account. This will pull fees from the account with the most amount of the staking / fee token. This differs slightly from iterating through account numbers starting from 0, but is much much simpler. 
--- crates/bin/pcli/src/command/migrate.rs | 64 ++++++++++++++++++-------- 1 file changed, 46 insertions(+), 18 deletions(-) diff --git a/crates/bin/pcli/src/command/migrate.rs b/crates/bin/pcli/src/command/migrate.rs index df81b13bed..4ef7228dd4 100644 --- a/crates/bin/pcli/src/command/migrate.rs +++ b/crates/bin/pcli/src/command/migrate.rs @@ -1,13 +1,25 @@ use crate::App; -use anyhow::{Context, Result}; +use anyhow::{anyhow, Context, Result}; +use penumbra_asset::{asset, Value, STAKING_TOKEN_ASSET_ID}; use penumbra_keys::FullViewingKey; +use penumbra_num::Amount; use penumbra_proto::view::v1::GasPricesRequest; use penumbra_view::ViewClient; use penumbra_wallet::plan::Planner; use rand_core::OsRng; -use std::{io::Write, str::FromStr}; +use std::{collections::HashMap, io::Write}; use termion::input::TermRead; +fn read_fvk() -> Result<FullViewingKey> { + print!("Enter FVK: "); + std::io::stdout().flush()?; + let fvk_string: String = std::io::stdin().lock().read_line()?.unwrap_or_default(); + + fvk_string + .parse::<FullViewingKey>() + .map_err(|_| anyhow::anyhow!("The provided string is not a valid FullViewingKey.")) +} + #[derive(Debug, clap::Parser)] pub enum MigrateCmd { /// Migrate your entire balance to another wallet.
@@ -34,29 +46,17 @@ impl MigrateCmd { .expect("gas prices must be available") .try_into()?; - print!("Enter FVK: "); - std::io::stdout().flush()?; - let to: String = std::io::stdin().lock().read_line()?.unwrap_or_default(); - match self { MigrateCmd::Balance => { let source_fvk = app.config.full_viewing_key.clone(); - let dest_fvk = to.parse::().map_err(|_| { - anyhow::anyhow!("The provided string is not a valid FullViewingKey.") - })?; + let dest_fvk = read_fvk()?; let mut planner = Planner::new(OsRng); - let (dest_address, _) = FullViewingKey::payment_address( - &FullViewingKey::from_str(&to[..])?, - Default::default(), - ); - planner .set_gas_prices(gas_prices) - .set_fee_tier(Default::default()) - .change_address(dest_address); + .set_fee_tier(Default::default()); // Return all unspent notes from the view service let notes = app @@ -66,14 +66,42 @@ impl MigrateCmd { .unspent_notes_by_account_and_asset() .await?; - for notes in notes.into_values() { + let mut account_values: HashMap<(u32, asset::Id), Amount> = HashMap::new(); + + for (account, notes) in notes { for notes in notes.into_values() { for note in notes { - planner.spend(note.note, note.position); + let position = note.position; + let note = note.note; + let value = note.value(); + planner.spend(note, position); + *account_values.entry((account, value.asset_id)).or_default() += + value.amount; } } } + // We'll use the account with the most amount of the fee token to pay fees. + // + // If this fails, then it won't be possible to migrate. + let (&(largest_account, _), _) = account_values + .iter() + .filter(|((_, asset), _)| *asset == *STAKING_TOKEN_ASSET_ID) + .max_by_key(|&(_, &amount)| amount) + .ok_or(anyhow!("no account with the ability to pay fees exists"))?; + + // Set this account to be the change address. + planner.change_address(dest_fvk.payment_address(largest_account.into()).0); + + // Create explicit outputs for the other addresses. 
+ for (&(account, asset_id), &amount) in &account_values { + if account == largest_account { + continue; + } + let (address, _) = dest_fvk.payment_address(account.into()); + planner.output(Value { asset_id, amount }, address); + } + let memo = format!("Migrating balance from {} to {}", source_fvk, dest_fvk); let plan = planner .memo(memo) From 09a59008b55b2b8b58d915da1e7ee89f8fff6f9a Mon Sep 17 00:00:00 2001 From: Chris Czub Date: Tue, 10 Sep 2024 14:25:36 -0400 Subject: [PATCH 11/43] Properly sum Penumbra-minted values returned in total supply API (#4836) ## Describe your changes This fixes an issue in the `TotalSupply` API where tokens minted on Penumbra and received from different counterparties weren't correctly summing together. It should be noted that this API returns two subtly different values depending on the asset: * For a Penumbra-minted token, it's really the "Total Outstanding IBC Supply" API rather than the "Total Supply" API -- it will return the amount of the token that has been sent to and remains on other chains * For an externally-originating token, it indicates the total supply of the token transferred into and remaining on the Penumbra chain ## Checklist before requesting a review - [ ] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. 
Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > no consensus changes --- .../shielded-pool/src/component/rpc/bank_query.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/crates/core/component/shielded-pool/src/component/rpc/bank_query.rs b/crates/core/component/shielded-pool/src/component/rpc/bank_query.rs index e774f731ef..2af2edae7c 100644 --- a/crates/core/component/shielded-pool/src/component/rpc/bank_query.rs +++ b/crates/core/component/shielded-pool/src/component/rpc/bank_query.rs @@ -30,7 +30,7 @@ use super::Server; impl BankQuery for Server { /// Returns the total supply for all IBC assets. /// Internally-minted assets (Penumbra tokens, LP tokens, delegation tokens, etc.) - /// are also included but the supplies are hardcoded at 0 for now. + /// are also included but the supplies will only reflect what has been transferred out. /// /// TODO: Implement a way to fetch the total supply for these assets. /// TODO: implement pagination @@ -107,7 +107,11 @@ impl BankQuery for Server { } let denom_metadata = denom_metadata.expect("should not be an error"); - total_supply.insert(denom_metadata, amount); + // Add to the total supply seen for this denom.
+ total_supply + .entry(denom_metadata) + .and_modify(|a| *a += amount) + .or_insert(amount); } Ok(tonic::Response::new(QueryTotalSupplyResponse { From 7f7c3c6f17d437ec773a38f0a70248b340282d3a Mon Sep 17 00:00:00 2001 From: Conor Schaefer Date: Tue, 10 Sep 2024 12:41:02 -0700 Subject: [PATCH 12/43] chore: release version 0.80.4 --- Cargo.lock | 96 +++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 49 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c8f77b85b1..7d770d09ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1233,7 +1233,7 @@ dependencies = [ [[package]] name = "cnidarium" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "async-trait", @@ -1269,7 +1269,7 @@ dependencies = [ [[package]] name = "cnidarium-component" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "async-trait", @@ -1307,7 +1307,7 @@ dependencies = [ [[package]] name = "cometindex" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "async-trait", @@ -1668,7 +1668,7 @@ dependencies = [ [[package]] name = "decaf377-fmd" -version = "0.80.3" +version = "0.80.4" dependencies = [ "ark-ff", "ark-serialize", @@ -1683,7 +1683,7 @@ dependencies = [ [[package]] name = "decaf377-frost" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -1698,7 +1698,7 @@ dependencies = [ [[package]] name = "decaf377-ka" -version = "0.80.3" +version = "0.80.4" dependencies = [ "ark-ff", "decaf377", @@ -4213,7 +4213,7 @@ dependencies = [ [[package]] name = "pcli" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -4295,7 +4295,7 @@ dependencies = [ [[package]] name = "pclientd" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "assert_cmd", @@ -4347,7 +4347,7 @@ dependencies = [ [[package]] name = "pd" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -4500,7 +4500,7 @@ dependencies = [ [[package]] name = 
"penumbra-app" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -4588,7 +4588,7 @@ dependencies = [ [[package]] name = "penumbra-asset" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -4628,7 +4628,7 @@ dependencies = [ [[package]] name = "penumbra-auction" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -4683,7 +4683,7 @@ dependencies = [ [[package]] name = "penumbra-auto-https" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "axum-server", @@ -4695,7 +4695,7 @@ dependencies = [ [[package]] name = "penumbra-bench" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-bls12-377", @@ -4739,7 +4739,7 @@ dependencies = [ [[package]] name = "penumbra-community-pool" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -4771,7 +4771,7 @@ dependencies = [ [[package]] name = "penumbra-compact-block" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -4806,7 +4806,7 @@ dependencies = [ [[package]] name = "penumbra-custody" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "argon2", @@ -4842,7 +4842,7 @@ dependencies = [ [[package]] name = "penumbra-dex" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -4904,7 +4904,7 @@ dependencies = [ [[package]] name = "penumbra-distributions" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "async-trait", @@ -4922,7 +4922,7 @@ dependencies = [ [[package]] name = "penumbra-eddy" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -4940,7 +4940,7 @@ dependencies = [ [[package]] name = "penumbra-fee" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -4967,7 +4967,7 @@ dependencies = [ [[package]] name = "penumbra-funding" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "async-trait", @@ -4990,7 +4990,7 @@ 
dependencies = [ [[package]] name = "penumbra-governance" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -5044,7 +5044,7 @@ dependencies = [ [[package]] name = "penumbra-ibc" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -5081,7 +5081,7 @@ dependencies = [ [[package]] name = "penumbra-keys" -version = "0.80.3" +version = "0.80.4" dependencies = [ "aes", "anyhow", @@ -5128,7 +5128,7 @@ dependencies = [ [[package]] name = "penumbra-measure" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "bytesize", @@ -5146,7 +5146,7 @@ dependencies = [ [[package]] name = "penumbra-mock-client" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "cnidarium", @@ -5163,7 +5163,7 @@ dependencies = [ [[package]] name = "penumbra-mock-consensus" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "bytes", @@ -5183,7 +5183,7 @@ dependencies = [ [[package]] name = "penumbra-mock-tendermint-proxy" -version = "0.80.3" +version = "0.80.4" dependencies = [ "hex", "pbjson-types", @@ -5198,7 +5198,7 @@ dependencies = [ [[package]] name = "penumbra-num" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -5235,7 +5235,7 @@ dependencies = [ [[package]] name = "penumbra-proof-params" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ec", @@ -5263,7 +5263,7 @@ dependencies = [ [[package]] name = "penumbra-proof-setup" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ec", @@ -5290,7 +5290,7 @@ dependencies = [ [[package]] name = "penumbra-proto" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "async-trait", @@ -5324,7 +5324,7 @@ dependencies = [ [[package]] name = "penumbra-sct" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -5360,7 +5360,7 @@ dependencies = [ [[package]] name = "penumbra-shielded-pool" -version = "0.80.3" +version = "0.80.4" dependencies = [ 
"anyhow", "ark-ff", @@ -5414,7 +5414,7 @@ dependencies = [ [[package]] name = "penumbra-stake" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -5467,7 +5467,7 @@ dependencies = [ [[package]] name = "penumbra-tct" -version = "0.80.3" +version = "0.80.4" dependencies = [ "ark-ed-on-bls12-377", "ark-ff", @@ -5499,7 +5499,7 @@ dependencies = [ [[package]] name = "penumbra-tct-property-test" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "futures", @@ -5511,7 +5511,7 @@ dependencies = [ [[package]] name = "penumbra-tct-visualize" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "axum", @@ -5541,7 +5541,7 @@ dependencies = [ [[package]] name = "penumbra-tendermint-proxy" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "chrono", @@ -5573,7 +5573,7 @@ dependencies = [ [[package]] name = "penumbra-test-subscriber" -version = "0.80.3" +version = "0.80.4" dependencies = [ "tracing", "tracing-subscriber 0.3.18", @@ -5581,7 +5581,7 @@ dependencies = [ [[package]] name = "penumbra-tower-trace" -version = "0.80.3" +version = "0.80.4" dependencies = [ "futures", "hex", @@ -5602,7 +5602,7 @@ dependencies = [ [[package]] name = "penumbra-transaction" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-ff", @@ -5655,7 +5655,7 @@ dependencies = [ [[package]] name = "penumbra-txhash" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "blake2b_simd 1.0.2", @@ -5668,7 +5668,7 @@ dependencies = [ [[package]] name = "penumbra-view" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-std", @@ -5726,7 +5726,7 @@ dependencies = [ [[package]] name = "penumbra-wallet" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-std", @@ -5812,7 +5812,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pindexer" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "clap", @@ 
-7662,7 +7662,7 @@ checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = "summonerd" -version = "0.80.3" +version = "0.80.4" dependencies = [ "anyhow", "ark-groth16", diff --git a/Cargo.toml b/Cargo.toml index 3a68331621..6ed6f2e0f3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -104,7 +104,7 @@ push = false [workspace.package] authors = ["Penumbra Labs "] edition = "2021" -version = "0.80.3" +version = "0.80.4" repository = "https://github.com/penumbra-zone/penumbra" homepage = "https://penumbra.zone" license = "MIT OR Apache-2.0" From 9ff993c6ef6e8bbaed4234736136b2ff22d08cc7 Mon Sep 17 00:00:00 2001 From: Chris Czub Date: Wed, 11 Sep 2024 14:12:33 -0400 Subject: [PATCH 13/43] Basic ICS23 Transfer Test (#4850) ## Describe your changes Replaces the existing IBC handshake test with a full ICS23 transfer test. Only the "happy path" (no timeouts or any other error cases) is tested in this initial spike. Also introduces some improvements to the underlying `MockClient` to support spent note tracking. The transfer implementation in the `MockRelayer` is currently hardcoded to transfer 50% of the first chain's client's first note to the second chain; this can be fleshed out further in response to future test requirements. ## Issue ticket number and link Closes #4846 ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. 
Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > tests only --- .../app/tests/common/ibc_tests/relayer.rs | 414 +++++++++++++++++- .../{ibc_handshake.rs => ics23_transfer.rs} | 72 ++- crates/test/mock-client/src/lib.rs | 73 ++- crates/test/mock-consensus/src/block.rs | 42 +- 4 files changed, 584 insertions(+), 17 deletions(-) rename crates/core/app/tests/{ibc_handshake.rs => ics23_transfer.rs} (52%) diff --git a/crates/core/app/tests/common/ibc_tests/relayer.rs b/crates/core/app/tests/common/ibc_tests/relayer.rs index 05a73e1f59..488e68dcca 100644 --- a/crates/core/app/tests/common/ibc_tests/relayer.rs +++ b/crates/core/app/tests/common/ibc_tests/relayer.rs @@ -11,9 +11,12 @@ use { channel::{ channel::{Order, State as ChannelState}, msgs::{ - MsgChannelOpenAck, MsgChannelOpenConfirm, MsgChannelOpenInit, MsgChannelOpenTry, + MsgAcknowledgement, MsgChannelOpenAck, MsgChannelOpenConfirm, + MsgChannelOpenInit, MsgChannelOpenTry, MsgRecvPacket, }, - IdentifiedChannelEnd, Version as ChannelVersion, + packet::Sequence, + ChannelId, IdentifiedChannelEnd, Packet, PortId, TimeoutHeight, + Version as ChannelVersion, }, client::{ msgs::{MsgCreateClient, MsgUpdateClient}, @@ -35,20 +38,31 @@ use { header::Header as TendermintHeader, TrustThreshold, }, + timestamp::Timestamp, DomainType as _, }, + penumbra_asset::{asset::Cache, Value}, penumbra_ibc::{ component::{ChannelStateReadExt as _, ConnectionStateReadExt as _}, - IbcRelay, IBC_COMMITMENT_PREFIX, IBC_PROOF_SPECS, + IbcRelay, IbcToken, IBC_COMMITMENT_PREFIX, IBC_PROOF_SPECS, }, + penumbra_keys::keys::AddressIndex, + penumbra_num::Amount, penumbra_proto::{util::tendermint_proxy::v1::GetBlockByHeightRequest, DomainType}, + penumbra_shielded_pool::{Ics20Withdrawal, OutputPlan, SpendPlan}, penumbra_stake::state_key::chain, - penumbra_transaction::{TransactionParameters, TransactionPlan}, + penumbra_transaction::{ + memo::MemoPlaintext, plan::MemoPlan, 
TransactionParameters, TransactionPlan, + }, prost::Message as _, + rand::SeedableRng as _, rand_chacha::ChaCha12Core, sha2::Digest, - std::time::Duration, - tendermint::Time, + std::{ + str::FromStr as _, + time::{Duration, SystemTime, UNIX_EPOCH}, + }, + tendermint::{abci::Event, Time}, }; #[allow(unused)] pub struct MockRelayer { @@ -1380,6 +1394,389 @@ impl MockRelayer { Ok(()) } + + /// Sends an IBC transfer from chain A to chain B. + /// + /// Currently hardcoded to send 50% of the first note's value + /// on chain A. + pub async fn transfer_from_a_to_b(&mut self) -> Result<()> { + // Ensure chain A has balance to transfer + let chain_a_client = self.chain_a_ibc.client().await?; + let chain_b_client = self.chain_b_ibc.client().await?; + + let chain_a_note = chain_a_client + .notes + .values() + .cloned() + .next() + .ok_or_else(|| anyhow!("mock client had no note"))?; + + // Get the balance of that asset on chain A + let pretransfer_balance_a: Amount = chain_a_client + .spendable_notes_by_asset(chain_a_note.asset_id()) + .map(|n| n.value().amount) + .sum(); + + // Get the balance of that asset on chain B + // The asset ID of the IBC transferred asset on chain B + // needs to be computed. 
+ let asset_cache = Cache::with_known_assets(); + let denom = asset_cache + .get(&chain_a_note.asset_id()) + .expect("asset ID should exist in asset cache") + .clone(); + let ibc_token = IbcToken::new( + &self.chain_b_ibc.channel_id, + &self.chain_b_ibc.port_id, + &denom.to_string(), + ); + let pretransfer_balance_b: Amount = chain_b_client + .spendable_notes_by_asset(ibc_token.id()) + .map(|n| n.value().amount) + .sum(); + + // We will transfer 50% of the `chain_a_note`'s value to the same address on chain B + let transfer_value = Value { + amount: (chain_a_note.amount().value() / 2).into(), + asset_id: chain_a_note.asset_id(), + }; + + // Prepare and perform the transfer from chain A to chain B + let destination_chain_address = chain_b_client.fvk.payment_address(AddressIndex::new(0)).0; + let denom = asset_cache + .get(&transfer_value.asset_id) + .expect("asset ID should exist in asset cache") + .clone(); + let amount = transfer_value.amount; + // TODO: test timeouts + // For this sunny path test, we'll set the timeouts very far in the future + let timeout_height = Height { + revision_height: 1_000_000, + revision_number: 0, + }; + // get the current time on the local machine + let current_time_ns = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time went backwards") + .as_nanos() as u64; + + // add 2 days to current time + let mut timeout_time = current_time_ns + 1.728e14 as u64; + + // round to the nearest 10 minutes + timeout_time += 600_000_000_000 - (timeout_time % 600_000_000_000); + + let return_address = chain_a_client + .fvk + .ephemeral_address( + rand_chacha::ChaChaRng::seed_from_u64(1312), + AddressIndex::new(0), + ) + .0; + let withdrawal = Ics20Withdrawal { + destination_chain_address: destination_chain_address.to_string(), + denom, + amount, + timeout_height, + timeout_time, + return_address, + // TODO: this is fine to hardcode for now but should ultimately move + // to the mock relayer and be based on the handshake + source_channel: 
ChannelId::from_str("channel-0")?, + // Penumbra <-> Penumbra so false + use_compat_address: false, + }; + // There will need to be `Spend` and `Output` actions + // within the transaction in order for it to balance + let spend_plan = SpendPlan::new( + &mut rand_chacha::ChaChaRng::seed_from_u64(1312), + chain_a_note.clone(), + chain_a_client + .position(chain_a_note.commit()) + .expect("note should be in mock client's tree"), + ); + let output_plan = OutputPlan::new( + &mut rand_chacha::ChaChaRng::seed_from_u64(1312), + // half the note is being withdrawn, so we can use `transfer_value` both for the withdrawal action + // and the change output + transfer_value.clone(), + chain_a_client.fvk.payment_address(AddressIndex::new(0)).0, + ); + + let plan = { + let ics20_msg = withdrawal.into(); + TransactionPlan { + actions: vec![ics20_msg, spend_plan.into(), output_plan.into()], + // Now fill out the remaining parts of the transaction needed for verification: + memo: Some(MemoPlan::new( + &mut rand_chacha::ChaChaRng::seed_from_u64(1312), + MemoPlaintext::blank_memo( + chain_a_client.fvk.payment_address(AddressIndex::new(0)).0, + ), + )), + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: self.chain_a_ibc.chain_id.clone(), + ..Default::default() + }, + } + .with_populated_detection_data( + rand_chacha::ChaChaRng::seed_from_u64(1312), + Default::default(), + ) + }; + let tx = self + .chain_a_ibc + .client() + .await? + .witness_auth_build(&plan) + .await?; + + let (_end_block_events, deliver_tx_events) = self + .chain_a_ibc + .node + .block() + .with_data(vec![tx.encode_to_vec()]) + .execute() + .await?; + self._sync_chains().await?; + + // Since multiple send_packet events can occur in a single deliver tx response, + // we accumulate all the events and process them in a loop. 
+ let mut recv_tx_deliver_tx_events: Vec = Vec::new(); + // Now that the withdrawal has been processed on Chain A, the relayer + // tells chain B to process the transfer. It does this by forwarding a + // MsgRecvPacket to chain B. + // + // The relayer needs to extract the event that chain A emitted: + for event in deliver_tx_events.iter() { + if event.kind == "send_packet" { + let mut packet_data_hex = None; + let mut sequence = None; + let mut port_on_a = None; + let mut chan_on_a = None; + let mut port_on_b = None; + let mut chan_on_b = None; + let mut timeout_height_on_b = None; + let mut timeout_timestamp_on_b = None; + for attr in &event.attributes { + match attr.key.as_str() { + "packet_data_hex" => packet_data_hex = Some(attr.value.clone()), + "packet_sequence" => sequence = Some(attr.value.clone()), + "packet_src_port" => port_on_a = Some(attr.value.clone()), + "packet_src_channel" => chan_on_a = Some(attr.value.clone()), + "packet_dst_port" => port_on_b = Some(attr.value.clone()), + "packet_dst_channel" => chan_on_b = Some(attr.value.clone()), + "packet_timeout_height" => timeout_height_on_b = Some(attr.value.clone()), + "packet_timeout_timestamp" => { + timeout_timestamp_on_b = Some(attr.value.clone()) + } + _ => (), + } + } + + let port_on_a = port_on_a.expect("port_on_a attribute should be present"); + let chan_on_a = chan_on_a.expect("chan_on_a attribute should be present"); + let port_on_b = port_on_b.expect("port_on_b attribute should be present"); + let chan_on_b = chan_on_b.expect("chan_on_b attribute should be present"); + let sequence = sequence.expect("sequence attribute should be present"); + let timeout_height_on_b = + timeout_height_on_b.expect("timeout_height_on_b attribute should be present"); + let timeout_timestamp_on_b = timeout_timestamp_on_b + .expect("timeout_timestamp_on_b attribute should be present"); + let packet_data_hex = + packet_data_hex.expect("packet_data_hex attribute should be present"); + + // The relayer must fetch the 
packet commitment proof from chain A + // to include in the MsgRecvPacket + // For a real relayer this would be done with an abci request, but + // since we don't have a real cometbft node, we will just grab it + // from storage + let chain_a_snapshot = self.chain_a_ibc.storage.latest_snapshot(); + let (_commitment, proof_commitment_on_a) = chain_a_snapshot.get_with_proof(format!("ibc-data/commitments/ports/{port_on_a}/channels/{chan_on_a}/sequences/{sequence}").as_bytes().to_vec()).await?; + + // Now update the chains + let _chain_b_height = self._build_and_send_update_client_a().await?; + let chain_a_height = self._build_and_send_update_client_b().await?; + + let proof_height = chain_a_height; + + let msg_recv_packet = MsgRecvPacket { + packet: Packet { + sequence: Sequence::from_str(&sequence)?, + port_on_a: PortId::from_str(&port_on_a)?, + chan_on_a: ChannelId::from_str(&chan_on_a)?, + port_on_b: PortId::from_str(&port_on_b)?, + chan_on_b: ChannelId::from_str(&chan_on_b)?, + data: hex::decode(packet_data_hex)?, + timeout_height_on_b: TimeoutHeight::from_str(&timeout_height_on_b)?, + timeout_timestamp_on_b: Timestamp::from_str(&timeout_timestamp_on_b)?, + }, + proof_commitment_on_a, + proof_height_on_a: Height { + revision_height: proof_height.revision_height, + revision_number: 0, + }, + signer: self.chain_a_ibc.signer.clone(), + }; + + let plan = { + let ics20_msg = penumbra_transaction::ActionPlan::IbcAction( + IbcRelay::RecvPacket(msg_recv_packet), + ) + .into(); + TransactionPlan { + actions: vec![ics20_msg], + // Now fill out the remaining parts of the transaction needed for verification: + memo: None, + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: self.chain_b_ibc.chain_id.clone(), + ..Default::default() + }, + } + }; + + let tx = self + .chain_b_ibc + .client() + .await? 
+ .witness_auth_build(&plan) + .await?; + + let (_end_block_events, dtx_events) = self + .chain_b_ibc + .node + .block() + .with_data(vec![tx.encode_to_vec()]) + .execute() + .await?; + recv_tx_deliver_tx_events.extend(dtx_events.0.into_iter()); + } + } + + self._sync_chains().await?; + + // Now that the transfer packet has been processed by chain B, + // the relayer tells chain A to process the acknowledgement. + for event in recv_tx_deliver_tx_events.iter() { + if event.kind == "write_acknowledgement" { + let mut packet_data_hex = None; + let mut sequence = None; + let mut port_on_a = None; + let mut chan_on_a = None; + let mut port_on_b = None; + let mut chan_on_b = None; + let mut timeout_height_on_b = None; + let mut timeout_timestamp_on_b = None; + let mut packet_ack_hex = None; + for attr in &event.attributes { + match attr.key.as_str() { + "packet_data_hex" => packet_data_hex = Some(attr.value.clone()), + "packet_sequence" => sequence = Some(attr.value.clone()), + "packet_src_port" => port_on_a = Some(attr.value.clone()), + "packet_src_channel" => chan_on_a = Some(attr.value.clone()), + "packet_dst_port" => port_on_b = Some(attr.value.clone()), + "packet_dst_channel" => chan_on_b = Some(attr.value.clone()), + "packet_timeout_height" => timeout_height_on_b = Some(attr.value.clone()), + "packet_timeout_timestamp" => { + timeout_timestamp_on_b = Some(attr.value.clone()) + } + "packet_ack_hex" => packet_ack_hex = Some(attr.value.clone()), + _ => (), + } + } + + let port_on_a = port_on_a.expect("port_on_a attribute should be present"); + let chan_on_a = chan_on_a.expect("chan_on_a attribute should be present"); + let port_on_b = port_on_b.expect("port_on_b attribute should be present"); + let chan_on_b = chan_on_b.expect("chan_on_b attribute should be present"); + let sequence = sequence.expect("sequence attribute should be present"); + let timeout_height_on_b = + timeout_height_on_b.expect("timeout_height_on_b attribute should be present"); + let 
timeout_timestamp_on_b = timeout_timestamp_on_b + .expect("timeout_timestamp_on_b attribute should be present"); + let packet_data_hex = + packet_data_hex.expect("packet_data_hex attribute should be present"); + let packet_ack_hex = + packet_ack_hex.expect("packet_ack_hex attribute should be present"); + + let chain_b_snapshot = self.chain_b_ibc.storage.latest_snapshot(); + let (_commitment, proof_acked_on_b) = chain_b_snapshot + .get_with_proof( + format!( + "ibc-data/acks/ports/{port_on_b}/channels/{chan_on_b}/sequences/{sequence}" + ) + .as_bytes() + .to_vec(), + ) + .await?; + + // Now update the chains + let _chain_a_height = self._build_and_send_update_client_b().await?; + let chain_b_height = self._build_and_send_update_client_a().await?; + + let proof_height = chain_b_height; + + let msg_ack = MsgAcknowledgement { + signer: self.chain_a_ibc.signer.clone(), + packet: Packet { + sequence: Sequence::from_str(&sequence)?, + port_on_a: PortId::from_str(&port_on_a)?, + chan_on_a: ChannelId::from_str(&chan_on_a)?, + port_on_b: PortId::from_str(&port_on_b)?, + chan_on_b: ChannelId::from_str(&chan_on_b)?, + data: hex::decode(packet_data_hex)?, + timeout_height_on_b: TimeoutHeight::from_str(&timeout_height_on_b)?, + timeout_timestamp_on_b: Timestamp::from_str(&timeout_timestamp_on_b)?, + }, + acknowledgement: hex::decode(packet_ack_hex)?, + proof_acked_on_b, + proof_height_on_b: Height { + revision_height: proof_height.revision_height, + revision_number: 0, + }, + }; + + let plan = { + let ics20_msg = penumbra_transaction::ActionPlan::IbcAction( + IbcRelay::Acknowledgement(msg_ack), + ) + .into(); + TransactionPlan { + actions: vec![ics20_msg], + // Now fill out the remaining parts of the transaction needed for verification: + memo: None, + detection_data: None, // We'll set this automatically below + transaction_parameters: TransactionParameters { + chain_id: self.chain_a_ibc.chain_id.clone(), + ..Default::default() + }, + } + }; + + let tx = self + .chain_a_ibc + 
.client() + .await? + .witness_auth_build(&plan) + .await?; + + self.chain_a_ibc + .node + .block() + .with_data(vec![tx.encode_to_vec()]) + .execute() + .await?; + } + } + + self.chain_a_ibc.node.block().execute().await?; + self.chain_b_ibc.node.block().execute().await?; + self._sync_chains().await?; + + Ok(()) + } } // tell chain A about chain B. returns the height of chain b on chain a after update. @@ -1456,5 +1853,8 @@ async fn _build_and_send_update_client( .execute() .await?; - Ok(chain_b_height) + Ok(Height { + revision_height: chain_b_new_height as u64, + revision_number: 0, + }) } diff --git a/crates/core/app/tests/ibc_handshake.rs b/crates/core/app/tests/ics23_transfer.rs similarity index 52% rename from crates/core/app/tests/ibc_handshake.rs rename to crates/core/app/tests/ics23_transfer.rs index 2068d9d6b8..14dc459db2 100644 --- a/crates/core/app/tests/ibc_handshake.rs +++ b/crates/core/app/tests/ics23_transfer.rs @@ -1,6 +1,10 @@ use { + anyhow::anyhow, common::ibc_tests::{MockRelayer, TestNodeWithIBC, ValidatorKeys}, once_cell::sync::Lazy, + penumbra_asset::{asset::Cache, Value}, + penumbra_ibc::IbcToken, + penumbra_num::Amount, std::time::Duration, tap::Tap as _, }; @@ -11,9 +15,11 @@ pub static MAIN_STORE_PROOF_SPEC: Lazy> = mod common; -/// Exercises that the IBC handshake succeeds. +/// Exercises that the IBC handshake succeeds, and that +/// funds can be sent between the two chains successfully, +/// without any testing of error conditions. #[tokio::test] -async fn ibc_handshake() -> anyhow::Result<()> { +async fn ics20_transfer_no_timeouts() -> anyhow::Result<()> { // Install a test logger, and acquire some temporary storage. let guard = common::set_tracing_subscriber(); @@ -73,5 +79,67 @@ async fn ibc_handshake() -> anyhow::Result<()> { // TODO: some testing of failure cases of the handshake process would be good relayer.handshake().await?; + // Grab the note that will be spent during the transfer. 
+ let chain_a_client = relayer.chain_a_ibc.client().await?; + let chain_a_note = chain_a_client + .notes + .values() + .cloned() + .next() + .ok_or_else(|| anyhow!("mock client had no note"))?; + + // Get the balance of that asset on chain A + let pretransfer_balance_a: Amount = chain_a_client + .spendable_notes_by_asset(chain_a_note.asset_id()) + .map(|n| n.value().amount) + .sum(); + + // Get the balance of that asset on chain B + // The asset ID of the IBC transferred asset on chain B + // needs to be computed. + let asset_cache = Cache::with_known_assets(); + let denom = asset_cache + .get(&chain_a_note.asset_id()) + .expect("asset ID should exist in asset cache") + .clone(); + let ibc_token = IbcToken::new( + &relayer.chain_b_ibc.channel_id, + &relayer.chain_b_ibc.port_id, + &denom.to_string(), + ); + let chain_b_client = relayer.chain_b_ibc.client().await?; + let pretransfer_balance_b: Amount = chain_b_client + .spendable_notes_by_asset(ibc_token.id()) + .map(|n| n.value().amount) + .sum(); + + // We will transfer 50% of the `chain_a_note`'s value to the same address on chain B + let transfer_value = Value { + amount: (chain_a_note.amount().value() / 2).into(), + asset_id: chain_a_note.asset_id(), + }; + + // Tell the relayer to process the transfer. 
+ // TODO: currently this just transfers 50% of the first note + // but it'd be nice to have an API with a little more flexibility + relayer.transfer_from_a_to_b().await?; + + // Transfer complete, validate the balances: + let chain_a_client = relayer.chain_a_ibc.client().await?; + let chain_b_client = relayer.chain_b_ibc.client().await?; + let posttransfer_balance_a: Amount = chain_a_client + .spendable_notes_by_asset(chain_a_note.asset_id()) + .map(|n| n.value().amount) + .sum(); + + let posttransfer_balance_b: Amount = chain_b_client + .spendable_notes_by_asset(ibc_token.id()) + .map(|n| n.value().amount) + .sum(); + + assert!(posttransfer_balance_a < pretransfer_balance_a); + assert!(posttransfer_balance_b > pretransfer_balance_b); + assert_eq!(posttransfer_balance_b, transfer_value.amount); + Ok(()).tap(|_| drop(relayer)).tap(|_| drop(guard)) } diff --git a/crates/test/mock-client/src/lib.rs b/crates/test/mock-client/src/lib.rs index a46a29e86c..d8920c0a0a 100644 --- a/crates/test/mock-client/src/lib.rs +++ b/crates/test/mock-client/src/lib.rs @@ -3,7 +3,10 @@ use cnidarium::StateRead; use penumbra_compact_block::{component::StateReadExt as _, CompactBlock, StatePayload}; use penumbra_dex::{swap::SwapPlaintext, swap_claim::SwapClaimPlan}; use penumbra_keys::{keys::SpendKey, FullViewingKey}; -use penumbra_sct::component::{clock::EpochRead, tree::SctRead}; +use penumbra_sct::{ + component::{clock::EpochRead, tree::SctRead}, + Nullifier, +}; use penumbra_shielded_pool::{note, Note, SpendPlan}; use penumbra_tct as tct; use penumbra_transaction::{AuthorizationData, Transaction, TransactionPlan, WitnessData}; @@ -15,7 +18,11 @@ pub struct MockClient { latest_height: u64, sk: SpendKey, pub fvk: FullViewingKey, + /// All notes, whether spent or not. pub notes: BTreeMap, + pub nullifiers: BTreeMap, + /// Whether a note was spent or not. 
+ pub spent_notes: BTreeMap, swaps: BTreeMap, pub sct: penumbra_tct::Tree, } @@ -27,6 +34,8 @@ impl MockClient { fvk: sk.full_viewing_key().clone(), sk, notes: Default::default(), + spent_notes: Default::default(), + nullifiers: Default::default(), sct: Default::default(), swaps: Default::default(), } @@ -103,8 +112,12 @@ impl MockClient { StatePayload::Note { note: payload, .. } => { match payload.trial_decrypt(&self.fvk) { Some(note) => { - self.notes.insert(payload.note_commitment, note.clone()); self.sct.insert(Keep, payload.note_commitment)?; + let nullifier = self + .nullifier(payload.note_commitment) + .expect("newly inserted note should be present in sct"); + self.notes.insert(payload.note_commitment, note.clone()); + self.nullifiers.insert(payload.note_commitment, nullifier); } None => { self.sct.insert(Forget, payload.note_commitment)?; @@ -128,8 +141,18 @@ impl MockClient { let (output_1, output_2) = swap.output_notes(batch_data); // Pre-insert the output notes into our notes table, so that // we can notice them when we scan the block where they are claimed. - self.notes.insert(output_1.commit(), output_1); - self.notes.insert(output_2.commit(), output_2); + // TODO: We should handle tracking the nullifiers for these notes, + // however they aren't inserted into the SCT at this point. 
+ // let nullifier_1 = self + // .nullifier(output_1.commit()) + // .expect("newly inserted swap should be present in sct"); + // let nullifier_2 = self + // .nullifier(output_2.commit()) + // .expect("newly inserted swap should be present in sct"); + self.notes.insert(output_1.commit(), output_1.clone()); + // self.nullifiers.insert(output_1.commit(), nullifier_1); + self.notes.insert(output_2.commit(), output_2.clone()); + // self.nullifiers.insert(output_2.commit(), nullifier_2); } None => { self.sct.insert(Forget, payload.commitment)?; @@ -147,6 +170,24 @@ impl MockClient { } } } + + // Mark spent nullifiers + for nullifier in block.nullifiers { + // skip if we don't know about this nullifier + if !self.nullifiers.values().any(move |n| *n == nullifier) { + continue; + } + + self.spent_notes.insert( + *self + .nullifiers + .iter() + .find_map(|(k, v)| if *v == nullifier { Some(k) } else { None }) + .unwrap(), + (), + ); + } + self.sct.end_block()?; if block.epoch_root.is_some() { self.sct.end_epoch()?; @@ -173,6 +214,17 @@ impl MockClient { self.sct.witness(commitment).map(|proof| proof.position()) } + pub fn nullifier(&self, commitment: note::StateCommitment) -> Option { + let position = self.position(commitment); + + if position.is_none() { + return None; + } + let nk = self.fvk.nullifier_key(); + + Some(Nullifier::derive(&nk, position.unwrap(), &commitment)) + } + pub fn witness_commitment( &self, commitment: note::StateCommitment, @@ -224,4 +276,17 @@ impl MockClient { .values() .filter(move |n| n.asset_id() == asset_id) } + + pub fn spent_note(&self, commitment: ¬e::StateCommitment) -> bool { + self.spent_notes.contains_key(commitment) + } + + pub fn spendable_notes_by_asset( + &self, + asset_id: penumbra_asset::asset::Id, + ) -> impl Iterator + '_ { + self.notes + .values() + .filter(move |n| n.asset_id() == asset_id && !self.spent_note(&n.commit())) + } } diff --git a/crates/test/mock-consensus/src/block.rs b/crates/test/mock-consensus/src/block.rs index 
76a8a77c93..7eb45b10a6 100644 --- a/crates/test/mock-consensus/src/block.rs +++ b/crates/test/mock-consensus/src/block.rs @@ -6,8 +6,10 @@ use { crate::TestNode, prost::Message, sha2::{Digest, Sha256}, + std::ops::Deref, tap::Tap, tendermint::{ + abci::Event, account, block::{self, header::Version, Block, Commit, Header, Round}, evidence, @@ -114,7 +116,7 @@ where /// included in the block. Use [`Builder::without_signatures()`] to disable producing /// validator signatures. #[instrument(level = "info", skip_all, fields(height, time))] - pub async fn execute(self) -> Result<(), anyhow::Error> { + pub async fn execute(self) -> Result<(EndBlockEvents, DeliverTxEvents), anyhow::Error> { // Calling `finish` finishes the previous block // and prepares the current block. let (test_node, block) = self.finish()?; @@ -136,11 +138,21 @@ where trace!("sending block"); test_node.begin_block(header, last_commit_info).await?; + let mut deliver_tx_responses = Vec::new(); for tx in data { let tx = tx.into(); - test_node.deliver_tx(tx).await?; + // The caller may want to access the DeliverTx responses + deliver_tx_responses.push(test_node.deliver_tx(tx).await?); } - test_node.end_block().await?; + + // The CheckTx, BeginBlock, DeliverTx, EndBlock methods include an Events field. + // The mock consensus code only handles EndBlock and DeliverTx events. + // Extract the events emitted during end_block. + let events = test_node.end_block().await?.events; + let deliver_tx_events = deliver_tx_responses + .iter() + .flat_map(|response| response.events.clone()) + .collect::>(); // the commit call will set test_node.last_app_hash, preparing // for the next block to begin execution @@ -160,7 +172,7 @@ where // If an `on_block` callback was set, call it now. test_node.on_block.as_mut().map(move |f| f(block)); - Ok(()) + Ok((EndBlockEvents(events), DeliverTxEvents(deliver_tx_events))) } /// Consumes this builder, returning its [`TestNode`] reference and a [`Block`]. 
@@ -337,3 +349,25 @@ impl CommitHashingExt for Commit { } } } + +#[derive(Debug, Clone)] +pub struct EndBlockEvents(pub Vec); + +#[derive(Debug, Clone)] +pub struct DeliverTxEvents(pub Vec); + +impl Deref for DeliverTxEvents { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Deref for EndBlockEvents { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} From ee3cc9daae64867a5688909ed7efa72d7663e606 Mon Sep 17 00:00:00 2001 From: Conor Schaefer Date: Wed, 11 Sep 2024 13:19:26 -0700 Subject: [PATCH 14/43] chore: update team email address Updates the email address used for the Penumbra Labs team in the cargo manifest. There's only one point of update, because the crates all reference the top-level workspace fields. Other crates outside the protocol monorepo will need to be updated separately. --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 6ed6f2e0f3..582168aa26 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -102,7 +102,7 @@ tag = true push = false [workspace.package] -authors = ["Penumbra Labs "] +authors = ["Penumbra Labs Date: Thu, 22 Aug 2024 11:19:55 -0700 Subject: [PATCH 15/43] build: bump rust to 1.80 Cleans up some unused cfg_attrs that weren't previously generating warnings. All are cosmetic, and don't affect application functionality. Also adds the `rust-analyzer` component the rust-toolchain toml, ensuring that the correct version of rust-analyzer (corresponding to the channel specified in toolchain.toml) is used, so that developers can easily hook up their IDEs. 
--- crates/bin/pindexer/Cargo.toml | 2 +- .../core/app/tests/common/temp_storage_ext.rs | 1 + crates/core/app/tests/common/test_node_ext.rs | 3 +++ .../app/tests/common/validator_read_ext.rs | 3 +++ .../component/auction/src/component/auction.rs | 2 +- .../auction/src/component/auction_store.rs | 2 +- .../core/component/governance/src/proposal.rs | 8 -------- crates/core/component/governance/src/vote.rs | 4 ---- crates/crypto/proof-setup/Cargo.toml | 2 +- .../util/tower-trace/src/trace/service_span.rs | 2 -- flake.lock | 18 +++++++++--------- justfile | 4 ++++ rust-toolchain.toml | 4 ++-- 13 files changed, 26 insertions(+), 29 deletions(-) diff --git a/crates/bin/pindexer/Cargo.toml b/crates/bin/pindexer/Cargo.toml index 86805da998..89ef0d746b 100644 --- a/crates/bin/pindexer/Cargo.toml +++ b/crates/bin/pindexer/Cargo.toml @@ -17,7 +17,7 @@ cometindex = {workspace = true} num-bigint = { version = "0.4" } penumbra-shielded-pool = {workspace = true, default-features = false} penumbra-stake = {workspace = true, default-features = false} -penumbra-app = {workspace = true, default-features = false} +penumbra-app = {workspace = true} penumbra-dex = {workspace = true, default-features = false} penumbra-governance = {workspace = true, default-features = false} penumbra-num = {workspace = true, default-features = false} diff --git a/crates/core/app/tests/common/temp_storage_ext.rs b/crates/core/app/tests/common/temp_storage_ext.rs index efff367ca8..f1a2496ded 100644 --- a/crates/core/app/tests/common/temp_storage_ext.rs +++ b/crates/core/app/tests/common/temp_storage_ext.rs @@ -8,6 +8,7 @@ use { #[async_trait] pub trait TempStorageExt: Sized { async fn apply_genesis(self, genesis: AppState) -> anyhow::Result; + #[allow(dead_code)] async fn apply_default_genesis(self) -> anyhow::Result; async fn new_with_penumbra_prefixes() -> anyhow::Result; } diff --git a/crates/core/app/tests/common/test_node_ext.rs b/crates/core/app/tests/common/test_node_ext.rs index 
48a7f55fd2..241c3c25f6 100644 --- a/crates/core/app/tests/common/test_node_ext.rs +++ b/crates/core/app/tests/common/test_node_ext.rs @@ -1,3 +1,6 @@ +// These mock-consensus helper traits aren't consumed just yet +#![allow(dead_code)] + use { async_trait::async_trait, cnidarium::TempStorage, penumbra_mock_consensus::TestNode, penumbra_sct::component::clock::EpochRead as _, tap::Tap, diff --git a/crates/core/app/tests/common/validator_read_ext.rs b/crates/core/app/tests/common/validator_read_ext.rs index 5788bed9d7..a88b4836ac 100644 --- a/crates/core/app/tests/common/validator_read_ext.rs +++ b/crates/core/app/tests/common/validator_read_ext.rs @@ -1,3 +1,6 @@ +// These mock-consensus helper traits aren't consumed just yet. +#![allow(dead_code)] + use { async_trait::async_trait, futures::TryStreamExt, diff --git a/crates/core/component/auction/src/component/auction.rs b/crates/core/component/auction/src/component/auction.rs index 7a4b34ceb6..2bf431db16 100644 --- a/crates/core/component/auction/src/component/auction.rs +++ b/crates/core/component/auction/src/component/auction.rs @@ -200,5 +200,5 @@ pub(crate) trait AuctionCircuitBreaker: StateWrite { impl AuctionCircuitBreaker for T {} -#[cfg(tests)] +#[cfg(test)] mod tests {} diff --git a/crates/core/component/auction/src/component/auction_store.rs b/crates/core/component/auction/src/component/auction_store.rs index 4dd0d7800d..75301a170b 100644 --- a/crates/core/component/auction/src/component/auction_store.rs +++ b/crates/core/component/auction/src/component/auction_store.rs @@ -54,5 +54,5 @@ pub trait AuctionStoreRead: StateRead { impl AuctionStoreRead for T {} -#[cfg(tests)] +#[cfg(test)] mod tests {} diff --git a/crates/core/component/governance/src/proposal.rs b/crates/core/component/governance/src/proposal.rs index 050bc21fa6..d979dc444e 100644 --- a/crates/core/component/governance/src/proposal.rs +++ b/crates/core/component/governance/src/proposal.rs @@ -202,29 +202,21 @@ impl TryFrom for Proposal { /// 
The specific kind of a proposal. #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] -#[cfg_attr(feature = "clap", derive(clap::Subcommand))] #[serde(try_from = "pb::ProposalKind", into = "pb::ProposalKind")] pub enum ProposalKind { /// A signaling proposal. - #[cfg_attr(feature = "clap", clap(display_order = 100))] Signaling, /// An emergency proposal. - #[cfg_attr(feature = "clap", clap(display_order = 200))] Emergency, /// A parameter change proposal. - #[cfg_attr(feature = "clap", clap(display_order = 300))] ParameterChange, /// A Community Pool spend proposal. - #[cfg_attr(feature = "clap", clap(display_order = 400))] CommunityPoolSpend, /// An upgrade proposal. - #[cfg_attr(feature = "clap", clap(display_order = 500))] UpgradePlan, /// A proposal to freeze an IBC client. - #[cfg_attr(feature = "clap", clap(display_order = 600))] FreezeIbcClient, /// A proposal to unfreeze an IBC client. - #[cfg_attr(feature = "clap", clap(display_order = 700))] UnfreezeIbcClient, } diff --git a/crates/core/component/governance/src/vote.rs b/crates/core/component/governance/src/vote.rs index ee3b1948c8..1ddf27723d 100644 --- a/crates/core/component/governance/src/vote.rs +++ b/crates/core/component/governance/src/vote.rs @@ -11,16 +11,12 @@ use serde::{Deserialize, Serialize}; #[derive(Clone, Copy, Debug, Deserialize, Serialize, Eq, PartialEq)] #[serde(try_from = "pb::Vote", into = "pb::Vote")] #[cfg_attr(test, derive(proptest_derive::Arbitrary))] -#[cfg_attr(feature = "clap", derive(clap::Subcommand))] pub enum Vote { /// Vote to approve the proposal. - #[cfg_attr(feature = "clap", clap(display_order = 100))] Yes, /// Vote is to reject the proposal. - #[cfg_attr(feature = "clap", clap(display_order = 200))] No, /// Vote to abstain from the proposal. 
- #[cfg_attr(feature = "clap", clap(display_order = 300))] Abstain, } diff --git a/crates/crypto/proof-setup/Cargo.toml b/crates/crypto/proof-setup/Cargo.toml index 163149ffe7..fe2490c5d5 100644 --- a/crates/crypto/proof-setup/Cargo.toml +++ b/crates/crypto/proof-setup/Cargo.toml @@ -24,7 +24,7 @@ anyhow = {workspace = true} ark-ec = {workspace = true, default-features = false} ark-ff = {workspace = true, default-features = false} ark-groth16 = {workspace = true, default-features = false} -ark-poly = { version = "0.4.2", default_features = false } +ark-poly = { version = "0.4.2", default-features = false } ark-relations = {workspace = true} ark-serialize = {workspace = true} blake2b_simd = {workspace = true} diff --git a/crates/util/tower-trace/src/trace/service_span.rs b/crates/util/tower-trace/src/trace/service_span.rs index 4221802ff4..ab016321b3 100644 --- a/crates/util/tower-trace/src/trace/service_span.rs +++ b/crates/util/tower-trace/src/trace/service_span.rs @@ -101,8 +101,6 @@ pub mod make { _p: PhantomData, } - #[cfg(feature = "tower-layer")] - #[cfg_attr(docsrs, doc(cfg(feature = "tower-layer")))] pub fn layer(get_span: G) -> MakeLayer where G: GetSpan + Clone, diff --git a/flake.lock b/flake.lock index bb0806b107..39abe157f6 100644 --- a/flake.lock +++ b/flake.lock @@ -7,11 +7,11 @@ ] }, "locked": { - "lastModified": 1721058578, - "narHash": "sha256-fs/PVa3H5dS1//4BjecWi3nitXm5fRObx0JxXIAo+JA=", + "lastModified": 1724537630, + "narHash": "sha256-gpqINM71zp3kw5XYwUXa84ZtPnCmLLnByuFoYesT1bY=", "owner": "ipetkov", "repo": "crane", - "rev": "17e5109bb1d9fb393d70fba80988f7d70d1ded1a", + "rev": "3e08f4b1fc9aaede5dd511d8f5f4ef27501e49b0", "type": "github" }, "original": { @@ -40,11 +40,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1720957393, - "narHash": "sha256-oedh2RwpjEa+TNxhg5Je9Ch6d3W1NKi7DbRO1ziHemA=", + "lastModified": 1724819573, + "narHash": "sha256-GnR7/ibgIH1vhoy8cYdmXE6iyZqKqFxQSVkFgosBh6w=", "owner": "NixOS", "repo": "nixpkgs", - "rev": 
"693bc46d169f5af9c992095736e82c3488bf7dbb", + "rev": "71e91c409d1e654808b2621f28a327acfdad8dc2", "type": "github" }, "original": { @@ -69,11 +69,11 @@ ] }, "locked": { - "lastModified": 1721096425, - "narHash": "sha256-9/58mnoDCyBHsJZwTg3MfgX3kgVqP/SzGMy0WnnWII8=", + "lastModified": 1724898214, + "narHash": "sha256-4yMO9+Lsr3zqTf4clAGGag/bfNTmc/ITOXbJQcOEok4=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "1c95d396d7395829b5c06bea84fb1dd23169ca42", + "rev": "0bc2c784e3a6ce30a2ab1b9f47325ccbed13039f", "type": "github" }, "original": { diff --git a/justfile b/justfile index e11ac43fdf..e7c72fe9c6 100644 --- a/justfile +++ b/justfile @@ -15,6 +15,10 @@ dev: fmt: cargo fmt --all +# Runs 'cargo check' on all rust files in the project. +check: + RUSTFLAGS="-D warnings" cargo check --release --all-targets + # Render livereload environment for editing the Protocol documentation. protocol-docs: # Access local docs at http://127.0.0.1:3002 diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 3ff2a27f7a..23e8b42119 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,8 +1,8 @@ [toolchain] # We set a specific version of rust so that CI workflows use the same # version development environments do. -channel = "1.75" -components = [ "rustfmt" ] +channel = "1.80" +components = [ "rustfmt", "rust-analyzer" ] # Include wasm toolchain, for CI tests to check wasm32 build targets still work, # to avoid downstream breakage in `penumbra-wasm` crate, in the web repo. targets = [ "wasm32-unknown-unknown" ] From 3beea92ceac30c2f3d782a3f6ed094622c40cdb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=BAc=C3=A1s=20Meier?= Date: Thu, 12 Sep 2024 14:46:57 -0700 Subject: [PATCH 16/43] Cometindex performance improvements (#4851) ## Describe your changes This improves the performance of cometindex significantly, especially when many events need to be indexed. Two accidental problems: 1. 
we forgot to add an index between attributes and events, to efficiently get the attributes associated with an event. This alone caused quadratic query performance, which is really bad. 2. Postgres was doing sorting and hashing to join each attribute + event with the blocks and transactions, before grouping attributes. First of all, we only want to join the blocks and transactions after already having grouped the attributes together, to avoid adding a constant factor overhead, since some events may have a handful of attributes. Second of all, we shouldn't be sorting or hash merging at all. The query should be linear and streaming in complexity, and operate by scanning the events table in order, and then selectively plucking the other tables' columns using their indices referencing the event id. This PR amends the query to make Postgres actually do this, mainly by informing it that only a single block or transaction will get joined with each event. ### Some performance evidence Previously, when starting up pindexer from scratch, it would take 200 seconds before being able to start processing events. Now it takes milliseconds. 
old query: ``` penumbra_raw=# EXPLAIN SELECT events.rowid, events.type, blocks.height AS block_height, tx_results.tx_hash, jsonb_object_agg(attributes.key, attributes.value) AS attrs FROM events LEFT JOIN attributes ON events.rowid = attributes.event_id JOIN blocks ON events.block_id = blocks.rowid LEFT JOIN tx_results ON events.tx_id = tx_results.rowid WHERE events.rowid > 1000 GROUP BY events.rowid, events.type, blocks.height, tx_results.tx_hash ORDER BY events.rowid ASC; QUERY PLAN -------------------------------------------------------------------------------------------------------------------- GroupAggregate (cost=1444517.14..4825338.21 rows=27193816 width=162) Group Key: events.rowid, blocks.height, tx_results.tx_hash -> Merge Left Join (cost=1444517.14..4213477.35 rows=27193816 width=187) Merge Cond: (events.rowid = attributes.event_id) -> Gather Merge (cost=1444516.70..2690182.07 rows=10695484 width=130) Workers Planned: 2 -> Sort (cost=1443516.68..1454657.81 rows=4456452 width=130) Sort Key: events.rowid, blocks.height, tx_results.tx_hash -> Parallel Hash Left Join (cost=28256.44..342071.06 rows=4456452 width=130) Hash Cond: (events.tx_id = tx_results.rowid) -> Parallel Hash Join (cost=19944.07..322060.42 rows=4456452 width=72) Hash Cond: (events.block_id = blocks.rowid) -> Parallel Seq Scan on events (cost=0.00..183912.12 rows=4456452 width=72) Filter: (rowid > 1000) -> Parallel Hash (cost=12626.92..12626.92 rows=420892 width=16) -> Parallel Seq Scan on blocks (cost=0.00..12626.92 rows=420892 width=16) -> Parallel Hash (cost=7950.50..7950.50 rows=28950 width=74) -> Parallel Seq Scan on tx_results (cost=0.00..7950.50 rows=28950 width=74) -> Index Scan using attributes_event_id_idx on attributes (cost=0.44..1156627.19 rows=27196491 width=65) JIT: Functions: 28 Options: Inlining true, Optimization true, Expressions true, Deforming true ``` new query: ``` penumbra_raw=# EXPLAIN SELECT events.rowid, events.type, blocks.height AS block_height, 
tx_results.tx_hash, events.attrs FROM ( SELECT rowid, type, block_id, tx_id, jsonb_object_agg(attributes.key, attributes.value) AS attrs FROM events LEFT JOIN attributes ON rowid = attributes.event_id WHERE rowid > 1000 GROUP BY rowid, type, block_id, tx_id ) events LEFT JOIN LATERAL ( SELECT * FROM blocks WHERE blocks.rowid = events.block_id LIMIT 1 ) blocks ON TRUE LEFT JOIN LATERAL ( SELECT * FROM tx_results WHERE tx_results.rowid = events.tx_id LIMIT 1 ) tx_results ON TRUE ORDER BY events.rowid ASC; QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------- Nested Loop Left Join (cost=1.59..181924467.06 rows=10694733 width=162) -> Nested Loop Left Join (cost=1.30..92837341.17 rows=10694733 width=104) -> GroupAggregate (cost=0.87..2226215.83 rows=10694733 width=104) Group Key: events.rowid -> Merge Left Join (cost=0.87..1956570.52 rows=27192229 width=129) Merge Cond: (events.rowid = attributes.event_id) -> Index Scan using events_pkey on events (cost=0.43..433368.16 rows=10694733 width=72) Index Cond: (rowid > 1000) -> Index Scan using attributes_event_id_idx on attributes (cost=0.44..1156555.97 rows=27194904 width=65) -> Limit (cost=0.42..8.44 rows=1 width=56) -> Index Scan using blocks_pkey on blocks (cost=0.42..8.44 rows=1 width=56) Index Cond: (rowid = events.block_id) -> Limit (cost=0.29..8.31 rows=1 width=126) -> Index Scan using tx_results_pkey on tx_results (cost=0.29..8.31 rows=1 width=126) Index Cond: (rowid = events.tx_id) JIT: Functions: 24 Options: Inlining true, Optimization true, Expressions true, Deforming true ``` ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: indexing only. 
> REPLACE THIS TEXT WITH RATIONALE (CAN BE BRIEF) --- crates/util/cometindex/src/indexer.rs | 62 ++++++++++++++++-------- crates/util/cometindex/vendor/schema.sql | 3 ++ 2 files changed, 45 insertions(+), 20 deletions(-) diff --git a/crates/util/cometindex/src/indexer.rs b/crates/util/cometindex/src/indexer.rs index 27e58acb83..fa21a0af1d 100644 --- a/crates/util/cometindex/src/indexer.rs +++ b/crates/util/cometindex/src/indexer.rs @@ -141,7 +141,7 @@ impl Indexer { let watermark = current_watermark.unwrap_or(0); // Calculate new events count since the last watermark - sqlx::query_as::<_, (i64,)>("SELECT COUNT(*) FROM events WHERE rowid > $1") + sqlx::query_as::<_, (i64,)>("SELECT MAX(rowid) - $1 FROM events") .bind(watermark) .fetch_one(src_db) .await @@ -217,28 +217,50 @@ fn read_events( watermark: i64, ) -> Pin> + Send + '_>> { let event_stream = sqlx::query_as::<_, (i64, String, i64, Option, serde_json::Value)>( + // This query does some shenanigans to ensure good performance. + // The main trick is that we know that each event has 1 block and <= 1 transaction associated + // with it, so we can "encourage" (force) Postgres to avoid doing a hash join and + // then a sort, and instead work from the events in a linear fashion. + // Basically, this query ends up doing: + // + // for event in events >= id: + // attach attributes + // attach block + // attach transaction? 
r#" -SELECT - events.rowid, - events.type, +SELECT + events.rowid, + events.type, blocks.height AS block_height, tx_results.tx_hash, - jsonb_object_agg(attributes.key, attributes.value) AS attrs -FROM - events -LEFT JOIN - attributes ON events.rowid = attributes.event_id -JOIN - blocks ON events.block_id = blocks.rowid -LEFT JOIN - tx_results ON events.tx_id = tx_results.rowid -WHERE - events.rowid > $1 -GROUP BY - events.rowid, - events.type, - blocks.height, - tx_results.tx_hash + events.attrs +FROM ( + SELECT + rowid, + type, + block_id, + tx_id, + jsonb_object_agg(attributes.key, attributes.value) AS attrs + FROM + events + LEFT JOIN + attributes ON rowid = attributes.event_id + WHERE + rowid > $1 + GROUP BY + rowid, + type, + block_id, + tx_id +) events +LEFT JOIN LATERAL ( + SELECT * FROM blocks WHERE blocks.rowid = events.block_id LIMIT 1 +) blocks +ON TRUE +LEFT JOIN LATERAL ( + SELECT * FROM tx_results WHERE tx_results.rowid = events.tx_id LIMIT 1 +) tx_results +ON TRUE ORDER BY events.rowid ASC "#, diff --git a/crates/util/cometindex/vendor/schema.sql b/crates/util/cometindex/vendor/schema.sql index ce5a241bad..fd78e677e0 100644 --- a/crates/util/cometindex/vendor/schema.sql +++ b/crates/util/cometindex/vendor/schema.sql @@ -65,6 +65,9 @@ CREATE TABLE attributes ( UNIQUE (event_id, key) ); +-- To make it efficient to fetch the attributes of a given event. +CREATE INDEX ON attributes(event_id); + -- A joined view of events and their attributes. Events that do not have any -- attributes are represented as a single row with empty key and value fields. 
CREATE VIEW event_attributes AS From 90b0a55deef18552869151bdf2dccf8731c6aad7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=BAc=C3=A1s=20Meier?= Date: Fri, 13 Sep 2024 10:03:16 -0700 Subject: [PATCH 17/43] pindexer: Allow for multiple BatchSwaps in a single block (#4853) This fixes a bug where we assumed there was only one swap per block, ignoring the fact that each trading pair will have a different batch swap. ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > indexing only --- crates/bin/pindexer/src/dex/dex.sql | 3 ++- crates/bin/pindexer/src/dex/mod.rs | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/bin/pindexer/src/dex/dex.sql b/crates/bin/pindexer/src/dex/dex.sql index 720dbce968..a5d1c3c13d 100644 --- a/crates/bin/pindexer/src/dex/dex.sql +++ b/crates/bin/pindexer/src/dex/dex.sql @@ -116,7 +116,8 @@ CREATE TABLE IF NOT EXISTS dex_lp_execution ( --- Represents instances where swap executions happened. 
CREATE TABLE IF NOT EXISTS dex_batch_swap ( - height BIGINT PRIMARY KEY, + id SERIAL PRIMARY KEY, + height BIGINT NOT NULL, trace12_start INTEGER REFERENCES dex_trace (id), trace12_end INTEGER REFERENCES dex_trace (id), trace21_start INTEGER REFERENCES dex_trace (id), diff --git a/crates/bin/pindexer/src/dex/mod.rs b/crates/bin/pindexer/src/dex/mod.rs index f084a25d08..ec668d7bb8 100644 --- a/crates/bin/pindexer/src/dex/mod.rs +++ b/crates/bin/pindexer/src/dex/mod.rs @@ -368,7 +368,7 @@ impl Event { insert_swap_execution(dbtx, execution12.as_ref()).await?; let (trace21_start, trace21_end) = insert_swap_execution(dbtx, execution21.as_ref()).await?; - sqlx::query(r#"INSERT INTO dex_batch_swap VALUES ($1, $2, $3, $4, $5, $6, $7, CAST($8 AS Amount), CAST($9 AS Amount), CAST($10 AS Amount), CAST($11 AS Amount), CAST($12 AS Amount), CAST($13 AS Amount));"#) + sqlx::query(r#"INSERT INTO dex_batch_swap VALUES (DEFAULT, $1, $2, $3, $4, $5, $6, $7, CAST($8 AS Amount), CAST($9 AS Amount), CAST($10 AS Amount), CAST($11 AS Amount), CAST($12 AS Amount), CAST($13 AS Amount));"#) .bind(i64::try_from(*height)?) .bind(trace12_start) .bind(trace12_end) From 402675ef817b93a65cee37df0edc866030b53ff9 Mon Sep 17 00:00:00 2001 From: Lucas Meier Date: Thu, 12 Sep 2024 17:47:08 -0700 Subject: [PATCH 18/43] pindexer: remove unused event dependency in governance The governance indexer doesn't use the BlockRoot event, and so this change avoids triggering and running the indexer on that event, which wastes a noticeable amount of time. 
--- crates/bin/pindexer/src/governance.rs | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/crates/bin/pindexer/src/governance.rs b/crates/bin/pindexer/src/governance.rs index 4d2a1a91fc..ec49b0a74c 100644 --- a/crates/bin/pindexer/src/governance.rs +++ b/crates/bin/pindexer/src/governance.rs @@ -6,10 +6,7 @@ use penumbra_governance::{ }; use penumbra_num::Amount; use penumbra_proto::{ - core::component::{ - governance::v1::{self as pb}, - sct::v1 as sct_pb, - }, + core::component::governance::v1::{self as pb}, event::ProtoEvent, }; use penumbra_stake::IdentityKey; @@ -27,7 +24,6 @@ const EVENT_PROPOSAL_FAILED: &str = "penumbra.core.component.governance.v1.Event const EVENT_PROPOSAL_SLASHED: &str = "penumbra.core.component.governance.v1.EventProposalSlashed"; const EVENT_PROPOSAL_DEPOSIT_CLAIM: &str = "penumbra.core.component.governance.v1.EventProposalDepositClaim"; -const EVENT_BLOCK_ROOT: &str = "penumbra.core.component.sct.v1.EventBlockRoot"; const ALL_RELEVANT_EVENTS: &[&str] = &[ EVENT_PROPOSAL_SUBMIT, EVENT_DELEGATOR_VOTE, @@ -37,7 +33,6 @@ const ALL_RELEVANT_EVENTS: &[&str] = &[ EVENT_PROPOSAL_FAILED, EVENT_PROPOSAL_SLASHED, EVENT_PROPOSAL_DEPOSIT_CLAIM, - EVENT_BLOCK_ROOT, ]; #[async_trait] @@ -328,10 +323,6 @@ impl AppView for GovernanceProposals { .context("error converting deposit claim")?; handle_proposal_deposit_claim(dbtx, deposit_claim).await?; } - EVENT_BLOCK_ROOT => { - let pe = sct_pb::EventBlockRoot::from_event(event.as_ref())?; - handle_block_root(dbtx, pe.height).await?; - } _ => {} } @@ -544,7 +535,3 @@ async fn handle_proposal_deposit_claim( Ok(()) } - -async fn handle_block_root(_dbtx: &mut PgTransaction<'_>, _height: u64) -> Result<()> { - Ok(()) -} From 649b990a7778bde3ec36a10f8129d9e6a3c98fa9 Mon Sep 17 00:00:00 2001 From: Tal Derei Date: Mon, 16 Sep 2024 11:16:32 -0700 Subject: [PATCH 19/43] getters for action list struct --- crates/core/transaction/src/action_list.rs | 16 ++++++++++++++++ 1 file changed, 
16 insertions(+) diff --git a/crates/core/transaction/src/action_list.rs b/crates/core/transaction/src/action_list.rs index d1d0b1c2fc..9d20c8f042 100644 --- a/crates/core/transaction/src/action_list.rs +++ b/crates/core/transaction/src/action_list.rs @@ -1,4 +1,5 @@ use anyhow::Result; +use penumbra_proto::core::transaction::v1::Action; use std::collections::BTreeMap; use crate::plan::MemoPlan; @@ -29,6 +30,21 @@ pub struct ActionList { } impl ActionList { + /// Returns an immutable reference to a list of action plans. + pub fn actions(&self) -> &Vec { + &self.actions + } + + /// Returns an immutable reference to a map of change outputs. + pub fn change_outputs(&self) -> &BTreeMap { + &self.change_outputs + } + + /// Returns an immutable reference to the fee. + pub fn fee(&self) -> &Fee { + &self.fee + } + /// Returns true if the resulting transaction would require a memo. pub fn requires_memo(&self) -> bool { let has_change_outputs = !self.change_outputs.is_empty(); From 5bf84e34bfa272a1e0024cfc0e033f2c623679d3 Mon Sep 17 00:00:00 2001 From: Tal Derei Date: Mon, 16 Sep 2024 11:21:41 -0700 Subject: [PATCH 20/43] linting: --- crates/core/transaction/src/action_list.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/core/transaction/src/action_list.rs b/crates/core/transaction/src/action_list.rs index 9d20c8f042..8c9ce51b58 100644 --- a/crates/core/transaction/src/action_list.rs +++ b/crates/core/transaction/src/action_list.rs @@ -1,5 +1,4 @@ use anyhow::Result; -use penumbra_proto::core::transaction::v1::Action; use std::collections::BTreeMap; use crate::plan::MemoPlan; From 6c0ba1cd814880f4b1d509e839514acf43588905 Mon Sep 17 00:00:00 2001 From: Conor Schaefer Date: Tue, 27 Aug 2024 11:44:35 -0700 Subject: [PATCH 21/43] docs: add issue template for point-release This is the issue format I've been using for e.g. v0.80.3 (#4829), so I'm adding to the repo, to make it easier to find and reuse. 
In the process, I'm cleaning out the old "release testnet" checklist, because that's not a process we follow regularly anymore: we don't have regular or frequent chain resets. --- .github/ISSUE_TEMPLATE/point_release.md | 30 +++++++++++++++ .github/ISSUE_TEMPLATE/release_testnet.md | 46 ----------------------- 2 files changed, 30 insertions(+), 46 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/point_release.md delete mode 100644 .github/ISSUE_TEMPLATE/release_testnet.md diff --git a/.github/ISSUE_TEMPLATE/point_release.md b/.github/ISSUE_TEMPLATE/point_release.md new file mode 100644 index 0000000000..dcb6aae726 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/point_release.md @@ -0,0 +1,30 @@ +--- +name: Point release +about: Checklist for preparing a point release for tooling +title: Release vX.Y.Z, via point-release +labels: '' +assignees: '' +--- + +# Tooling Release + + +In order to ship some minor improvements and bug fixes, let's prepare a `vX.Y.Z.` release, flushing out the current contents of the main branch. + +## Changes to include + + +- [ ] Everything on current main +- [ ] Feature foo in PR: +- [ ] Feature bar in PR: + +## Compatibility +As this is a point-release, all changes must be fully compatible for all nodes and clients. 
+Careful attention should be given to the delta between most recent tag on the main branch: +https://github.com/penumbra-zone/penumbra/compare/v(X.Y.(Z-1)..main diff --git a/.github/ISSUE_TEMPLATE/release_testnet.md b/.github/ISSUE_TEMPLATE/release_testnet.md deleted file mode 100644 index d27ec313fb..0000000000 --- a/.github/ISSUE_TEMPLATE/release_testnet.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -name: Testnet release -about: Checklist for releasing a testnet -title: '' -labels: '' -assignees: '' - ---- - -# Testnet Release - -Testnet name: X -Release date: X -Testnet release manager: X - -# Testnet Release Manager Checklist - -Preceding Friday (sprint planning day): - -- [ ] Create GitHub project column, work with team to populate the milestone with tickets targeted for the release. - -Tuesday (or after release of previous testnet): - -- [ ] Construct the genesis data for the release: - - [ ] Create new testnet directory with initial genesis allocations for this testnet by running `cd testnets && ./new-testnet.sh` - - This genesis data will be used for `testnet-preview` with a randomized version of the future testnet's chain ID. - -Thursday: - -- [ ] Check in with team again in a release meeting and update the GitHub milestone to ensure it represents what will make it into the testnet. -- [ ] Draft an announcement for peer review to ensure major changes included are comprehensive. - -Following Monday (release day): - -- [ ] Verify that `testnet-preview.penumbra.zone` is operational; it is redeployed on every push to main, and is an exact preview of what is about to be deployed. -- [ ] Bump the version number and push its tag, via [cargo-release](https://crates.io/crates/cargo-release). - - [ ] Run `cargo release minor` for a new testnet, or `cargo release patch` for a bugfix. For the latter, make sure you're on a dedicated release branch. - - [ ] Push the commit and newly generated tag, e.g. `v0.51.0`, to the remote. 
-- [ ] Wait for the ["Release" workflow](https://github.com/penumbra-zone/penumbra/actions/workflows/release.yml) to complete: it'll take ~60m, most of which is the Windows builds. -- [ ] Edit the newly created (and published) release object, then click "Generate release notes." Cut and paste the generated text from the bottom to the top of the post, then save it. -- [ ] You must [manually review](https://docs.github.com/en/actions/managing-workflow-runs/reviewing-deployments) the [`Waiting` deployment in the GitHub Actions UI](https://github.com/penumbra-zone/penumbra/actions/workflows/deploy-testnet.yml) before the deployment will begin. Monitor the GitHub action to ensure it completes after it is approved. -- [ ] Delegate to the Penumbra Labs CI validators; use amounts of ~200k `penumbra` per validator. -- [ ] Open a position to provide some initial liquidity: `pcli tx lp replicate xyk penumbra:test_usd 20000penumbra --current-price 20` -- [ ] Update Galileo deployment, [following docs](https://github.com/penumbra-zone/galileo). -- [ ] Update Osiris deployment, [following docs](https://github.com/penumbra-zone/osiris). -- [ ] Make the announcement to Discord! 🎉🎉🎉 From ae4ed057fc306d967f9cc816906f09fee668549d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=BAc=C3=A1s=20Meier?= Date: Tue, 17 Sep 2024 10:45:22 -0700 Subject: [PATCH 22/43] cometindex: speedup by committing event changes in batches of 1000 (#4854) Instead of creating one transaction for each event we need to index, we instead only close this transaction every 1000 events (or when when we've caught up to the database). This gives about a 5x performance in catch up speed. ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. 
Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > indexing only --- crates/util/cometindex/src/indexer.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/crates/util/cometindex/src/indexer.rs b/crates/util/cometindex/src/indexer.rs index fa21a0af1d..962b845dfb 100644 --- a/crates/util/cometindex/src/indexer.rs +++ b/crates/util/cometindex/src/indexer.rs @@ -152,6 +152,7 @@ impl Indexer { let mut relevant_events = 0usize; let mut es = read_events(&src_db, watermark); + let mut dbtx = dst_db.begin().await?; while let Some(event) = es.next().await.transpose()? { if scanned_events % 1000 == 0 { tracing::info!(scanned_events, relevant_events); @@ -178,8 +179,6 @@ impl Indexer { relevant_events += 1; - // Otherwise we have something to process. Make a dbtx - let mut dbtx = dst_db.begin().await?; for index in indexes { if index.is_relevant(&event.as_ref().kind) { tracing::debug!(?event, ?index, "relevant to index"); @@ -188,8 +187,15 @@ impl Indexer { } // Mark that we got to at least this event update_watermark(&mut dbtx, event.local_rowid).await?; - dbtx.commit().await?; + // Only commit in batches of <= 1000 events, for about a 5x performance increase when + // catching up. + if relevant_events % 1000 == 0 { + dbtx.commit().await?; + dbtx = dst_db.begin().await?; + } } + // Flush out the remaining changes. 
+ dbtx.commit().await?; Ok(()) } From 43f6e9375354b4684d4232e89270244e803175f6 Mon Sep 17 00:00:00 2001 From: Conor Schaefer Date: Tue, 17 Sep 2024 11:08:37 -0700 Subject: [PATCH 23/43] chore: release version 0.80.5 --- Cargo.lock | 96 +++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 49 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7d770d09ee..c70189105a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1233,7 +1233,7 @@ dependencies = [ [[package]] name = "cnidarium" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "async-trait", @@ -1269,7 +1269,7 @@ dependencies = [ [[package]] name = "cnidarium-component" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "async-trait", @@ -1307,7 +1307,7 @@ dependencies = [ [[package]] name = "cometindex" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "async-trait", @@ -1668,7 +1668,7 @@ dependencies = [ [[package]] name = "decaf377-fmd" -version = "0.80.4" +version = "0.80.5" dependencies = [ "ark-ff", "ark-serialize", @@ -1683,7 +1683,7 @@ dependencies = [ [[package]] name = "decaf377-frost" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -1698,7 +1698,7 @@ dependencies = [ [[package]] name = "decaf377-ka" -version = "0.80.4" +version = "0.80.5" dependencies = [ "ark-ff", "decaf377", @@ -4213,7 +4213,7 @@ dependencies = [ [[package]] name = "pcli" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -4295,7 +4295,7 @@ dependencies = [ [[package]] name = "pclientd" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "assert_cmd", @@ -4347,7 +4347,7 @@ dependencies = [ [[package]] name = "pd" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -4500,7 +4500,7 @@ dependencies = [ [[package]] name = "penumbra-app" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -4588,7 +4588,7 @@ 
dependencies = [ [[package]] name = "penumbra-asset" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -4628,7 +4628,7 @@ dependencies = [ [[package]] name = "penumbra-auction" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -4683,7 +4683,7 @@ dependencies = [ [[package]] name = "penumbra-auto-https" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "axum-server", @@ -4695,7 +4695,7 @@ dependencies = [ [[package]] name = "penumbra-bench" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-bls12-377", @@ -4739,7 +4739,7 @@ dependencies = [ [[package]] name = "penumbra-community-pool" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -4771,7 +4771,7 @@ dependencies = [ [[package]] name = "penumbra-compact-block" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -4806,7 +4806,7 @@ dependencies = [ [[package]] name = "penumbra-custody" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "argon2", @@ -4842,7 +4842,7 @@ dependencies = [ [[package]] name = "penumbra-dex" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -4904,7 +4904,7 @@ dependencies = [ [[package]] name = "penumbra-distributions" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "async-trait", @@ -4922,7 +4922,7 @@ dependencies = [ [[package]] name = "penumbra-eddy" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -4940,7 +4940,7 @@ dependencies = [ [[package]] name = "penumbra-fee" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -4967,7 +4967,7 @@ dependencies = [ [[package]] name = "penumbra-funding" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "async-trait", @@ -4990,7 +4990,7 @@ dependencies = [ [[package]] name = "penumbra-governance" -version = "0.80.4" +version = "0.80.5" dependencies = [ 
"anyhow", "ark-ff", @@ -5044,7 +5044,7 @@ dependencies = [ [[package]] name = "penumbra-ibc" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -5081,7 +5081,7 @@ dependencies = [ [[package]] name = "penumbra-keys" -version = "0.80.4" +version = "0.80.5" dependencies = [ "aes", "anyhow", @@ -5128,7 +5128,7 @@ dependencies = [ [[package]] name = "penumbra-measure" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "bytesize", @@ -5146,7 +5146,7 @@ dependencies = [ [[package]] name = "penumbra-mock-client" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "cnidarium", @@ -5163,7 +5163,7 @@ dependencies = [ [[package]] name = "penumbra-mock-consensus" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "bytes", @@ -5183,7 +5183,7 @@ dependencies = [ [[package]] name = "penumbra-mock-tendermint-proxy" -version = "0.80.4" +version = "0.80.5" dependencies = [ "hex", "pbjson-types", @@ -5198,7 +5198,7 @@ dependencies = [ [[package]] name = "penumbra-num" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -5235,7 +5235,7 @@ dependencies = [ [[package]] name = "penumbra-proof-params" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ec", @@ -5263,7 +5263,7 @@ dependencies = [ [[package]] name = "penumbra-proof-setup" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ec", @@ -5290,7 +5290,7 @@ dependencies = [ [[package]] name = "penumbra-proto" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "async-trait", @@ -5324,7 +5324,7 @@ dependencies = [ [[package]] name = "penumbra-sct" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -5360,7 +5360,7 @@ dependencies = [ [[package]] name = "penumbra-shielded-pool" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -5414,7 +5414,7 @@ dependencies = [ [[package]] name = "penumbra-stake" -version = "0.80.4" 
+version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -5467,7 +5467,7 @@ dependencies = [ [[package]] name = "penumbra-tct" -version = "0.80.4" +version = "0.80.5" dependencies = [ "ark-ed-on-bls12-377", "ark-ff", @@ -5499,7 +5499,7 @@ dependencies = [ [[package]] name = "penumbra-tct-property-test" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "futures", @@ -5511,7 +5511,7 @@ dependencies = [ [[package]] name = "penumbra-tct-visualize" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "axum", @@ -5541,7 +5541,7 @@ dependencies = [ [[package]] name = "penumbra-tendermint-proxy" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "chrono", @@ -5573,7 +5573,7 @@ dependencies = [ [[package]] name = "penumbra-test-subscriber" -version = "0.80.4" +version = "0.80.5" dependencies = [ "tracing", "tracing-subscriber 0.3.18", @@ -5581,7 +5581,7 @@ dependencies = [ [[package]] name = "penumbra-tower-trace" -version = "0.80.4" +version = "0.80.5" dependencies = [ "futures", "hex", @@ -5602,7 +5602,7 @@ dependencies = [ [[package]] name = "penumbra-transaction" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-ff", @@ -5655,7 +5655,7 @@ dependencies = [ [[package]] name = "penumbra-txhash" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "blake2b_simd 1.0.2", @@ -5668,7 +5668,7 @@ dependencies = [ [[package]] name = "penumbra-view" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-std", @@ -5726,7 +5726,7 @@ dependencies = [ [[package]] name = "penumbra-wallet" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-std", @@ -5812,7 +5812,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pindexer" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "clap", @@ -7662,7 +7662,7 @@ checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = 
"summonerd" -version = "0.80.4" +version = "0.80.5" dependencies = [ "anyhow", "ark-groth16", diff --git a/Cargo.toml b/Cargo.toml index 582168aa26..d471f6fab0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -104,7 +104,7 @@ push = false [workspace.package] authors = ["Penumbra Labs Date: Fri, 20 Sep 2024 11:16:59 -0400 Subject: [PATCH 24/43] fix(ibc): use `HostInterface` for all block height reads in ibc server (#4862) ## Describe your changes previously, the IBC gRPC server was accessing penumbra-specific state. this updates the server to use `HostInterface` for all block height reads. ## Issue ticket number and link n/a ## Checklist before requesting a review - [ ] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > only affects gRPC server responses --- .../ibc/src/component/rpc/client_query.rs | 53 +++++++++---------- .../ibc/src/component/rpc/connection_query.rs | 35 ++++++------ .../ibc/src/component/rpc/consensus_query.rs | 48 +++++++---------- 3 files changed, 62 insertions(+), 74 deletions(-) diff --git a/crates/core/component/ibc/src/component/rpc/client_query.rs b/crates/core/component/ibc/src/component/rpc/client_query.rs index 2c4d66ec0a..3c7fa28fb3 100644 --- a/crates/core/component/ibc/src/component/rpc/client_query.rs +++ b/crates/core/component/ibc/src/component/rpc/client_query.rs @@ -11,7 +11,6 @@ use ibc_proto::ibc::core::client::v1::{ QueryUpgradedClientStateResponse, QueryUpgradedConsensusStateRequest, QueryUpgradedConsensusStateResponse, }; -use penumbra_sct::component::clock::EpochRead; use prost::Message; use ibc_types::core::client::ClientId; @@ -55,19 +54,19 @@ impl ClientQuery for IbcQuery { .transpose() .map_err(|e| tonic::Status::aborted(format!("couldn't decode client state: {e}")))?; - let res = - QueryClientStateResponse { - client_state, - proof: proof.encode_to_vec(), - proof_height: 
Some(ibc_proto::ibc::core::client::v1::Height { - revision_height: snapshot.get_block_height().await.map_err(|e| { - tonic::Status::aborted(format!("couldn't decode height: {e}")) - })? + 1, - revision_number: HI::get_revision_number(&snapshot).await.map_err(|e| { - tonic::Status::aborted(format!("couldn't decode height: {e}")) - })?, - }), - }; + let res = QueryClientStateResponse { + client_state, + proof: proof.encode_to_vec(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: HI::get_block_height(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? + + 1, + revision_number: HI::get_revision_number(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))?, + }), + }; Ok(tonic::Response::new(res)) } @@ -139,19 +138,19 @@ impl ClientQuery for IbcQuery { .transpose() .map_err(|e| tonic::Status::aborted(format!("couldn't decode consensus state: {e}")))?; - let res = - QueryConsensusStateResponse { - consensus_state, - proof: proof.encode_to_vec(), - proof_height: Some(ibc_proto::ibc::core::client::v1::Height { - revision_height: snapshot.get_block_height().await.map_err(|e| { - tonic::Status::aborted(format!("couldn't decode height: {e}")) - })? + 1, - revision_number: HI::get_revision_number(&snapshot).await.map_err(|e| { - tonic::Status::aborted(format!("couldn't decode height: {e}")) - })?, - }), - }; + let res = QueryConsensusStateResponse { + consensus_state, + proof: proof.encode_to_vec(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: HI::get_block_height(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? 
+ + 1, + revision_number: HI::get_revision_number(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))?, + }), + }; Ok(tonic::Response::new(res)) } diff --git a/crates/core/component/ibc/src/component/rpc/connection_query.rs b/crates/core/component/ibc/src/component/rpc/connection_query.rs index 4f653fc755..70ed5dbd2e 100644 --- a/crates/core/component/ibc/src/component/rpc/connection_query.rs +++ b/crates/core/component/ibc/src/component/rpc/connection_query.rs @@ -16,7 +16,6 @@ use ibc_types::path::{ ClientConnectionPath, ClientConsensusStatePath, ClientStatePath, ConnectionPath, }; use ibc_types::DomainType; -use penumbra_sct::component::clock::EpochRead as _; use prost::Message; use std::str::FromStr; @@ -59,19 +58,19 @@ impl ConnectionQuery for IbcQuery let conn = conn.map_err(|e| tonic::Status::aborted(format!("couldn't decode connection: {e}")))?; - let res = - QueryConnectionResponse { - connection: conn, - proof: proof.encode_to_vec(), - proof_height: Some(ibc_proto::ibc::core::client::v1::Height { - revision_height: snapshot.get_block_height().await.map_err(|e| { - tonic::Status::aborted(format!("couldn't decode height: {e}")) - })? + 1, - revision_number: HI::get_revision_number(&snapshot).await.map_err(|e| { - tonic::Status::aborted(format!("couldn't decode height: {e}")) - })?, - }), - }; + let res = QueryConnectionResponse { + connection: conn, + proof: proof.encode_to_vec(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: HI::get_block_height(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? 
+ + 1, + revision_number: HI::get_revision_number(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))?, + }), + }; Ok(tonic::Response::new(res)) } @@ -168,8 +167,7 @@ impl ConnectionQuery for IbcQuery connection_paths, proof: proof.encode_to_vec(), proof_height: Some(ibc_proto::ibc::core::client::v1::Height { - revision_height: snapshot - .get_block_height() + revision_height: HI::get_block_height(&snapshot) .await .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? + 1, @@ -222,8 +220,7 @@ impl ConnectionQuery for IbcQuery identified_client_state: Some(identified_client_state), proof: proof.encode_to_vec(), proof_height: Some(ibc_proto::ibc::core::client::v1::Height { - revision_height: snapshot - .get_block_height() + revision_height: HI::get_block_height(&snapshot) .await .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? + 1, @@ -280,7 +277,7 @@ impl ConnectionQuery for IbcQuery client_id: client_id.to_string(), proof: proof.encode_to_vec(), proof_height: Some(ibc_proto::ibc::core::client::v1::Height { - revision_height: snapshot.get_block_height().await.map_err(|e| { + revision_height: HI::get_block_height(&snapshot).await.map_err(|e| { tonic::Status::aborted(format!("couldn't decode height: {e}")) })? 
+ 1, revision_number: HI::get_revision_number(&snapshot).await.map_err(|e| { diff --git a/crates/core/component/ibc/src/component/rpc/consensus_query.rs b/crates/core/component/ibc/src/component/rpc/consensus_query.rs index c37d654de4..4d67297416 100644 --- a/crates/core/component/ibc/src/component/rpc/consensus_query.rs +++ b/crates/core/component/ibc/src/component/rpc/consensus_query.rs @@ -25,7 +25,6 @@ use ibc_types::DomainType; use ibc_types::core::channel::{ChannelId, IdentifiedChannelEnd, PortId}; use ibc_types::core::connection::ConnectionId; -use penumbra_sct::component::clock::EpochRead as _; use prost::Message; use std::str::FromStr; @@ -68,19 +67,19 @@ impl ConsensusQuery for IbcQuery let channel = channel.map_err(|e| tonic::Status::aborted(format!("couldn't decode channel: {e}")))?; - let res = - QueryChannelResponse { - channel, - proof: proof.encode_to_vec(), - proof_height: Some(ibc_proto::ibc::core::client::v1::Height { - revision_height: snapshot.get_block_height().await.map_err(|e| { - tonic::Status::aborted(format!("couldn't decode height: {e}")) - })? + 1, - revision_number: HI::get_revision_number(&snapshot).await.map_err(|e| { - tonic::Status::aborted(format!("couldn't decode height: {e}")) - })?, - }), - }; + let res = QueryChannelResponse { + channel, + proof: proof.encode_to_vec(), + proof_height: Some(ibc_proto::ibc::core::client::v1::Height { + revision_height: HI::get_block_height(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? 
+ + 1, + revision_number: HI::get_revision_number(&snapshot) + .await + .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))?, + }), + }; Ok(tonic::Response::new(res)) } @@ -258,8 +257,7 @@ impl ConsensusQuery for IbcQuery identified_client_state: Some(identified_client_state), proof: proof.encode_to_vec(), proof_height: Some(ibc_proto::ibc::core::client::v1::Height { - revision_height: snapshot - .get_block_height() + revision_height: HI::get_block_height(&snapshot) .await .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? + 1, @@ -351,8 +349,7 @@ impl ConsensusQuery for IbcQuery client_id: connection.client_id.clone().to_string(), proof: proof.encode_to_vec(), proof_height: Some(ibc_proto::ibc::core::client::v1::Height { - revision_height: snapshot - .get_block_height() + revision_height: HI::get_block_height(&snapshot) .await .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? + 1, @@ -398,8 +395,7 @@ impl ConsensusQuery for IbcQuery commitment, proof: proof.encode_to_vec(), proof_height: Some(ibc_proto::ibc::core::client::v1::Height { - revision_height: snapshot - .get_block_height() + revision_height: HI::get_block_height(&snapshot) .await .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? + 1, @@ -501,8 +497,7 @@ impl ConsensusQuery for IbcQuery received: receipt.is_some(), proof: proof.encode_to_vec(), proof_height: Some(ibc_proto::ibc::core::client::v1::Height { - revision_height: snapshot - .get_block_height() + revision_height: HI::get_block_height(&snapshot) .await .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? 
+ 1, @@ -546,8 +541,7 @@ impl ConsensusQuery for IbcQuery acknowledgement, proof: proof.encode_to_vec(), proof_height: Some(ibc_proto::ibc::core::client::v1::Height { - revision_height: snapshot - .get_block_height() + revision_height: HI::get_block_height(&snapshot) .await .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? + 1, @@ -745,8 +739,7 @@ impl ConsensusQuery for IbcQuery next_sequence_receive: next_recv_sequence, proof: proof.encode_to_vec(), proof_height: Some(ibc_proto::ibc::core::client::v1::Height { - revision_height: snapshot - .get_block_height() + revision_height: HI::get_block_height(&snapshot) .await .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? + 1, @@ -787,8 +780,7 @@ impl ConsensusQuery for IbcQuery next_sequence_send: next_send_sequence, proof: proof.encode_to_vec(), proof_height: Some(ibc_proto::ibc::core::client::v1::Height { - revision_height: snapshot - .get_block_height() + revision_height: HI::get_block_height(&snapshot) .await .map_err(|e| tonic::Status::aborted(format!("couldn't decode height: {e}")))? + 1, From c0e4403be34521c28267699cdb6aeaec898a2f0c Mon Sep 17 00:00:00 2001 From: Henry de Valence Date: Mon, 23 Sep 2024 11:53:01 -0700 Subject: [PATCH 25/43] pcli: disable warning post-mainnet (#4864) ## Describe your changes Removes the testnet-specific warning message from `pcli`. I remember merging this months ago but it is not in the main branch. ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. 
Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > `pcli` changes only --- crates/bin/pcli/src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/bin/pcli/src/main.rs b/crates/bin/pcli/src/main.rs index 29f23a61b9..3f56af08c1 100644 --- a/crates/bin/pcli/src/main.rs +++ b/crates/bin/pcli/src/main.rs @@ -10,8 +10,8 @@ use pcli::{command::*, opt::Opt}; #[tokio::main] async fn main() -> Result<()> { - // Display a warning message to the user so they don't get upset when all their tokens are lost. - if std::env::var("PCLI_UNLEASH_DANGER").is_err() { + // Preserved for posterity and memory + if std::env::var("PCLI_DISPLAY_WARNING").is_ok() { pcli::warning::display(); } From 0f133aa2b720ff30b5015a18abea00f2fea79511 Mon Sep 17 00:00:00 2001 From: redshiftzero Date: Tue, 24 Sep 2024 14:07:27 -0400 Subject: [PATCH 26/43] governance: validate client ID earlier than component/view.rs (#4859) ## Describe your changes This PR modifies the governance code to check the IBC `ClientId` is valid prior to creating the `Proposal` domain type ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. 
Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > This validation is already performed, just later in the handling of the `Proposal` in `component/view.rs` --- crates/core/component/governance/src/proposal.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crates/core/component/governance/src/proposal.rs b/crates/core/component/governance/src/proposal.rs index d979dc444e..8931f2dfc1 100644 --- a/crates/core/component/governance/src/proposal.rs +++ b/crates/core/component/governance/src/proposal.rs @@ -1,5 +1,6 @@ use anyhow::Context; use bytes::Bytes; +use ibc_types::core::client::ClientId; use serde::{Deserialize, Serialize}; use std::str::FromStr; @@ -144,6 +145,9 @@ impl TryFrom for Proposal { if freeze_ibc_client.client_id.len() > 128 { anyhow::bail!("client ID must be less than 128 bytes"); } + // Validation: Check the client ID is valid using the validation inside `ClientId::from_str`. + ClientId::from_str(&freeze_ibc_client.client_id) + .map_err(|e| anyhow::anyhow!("invalid client id: {e}"))?; ProposalPayload::FreezeIbcClient { client_id: freeze_ibc_client.client_id, } @@ -153,6 +157,9 @@ impl TryFrom for Proposal { if unfreeze_ibc_client.client_id.len() > 128 { anyhow::bail!("client ID must be less than 128 bytes"); } + // Validation: Check the client ID is valid using the validation inside `ClientId::from_str`. 
+ ClientId::from_str(&unfreeze_ibc_client.client_id) + .map_err(|e| anyhow::anyhow!("invalid client id: {e}"))?; ProposalPayload::UnfreezeIbcClient { client_id: unfreeze_ibc_client.client_id, } From aa1359fd8a21ffc55165e82fa724c03456c35585 Mon Sep 17 00:00:00 2001 From: Ava Howell Date: Thu, 26 Sep 2024 13:29:13 -0700 Subject: [PATCH 27/43] total supply indexer (#4863) #4835 --------- Co-authored-by: Lucas Meier --- Cargo.lock | 1 + crates/bin/pindexer/Cargo.toml | 1 + crates/bin/pindexer/src/indexer_ext.rs | 1 + crates/bin/pindexer/src/lib.rs | 1 + crates/bin/pindexer/src/supply.rs | 594 +++++++++++++++++++++++++ 5 files changed, 598 insertions(+) create mode 100644 crates/bin/pindexer/src/supply.rs diff --git a/Cargo.lock b/Cargo.lock index c70189105a..fec5db70ed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5822,6 +5822,7 @@ dependencies = [ "penumbra-asset", "penumbra-dex", "penumbra-governance", + "penumbra-keys", "penumbra-num", "penumbra-proto", "penumbra-shielded-pool", diff --git a/crates/bin/pindexer/Cargo.toml b/crates/bin/pindexer/Cargo.toml index 89ef0d746b..7eca6148b8 100644 --- a/crates/bin/pindexer/Cargo.toml +++ b/crates/bin/pindexer/Cargo.toml @@ -19,6 +19,7 @@ penumbra-shielded-pool = {workspace = true, default-features = false} penumbra-stake = {workspace = true, default-features = false} penumbra-app = {workspace = true} penumbra-dex = {workspace = true, default-features = false} +penumbra-keys = {workspace = true, default-features = false} penumbra-governance = {workspace = true, default-features = false} penumbra-num = {workspace = true, default-features = false} penumbra-asset = {workspace = true, default-features = false} diff --git a/crates/bin/pindexer/src/indexer_ext.rs b/crates/bin/pindexer/src/indexer_ext.rs index 80d034950d..8ee1939318 100644 --- a/crates/bin/pindexer/src/indexer_ext.rs +++ b/crates/bin/pindexer/src/indexer_ext.rs @@ -11,5 +11,6 @@ impl IndexerExt for cometindex::Indexer { .with_index(crate::stake::UndelegationTxs {}) 
.with_index(crate::governance::GovernanceProposals {}) .with_index(crate::dex::Component::new()) + .with_index(crate::supply::Component::new()) } } diff --git a/crates/bin/pindexer/src/lib.rs b/crates/bin/pindexer/src/lib.rs index 5cf2d8bd14..2e17584475 100644 --- a/crates/bin/pindexer/src/lib.rs +++ b/crates/bin/pindexer/src/lib.rs @@ -7,5 +7,6 @@ pub mod dex; pub mod shielded_pool; mod sql; pub mod stake; +pub mod supply; pub mod governance; diff --git a/crates/bin/pindexer/src/supply.rs b/crates/bin/pindexer/src/supply.rs new file mode 100644 index 0000000000..62643a2706 --- /dev/null +++ b/crates/bin/pindexer/src/supply.rs @@ -0,0 +1,594 @@ +use std::collections::{BTreeMap, HashSet}; + +use anyhow::{anyhow, Context, Result}; +use cometindex::{async_trait, sqlx, AppView, ContextualizedEvent, PgTransaction}; +use penumbra_app::genesis::{AppState, Content}; +use penumbra_asset::{asset, STAKING_TOKEN_ASSET_ID}; +use penumbra_num::Amount; +use penumbra_proto::{ + event::ProtoEvent, penumbra::core::component::funding::v1 as pb_funding, + penumbra::core::component::stake::v1 as pb_stake, +}; +use penumbra_stake::{rate::RateData, validator::Validator, IdentityKey}; +use sqlx::{PgPool, Postgres, Transaction}; +use std::iter; + +mod unstaked_supply { + //! This module handles updates around the unstaked supply. + use anyhow::Result; + use cometindex::PgTransaction; + + /// Initialize the database tables for this module. + pub async fn init_db(dbtx: &mut PgTransaction<'_>) -> Result<()> { + sqlx::query( + r#" + CREATE TABLE IF NOT EXISTS supply_total_unstaked ( + height BIGINT PRIMARY KEY, + um BIGINT NOT NULL + ); + "#, + ) + .execute(dbtx.as_mut()) + .await?; + Ok(()) + } + + /// Get the supply for at a given height. + async fn get_supply(dbtx: &mut PgTransaction<'_>, height: u64) -> Result> { + let row: Option = sqlx::query_scalar( + "SELECT um FROM supply_total_unstaked WHERE height <= $1 ORDER BY height DESC LIMIT 1", + ) + .bind(i64::try_from(height)?) 
+ .fetch_optional(dbtx.as_mut()) + .await?; + row.map(|x| u64::try_from(x)) + .transpose() + .map_err(Into::into) + } + + /// Set the supply at a given height. + async fn set_supply(dbtx: &mut PgTransaction<'_>, height: u64, supply: u64) -> Result<()> { + sqlx::query( + r#" + INSERT INTO + supply_total_unstaked + VALUES ($1, $2) + ON CONFLICT (height) + DO UPDATE SET + um = excluded.um + "#, + ) + .bind(i64::try_from(height)?) + .bind(i64::try_from(supply)?) + .execute(dbtx.as_mut()) + .await?; + Ok(()) + } + + /// Modify the supply at a given height. + /// + /// This will take the supply at the given height, and replace it with the + /// new result produced by the function. + pub async fn modify( + dbtx: &mut PgTransaction<'_>, + height: u64, + f: impl FnOnce(Option) -> Result, + ) -> Result<()> { + let supply = get_supply(dbtx, height).await?; + let new_supply = f(supply)?; + set_supply(dbtx, height, new_supply).await + } +} + +mod delegated_supply { + //! This module handles updates around the delegated supply to a validator. + use anyhow::{anyhow, Result}; + use cometindex::PgTransaction; + use penumbra_num::fixpoint::U128x128; + use penumbra_stake::{rate::RateData, IdentityKey}; + + const BPS_SQUARED: u64 = 1_0000_0000u64; + + /// Represents the supply around a given validator. + /// + /// The supply needs to track the amount of delegated tokens to that validator, + /// as well as the conversion rate from those tokens to the native token. + #[derive(Clone, Copy)] + pub struct Supply { + um: u64, + del_um: u64, + rate_bps2: u64, + } + + impl Default for Supply { + fn default() -> Self { + Self { + um: 0, + del_um: 0, + rate_bps2: BPS_SQUARED, + } + } + } + + impl Supply { + /// Change the amount of um in this supply, by adding or removing um. 
+ pub fn add_um(self, delta: i64) -> Result { + let rate = U128x128::ratio(self.rate_bps2, BPS_SQUARED)?; + let negate = delta.is_negative(); + let delta = delta.unsigned_abs(); + let um_delta = delta; + let del_um_delta = if rate == U128x128::from(0u128) { + 0u64 + } else { + let del_um_delta = (U128x128::from(delta) / rate)?; + let rounded = if negate { + // So that we don't remove too few del_um + del_um_delta.round_up()? + } else { + // So that we don't add too many del_um + del_um_delta.round_down() + }; + rounded.try_into()? + }; + let out = if negate { + Self { + um: self + .um + .checked_sub(um_delta) + .ok_or(anyhow!("supply modification failed"))?, + del_um: self + .del_um + .checked_sub(del_um_delta) + .ok_or(anyhow!("supply modification failed"))?, + rate_bps2: self.rate_bps2, + } + } else { + Self { + um: self + .um + .checked_add(um_delta) + .ok_or(anyhow!("supply modification failed"))?, + del_um: self + .del_um + .checked_add(del_um_delta) + .ok_or(anyhow!("supply modification failed"))?, + rate_bps2: self.rate_bps2, + } + }; + Ok(out) + } + + /// Change the conversion rate between delegated_um and um in this supply. + pub fn change_rate(self, rate: &RateData) -> Result { + let um = rate + .unbonded_amount(self.del_um.into()) + .value() + .try_into()?; + + Ok(Self { + um, + del_um: self.del_um, + rate_bps2: rate.validator_exchange_rate.value().try_into()?, + }) + } + } + + /// Initialize the database tables for this module. 
+ pub async fn init_db<'d>(dbtx: &mut PgTransaction<'d>) -> Result<()> { + sqlx::query( + r#" + CREATE TABLE IF NOT EXISTS supply_validators ( + id SERIAL PRIMARY KEY, + identity_key TEXT NOT NULL + ); + "#, + ) + .execute(dbtx.as_mut()) + .await?; + sqlx::query( + r#" + CREATE TABLE IF NOT EXISTS supply_total_staked ( + validator_id INT REFERENCES supply_validators(id), + height BIGINT NOT NULL, + um BIGINT NOT NULL, + del_um BIGINT NOT NULL, + rate_bps2 BIGINT NOT NULL, + PRIMARY KEY (validator_id, height) + ); + "#, + ) + .execute(dbtx.as_mut()) + .await?; + Ok(()) + } + + /// An opaque internal identifier for a given validator. + #[derive(Clone, Copy, PartialEq)] + pub struct ValidatorID(i32); + + /// Define a validator, returning its internal ID. + /// + /// This will have no effect if the validator has already been defined. + pub async fn define_validator( + dbtx: &mut PgTransaction<'_>, + identity_key: &IdentityKey, + ) -> Result { + let ik_string = identity_key.to_string(); + + let id: Option = + sqlx::query_scalar(r#"SELECT id FROM supply_validators WHERE identity_key = $1"#) + .bind(&ik_string) + .fetch_optional(dbtx.as_mut()) + .await?; + + if let Some(id) = id { + return Ok(ValidatorID(id)); + } + let id = sqlx::query_scalar( + r#"INSERT INTO supply_validators VALUES (DEFAULT, $1) RETURNING id"#, + ) + .bind(&ik_string) + .fetch_one(dbtx.as_mut()) + .await?; + Ok(ValidatorID(id)) + } + + /// Get the supply for a given validator at a given height. + async fn get_supply( + dbtx: &mut PgTransaction<'_>, + validator: ValidatorID, + height: u64, + ) -> Result> { + let row: Option<(i64, i64, i64)> = sqlx::query_as( + r#" + SELECT + um, del_um, rate_bps2 + FROM + supply_total_staked + WHERE + validator_id = $1 AND height <= $2 + ORDER BY height DESC + LIMIT 1 + "#, + ) + .bind(validator.0) + .bind(i64::try_from(height)?) 
+ .fetch_optional(dbtx.as_mut()) + .await?; + row.map(|(um, del_um, rate_bps2)| { + let um = um.try_into()?; + let del_um = del_um.try_into()?; + let rate_bps2 = rate_bps2.try_into()?; + Ok(Supply { + um, + del_um, + rate_bps2, + }) + }) + .transpose() + } + + /// Set the supply for a given validator at a given height. + async fn set_supply( + dbtx: &mut PgTransaction<'_>, + validator: ValidatorID, + height: u64, + supply: Supply, + ) -> Result<()> { + sqlx::query( + r#" + INSERT INTO + supply_total_staked + VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (validator_id, height) + DO UPDATE SET + um = excluded.um, + del_um = excluded.del_um, + rate_bps2 = excluded.rate_bps2 + "#, + ) + .bind(validator.0) + .bind(i64::try_from(height)?) + .bind(i64::try_from(supply.um)?) + .bind(i64::try_from(supply.del_um)?) + .bind(i64::try_from(supply.rate_bps2)?) + .execute(dbtx.as_mut()) + .await?; + Ok(()) + } + + /// Modify the supply for a given validator, at a given height. + pub async fn modify( + dbtx: &mut PgTransaction<'_>, + validator: ValidatorID, + height: u64, + f: impl FnOnce(Option) -> Result, + ) -> Result<()> { + let supply = get_supply(dbtx, validator, height).await?; + let new_supply = f(supply)?; + set_supply(dbtx, validator, height, new_supply).await + } +} + +/// Supply-relevant events. +/// The supply of the native staking token can change: +/// - When notes are minted (e.g., during initial genesis, or as a result of +/// IBC, though in the case of IBC the circuit breaker should never allow more +/// inbound UM to be minted than outbound um were originally sent.) +/// - As a result of claiming delegation tokens that have increased in +/// underlying UM value due to accumulating the staking rate. +/// - As a result of burning UM which can happen due to arbs, fees, and slashing. 
+#[derive(Clone, Debug)] +enum Event { + /// A parsed version of [pb::EventUndelegate] + Undelegate { + height: u64, + identity_key: IdentityKey, + unbonded_amount: Amount, + }, + /// A parsed version of [pb::EventDelegate] + Delegate { + height: u64, + identity_key: IdentityKey, + amount: Amount, + }, + /// A parsed version of [pb::EventFundingStreamReward] + FundingStreamReward { height: u64, reward_amount: Amount }, + /// A parsed version of EventRateDataChange + RateDataChange { + height: u64, + identity_key: IdentityKey, + rate_data: RateData, + }, +} + +impl Event { + const NAMES: [&'static str; 4] = [ + "penumbra.core.component.stake.v1.EventUndelegate", + "penumbra.core.component.stake.v1.EventDelegate", + "penumbra.core.component.funding.v1.EventFundingStreamReward", + "penumbra.core.component.stake.v1.EventRateDataChange", + ]; + + async fn index<'d>(&self, dbtx: &mut Transaction<'d, Postgres>) -> anyhow::Result<()> { + match self { + Event::Delegate { + height, + identity_key, + amount, + } => { + let amount = i64::try_from(amount.value())?; + + unstaked_supply::modify(dbtx, *height, |current| { + Ok(current.unwrap_or_default() - amount as u64) + }) + .await?; + + let validator = delegated_supply::define_validator(dbtx, identity_key).await?; + delegated_supply::modify(dbtx, validator, *height, |current| { + current.unwrap_or_default().add_um(amount) + }) + .await + } + Event::Undelegate { + height, + identity_key, + unbonded_amount, + } => { + let amount = i64::try_from(unbonded_amount.value())?; + + unstaked_supply::modify(dbtx, *height, |current| { + Ok(current.unwrap_or_default() + amount as u64) + }) + .await?; + + let validator = delegated_supply::define_validator(dbtx, identity_key).await?; + delegated_supply::modify(dbtx, validator, *height, |current| { + current.unwrap_or_default().add_um(-amount) + }) + .await + } + Event::FundingStreamReward { + height, + reward_amount, + } => { + let amount = u64::try_from(reward_amount.value())?; + + 
unstaked_supply::modify(dbtx, *height, |current| { + Ok(current.unwrap_or_default() + amount) + }) + .await + } + Event::RateDataChange { + height, + identity_key, + rate_data, + } => { + let validator = delegated_supply::define_validator(dbtx, identity_key).await?; + delegated_supply::modify(dbtx, validator, *height, |current| { + current.unwrap_or_default().change_rate(rate_data) + }) + .await + } + } + } +} + +impl<'a> TryFrom<&'a ContextualizedEvent> for Event { + type Error = anyhow::Error; + + fn try_from(event: &'a ContextualizedEvent) -> Result { + match event.event.kind.as_str() { + // undelegation + x if x == Event::NAMES[0] => { + let pe = pb_stake::EventUndelegate::from_event(event.as_ref())?; + let identity_key = pe + .identity_key + .ok_or(anyhow!("EventUndelegate should contain identity key"))? + .try_into()?; + let unbonded_amount = pe + .amount + .ok_or(anyhow!("EventUndelegate should contain amount"))? + .try_into()?; + Ok(Self::Undelegate { + height: event.block_height, + identity_key, + unbonded_amount, + }) + } + // delegation + x if x == Event::NAMES[1] => { + let pe = pb_stake::EventDelegate::from_event(event.as_ref())?; + let identity_key = pe + .identity_key + .ok_or(anyhow!("EventDelegate should contain identity key"))? + .try_into()?; + let amount = pe + .amount + .ok_or(anyhow!("EventDelegate should contain amount"))? 
+ .try_into()?; + Ok(Self::Delegate { + height: event.block_height, + identity_key, + amount, + }) + } + // funding stream reward + x if x == Event::NAMES[2] => { + let pe = pb_funding::EventFundingStreamReward::from_event(event.as_ref())?; + let reward_amount = Amount::try_from( + pe.reward_amount + .ok_or(anyhow!("event missing in funding stream reward"))?, + )?; + Ok(Self::FundingStreamReward { + height: event.block_height, + reward_amount, + }) + } + // validator rate change + x if x == Event::NAMES[3] => { + let pe = pb_stake::EventRateDataChange::from_event(event.as_ref())?; + let identity_key = pe + .identity_key + .ok_or(anyhow!("EventRateDataChange should contain identity key"))? + .try_into()?; + let rate_data = pe + .rate_data + .ok_or(anyhow!("EventRateDataChange should contain rate data"))? + .try_into()?; + Ok(Self::RateDataChange { + height: event.block_height, + identity_key, + rate_data, + }) + } + x => Err(anyhow!(format!("unrecognized event kind: {x}"))), + } + } +} + +/// Add the initial native token supply. 
+async fn add_genesis_native_token_allocation_supply<'a>( + dbtx: &mut PgTransaction<'a>, + app_state: &AppState, +) -> Result<()> { + fn content_mints(content: &Content) -> BTreeMap { + let community_pool_mint = iter::once(( + *STAKING_TOKEN_ASSET_ID, + content.community_pool_content.initial_balance.amount, + )); + let allocation_mints = content + .shielded_pool_content + .allocations + .iter() + .map(|allocation| { + let value = allocation.value(); + (value.asset_id, value.amount) + }); + + let mut out = BTreeMap::new(); + for (id, amount) in community_pool_mint.chain(allocation_mints) { + out.entry(id).and_modify(|x| *x += amount).or_insert(amount); + } + out + } + + let content = app_state + .content() + .ok_or_else(|| anyhow::anyhow!("cannot initialized indexer from checkpoint genesis"))?; + let mints = content_mints(content); + + let unstaked_mint = u64::try_from( + mints + .get(&*STAKING_TOKEN_ASSET_ID) + .copied() + .unwrap_or_default() + .value(), + )?; + unstaked_supply::modify(dbtx, 0, |_| Ok(unstaked_mint)).await?; + + // at genesis, assume a 1:1 ratio between delegation amount and native token amount. 
+ for val in &content.stake_content.validators { + let val = Validator::try_from(val.clone())?; + let delegation_amount: i64 = mints + .get(&val.token().id()) + .cloned() + .unwrap_or_default() + .value() + .try_into()?; + + let val_id = delegated_supply::define_validator(dbtx, &val.identity_key).await?; + delegated_supply::modify(dbtx, val_id, 0, |_| { + delegated_supply::Supply::default().add_um(delegation_amount) + }) + .await?; + } + + Ok(()) +} + +#[derive(Debug)] +pub struct Component { + event_strings: HashSet<&'static str>, +} + +impl Component { + pub fn new() -> Self { + let event_strings = Event::NAMES.into_iter().collect(); + Self { event_strings } + } +} + +#[async_trait] +impl AppView for Component { + async fn init_chain( + &self, + dbtx: &mut PgTransaction, + app_state: &serde_json::Value, + ) -> Result<(), anyhow::Error> { + unstaked_supply::init_db(dbtx).await?; + delegated_supply::init_db(dbtx).await?; + + // decode the initial supply from the genesis + // initial app state is not recomputed from events, because events are not emitted in init_chain. + // instead, the indexer directly parses the genesis. 
+ let app_state: penumbra_app::genesis::AppState = + serde_json::from_value(app_state.clone()).context("error decoding app_state json")?; + add_genesis_native_token_allocation_supply(dbtx, &app_state).await?; + + Ok(()) + } + + fn is_relevant(&self, type_str: &str) -> bool { + self.event_strings.contains(type_str) + } + + async fn index_event( + &self, + dbtx: &mut PgTransaction, + event: &ContextualizedEvent, + _src_db: &PgPool, + ) -> Result<(), anyhow::Error> { + Event::try_from(event)?.index(dbtx).await + } +} From 555307b4002a00962640e702834d9bd3d08ffea6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=BAc=C3=A1s=20Meier?= Date: Thu, 26 Sep 2024 13:54:11 -0700 Subject: [PATCH 28/43] pindexer supply with destruction (#4866) ## Describe your changes This adjusts the total supply indexer to account for whether or not value is locked in the dex, the auction component, or locked away after fees and arbitrage. This matters because a significant amount of the native token has been locked in arbitrage, so this affects the end result by about 50% (in terms of net new supply) Merge https://github.com/penumbra-zone/penumbra/pull/4863 first. ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > indexing only. 
--- Cargo.lock | 2 + crates/bin/pindexer/Cargo.toml | 2 + crates/bin/pindexer/src/supply.rs | 377 ++++++++++++++++++++++++++++-- 3 files changed, 362 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fec5db70ed..e20260455c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5820,7 +5820,9 @@ dependencies = [ "num-bigint", "penumbra-app", "penumbra-asset", + "penumbra-auction", "penumbra-dex", + "penumbra-fee", "penumbra-governance", "penumbra-keys", "penumbra-num", diff --git a/crates/bin/pindexer/Cargo.toml b/crates/bin/pindexer/Cargo.toml index 7eca6148b8..2d6171b5c3 100644 --- a/crates/bin/pindexer/Cargo.toml +++ b/crates/bin/pindexer/Cargo.toml @@ -18,7 +18,9 @@ num-bigint = { version = "0.4" } penumbra-shielded-pool = {workspace = true, default-features = false} penumbra-stake = {workspace = true, default-features = false} penumbra-app = {workspace = true} +penumbra-auction = {workspace = true, default-features = false} penumbra-dex = {workspace = true, default-features = false} +penumbra-fee = {workspace = true, default-features = false} penumbra-keys = {workspace = true, default-features = false} penumbra-governance = {workspace = true, default-features = false} penumbra-num = {workspace = true, default-features = false} diff --git a/crates/bin/pindexer/src/supply.rs b/crates/bin/pindexer/src/supply.rs index 62643a2706..ff51f7adb7 100644 --- a/crates/bin/pindexer/src/supply.rs +++ b/crates/bin/pindexer/src/supply.rs @@ -6,8 +6,11 @@ use penumbra_app::genesis::{AppState, Content}; use penumbra_asset::{asset, STAKING_TOKEN_ASSET_ID}; use penumbra_num::Amount; use penumbra_proto::{ - event::ProtoEvent, penumbra::core::component::funding::v1 as pb_funding, - penumbra::core::component::stake::v1 as pb_stake, + event::ProtoEvent, + penumbra::core::component::{ + auction::v1 as pb_auction, dex::v1 as pb_dex, fee::v1 as pb_fee, funding::v1 as pb_funding, + stake::v1 as pb_stake, + }, }; use penumbra_stake::{rate::RateData, validator::Validator, 
IdentityKey}; use sqlx::{PgPool, Postgres, Transaction}; @@ -24,7 +27,11 @@ mod unstaked_supply { r#" CREATE TABLE IF NOT EXISTS supply_total_unstaked ( height BIGINT PRIMARY KEY, - um BIGINT NOT NULL + um BIGINT NOT NULL, + auction BIGINT NOT NULL, + dex BIGINT NOT NULL, + arb BIGINT NOT NULL, + fees BIGINT NOT NULL ); "#, ) @@ -33,33 +40,63 @@ mod unstaked_supply { Ok(()) } + /// The supply of unstaked tokens, in various components. + #[derive(Clone, Copy, Debug, Default, PartialEq)] + pub struct Supply { + /// The supply that's not locked in any component. + pub um: u64, + /// The supply locked in the auction component. + pub auction: u64, + /// The supply locked in the dex component. + pub dex: u64, + /// The supply which has been (forever) locked away after arb. + pub arb: u64, + /// The supply which has been (forever) locked away as paid fees. + pub fees: u64, + } + /// Get the supply for at a given height. - async fn get_supply(dbtx: &mut PgTransaction<'_>, height: u64) -> Result> { - let row: Option = sqlx::query_scalar( - "SELECT um FROM supply_total_unstaked WHERE height <= $1 ORDER BY height DESC LIMIT 1", + async fn get_supply(dbtx: &mut PgTransaction<'_>, height: u64) -> Result> { + let row: Option<(i64, i64, i64, i64, i64)> = sqlx::query_as( + "SELECT um, auction, dex, arb, fees FROM supply_total_unstaked WHERE height <= $1 ORDER BY height DESC LIMIT 1", ) .bind(i64::try_from(height)?) .fetch_optional(dbtx.as_mut()) .await?; - row.map(|x| u64::try_from(x)) - .transpose() - .map_err(Into::into) + match row { + None => Ok(None), + Some((um, auction, dex, arb, fees)) => Ok(Some(Supply { + um: um.try_into()?, + auction: auction.try_into()?, + dex: dex.try_into()?, + arb: arb.try_into()?, + fees: fees.try_into()?, + })), + } } /// Set the supply at a given height. 
- async fn set_supply(dbtx: &mut PgTransaction<'_>, height: u64, supply: u64) -> Result<()> { + async fn set_supply(dbtx: &mut PgTransaction<'_>, height: u64, supply: Supply) -> Result<()> { sqlx::query( r#" INSERT INTO supply_total_unstaked - VALUES ($1, $2) + VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (height) DO UPDATE SET - um = excluded.um + um = excluded.um, + auction = excluded.auction, + dex = excluded.dex, + arb = excluded.arb, + fees = excluded.fees "#, ) .bind(i64::try_from(height)?) - .bind(i64::try_from(supply)?) + .bind(i64::try_from(supply.um)?) + .bind(i64::try_from(supply.auction)?) + .bind(i64::try_from(supply.dex)?) + .bind(i64::try_from(supply.arb)?) + .bind(i64::try_from(supply.fees)?) .execute(dbtx.as_mut()) .await?; Ok(()) @@ -72,7 +109,7 @@ mod unstaked_supply { pub async fn modify( dbtx: &mut PgTransaction<'_>, height: u64, - f: impl FnOnce(Option) -> Result, + f: impl FnOnce(Option) -> Result, ) -> Result<()> { let supply = get_supply(dbtx, height).await?; let new_supply = f(supply)?; @@ -340,14 +377,56 @@ enum Event { identity_key: IdentityKey, rate_data: RateData, }, + /// A parsed version of [auction::EventValueCircuitBreakerCredit] + AuctionVCBCredit { + height: u64, + asset_id: asset::Id, + previous_balance: Amount, + new_balance: Amount, + }, + /// A parsed version of [auction::EventValueCircuitBreakerDebit] + AuctionVCBDebit { + height: u64, + asset_id: asset::Id, + previous_balance: Amount, + new_balance: Amount, + }, + /// A parsed version of [dex::EventValueCircuitBreakerCredit] + DexVCBCredit { + height: u64, + asset_id: asset::Id, + previous_balance: Amount, + new_balance: Amount, + }, + /// A parsed version of [dex::EventValueCircuitBreakerDebit] + DexVCBDebit { + height: u64, + asset_id: asset::Id, + previous_balance: Amount, + new_balance: Amount, + }, + DexArb { + height: u64, + swap_execution: penumbra_dex::SwapExecution, + }, + BlockFees { + height: u64, + total: penumbra_fee::Fee, + }, } impl Event { - const NAMES: 
[&'static str; 4] = [ + const NAMES: [&'static str; 10] = [ "penumbra.core.component.stake.v1.EventUndelegate", "penumbra.core.component.stake.v1.EventDelegate", "penumbra.core.component.funding.v1.EventFundingStreamReward", "penumbra.core.component.stake.v1.EventRateDataChange", + "penumbra.core.component.auction.v1.EventValueCircuitBreakerCredit", + "penumbra.core.component.auction.v1.EventValueCircuitBreakerDebit", + "penumbra.core.component.dex.v1.EventValueCircuitBreakerCredit", + "penumbra.core.component.dex.v1.EventValueCircuitBreakerDebit", + "penumbra.core.component.dex.v1.EventArbExecution", + "penumbra.core.component.fee.v1.EventBlockFees", ]; async fn index<'d>(&self, dbtx: &mut Transaction<'d, Postgres>) -> anyhow::Result<()> { @@ -360,7 +439,11 @@ impl Event { let amount = i64::try_from(amount.value())?; unstaked_supply::modify(dbtx, *height, |current| { - Ok(current.unwrap_or_default() - amount as u64) + let current = current.unwrap_or_default(); + Ok(unstaked_supply::Supply { + um: current.um - amount as u64, + ..current + }) }) .await?; @@ -378,7 +461,11 @@ impl Event { let amount = i64::try_from(unbonded_amount.value())?; unstaked_supply::modify(dbtx, *height, |current| { - Ok(current.unwrap_or_default() + amount as u64) + let current = current.unwrap_or_default(); + Ok(unstaked_supply::Supply { + um: current.um + amount as u64, + ..current + }) }) .await?; @@ -395,7 +482,11 @@ impl Event { let amount = u64::try_from(reward_amount.value())?; unstaked_supply::modify(dbtx, *height, |current| { - Ok(current.unwrap_or_default() + amount) + let current = current.unwrap_or_default(); + Ok(unstaked_supply::Supply { + um: current.um + amount as u64, + ..current + }) }) .await } @@ -410,6 +501,133 @@ impl Event { }) .await } + Event::AuctionVCBCredit { + height, + asset_id, + previous_balance, + new_balance, + } => { + if *asset_id != *STAKING_TOKEN_ASSET_ID { + return Ok(()); + } + + let added = u64::try_from(new_balance.value() - 
previous_balance.value())?; + unstaked_supply::modify(dbtx, *height, |current| { + let current = current.unwrap_or_default(); + Ok(unstaked_supply::Supply { + um: current.um - added, + auction: current.auction + added, + ..current + }) + }) + .await + } + Event::AuctionVCBDebit { + height, + asset_id, + previous_balance, + new_balance, + } => { + if *asset_id != *STAKING_TOKEN_ASSET_ID { + return Ok(()); + } + + let removed = u64::try_from(previous_balance.value() - new_balance.value())?; + unstaked_supply::modify(dbtx, *height, |current| { + let current = current.unwrap_or_default(); + Ok(unstaked_supply::Supply { + um: current.um + removed, + auction: current.auction - removed, + ..current + }) + }) + .await + } + Event::DexVCBCredit { + height, + asset_id, + previous_balance, + new_balance, + } => { + if *asset_id != *STAKING_TOKEN_ASSET_ID { + return Ok(()); + } + + let added = u64::try_from(new_balance.value() - previous_balance.value())?; + unstaked_supply::modify(dbtx, *height, |current| { + let current = current.unwrap_or_default(); + Ok(unstaked_supply::Supply { + um: current.um - added, + dex: current.dex + added, + ..current + }) + }) + .await + } + Event::DexVCBDebit { + height, + asset_id, + previous_balance, + new_balance, + } => { + if *asset_id != *STAKING_TOKEN_ASSET_ID { + return Ok(()); + } + + let removed = u64::try_from(previous_balance.value() - new_balance.value())?; + unstaked_supply::modify(dbtx, *height, |current| { + let current = current.unwrap_or_default(); + Ok(unstaked_supply::Supply { + um: current.um + removed, + dex: current.dex - removed, + ..current + }) + }) + .await + } + Event::DexArb { + height, + swap_execution, + } => { + let input = swap_execution.input; + let output = swap_execution.output; + // Ignore any arb event not from the staking token to itself. 
+ if input.asset_id != output.asset_id || input.asset_id != *STAKING_TOKEN_ASSET_ID { + return Ok(()); + } + + let profit = u64::try_from((output.amount - input.amount).value())?; + unstaked_supply::modify(dbtx, *height, |current| { + let current = current.unwrap_or_default(); + Ok(unstaked_supply::Supply { + um: current.um - profit, + arb: current.arb + profit, + ..current + }) + }) + .await + } + Event::BlockFees { height, total } => { + if total.asset_id() != *STAKING_TOKEN_ASSET_ID { + return Ok(()); + } + let amount = u64::try_from(total.amount().value())?; + // This might happen without fees frequently, potentially. + if amount == 0 { + return Ok(()); + } + // We consider the tip to be destroyed too, matching the current logic + // DRAGON: if this changes, this code should use the base fee only. + unstaked_supply::modify(dbtx, *height, |current| { + let current = current.unwrap_or_default(); + Ok(unstaked_supply::Supply { + um: current.um - amount, + fees: current.fees + amount, + ..current + }) + }) + .await + } } } } @@ -482,6 +700,118 @@ impl<'a> TryFrom<&'a ContextualizedEvent> for Event { rate_data, }) } + // AuctionVCBCredit + x if x == Event::NAMES[4] => { + let pe = pb_auction::EventValueCircuitBreakerCredit::from_event(event.as_ref())?; + let asset_id = pe + .asset_id + .ok_or(anyhow!("AuctionVCBCredit missing asset_id"))? + .try_into()?; + let previous_balance = pe + .previous_balance + .ok_or(anyhow!("AuctionVCBCredit missing previous_balance"))? + .try_into()?; + let new_balance = pe + .new_balance + .ok_or(anyhow!("AuctionVCBCredit missing previous_balance"))? + .try_into()?; + Ok(Self::AuctionVCBCredit { + height: event.block_height, + asset_id, + previous_balance, + new_balance, + }) + } + // AuctionVCBDebit + x if x == Event::NAMES[5] => { + let pe = pb_auction::EventValueCircuitBreakerDebit::from_event(event.as_ref())?; + let asset_id = pe + .asset_id + .ok_or(anyhow!("AuctionVCBDebit missing asset_id"))? 
+ .try_into()?; + let previous_balance = pe + .previous_balance + .ok_or(anyhow!("AuctionVCBDebit missing previous_balance"))? + .try_into()?; + let new_balance = pe + .new_balance + .ok_or(anyhow!("AuctionVCBDebit missing previous_balance"))? + .try_into()?; + Ok(Self::AuctionVCBDebit { + height: event.block_height, + asset_id, + previous_balance, + new_balance, + }) + } + // DexVCBCredit + x if x == Event::NAMES[6] => { + let pe = pb_dex::EventValueCircuitBreakerCredit::from_event(event.as_ref())?; + let asset_id = pe + .asset_id + .ok_or(anyhow!("DexVCBCredit missing asset_id"))? + .try_into()?; + let previous_balance = pe + .previous_balance + .ok_or(anyhow!("DexVCBCredit missing previous_balance"))? + .try_into()?; + let new_balance = pe + .new_balance + .ok_or(anyhow!("DexVCBCredit missing previous_balance"))? + .try_into()?; + Ok(Self::DexVCBCredit { + height: event.block_height, + asset_id, + previous_balance, + new_balance, + }) + } + // DexVCBDebit + x if x == Event::NAMES[7] => { + let pe = pb_dex::EventValueCircuitBreakerDebit::from_event(event.as_ref())?; + let asset_id = pe + .asset_id + .ok_or(anyhow!("DexVCBDebit missing asset_id"))? + .try_into()?; + let previous_balance = pe + .previous_balance + .ok_or(anyhow!("DexVCBDebit missing previous_balance"))? + .try_into()?; + let new_balance = pe + .new_balance + .ok_or(anyhow!("DexVCBDebit missing previous_balance"))? + .try_into()?; + Ok(Self::DexVCBDebit { + height: event.block_height, + asset_id, + previous_balance, + new_balance, + }) + } + // DexArb + x if x == Event::NAMES[8] => { + let pe = pb_dex::EventArbExecution::from_event(event.as_ref())?; + let swap_execution = pe + .swap_execution + .ok_or(anyhow!("EventArbExecution missing swap_execution"))? 
+ .try_into()?; + Ok(Self::DexArb { + height: event.block_height, + swap_execution, + }) + } + // BlockFees + x if x == Event::NAMES[9] => { + let pe = pb_fee::EventBlockFees::from_event(event.as_ref())?; + let total = pe + .swapped_fee_total + .ok_or(anyhow!("EventBlockFees missing swapped_fee_total"))? + .try_into()?; + Ok(Self::BlockFees { + height: event.block_height, + total, + }) + } x => Err(anyhow!(format!("unrecognized event kind: {x}"))), } } @@ -525,7 +855,16 @@ async fn add_genesis_native_token_allocation_supply<'a>( .unwrap_or_default() .value(), )?; - unstaked_supply::modify(dbtx, 0, |_| Ok(unstaked_mint)).await?; + unstaked_supply::modify(dbtx, 0, |_| { + Ok(unstaked_supply::Supply { + um: unstaked_mint, + auction: 0, + dex: 0, + arb: 0, + fees: 0, + }) + }) + .await?; // at genesis, assume a 1:1 ratio between delegation amount and native token amount. for val in &content.stake_content.validators { From 772fc69034907cddfca5e68b08ef92b016968d89 Mon Sep 17 00:00:00 2001 From: Chris Czub Date: Mon, 30 Sep 2024 16:10:08 -0400 Subject: [PATCH 29/43] feat: Noble forwarding address registration in pcli (#4865) ## Describe your changes This adds support for registering Noble forwarding addresses, for example: `pcli tx register-forwarding-account --noble-node http://noble-testnet-grpc.polkachu.com:21590 --channel channel-216 --address-or-index 2` Note that your address will need to be funded prior to being registered; the [faucet](https://faucet.circle.com/) can be used along with the `pcli view noble-address ADDRESS_OR_INDEX --channel channel-216` command to get test funds. ## Issue ticket number and link Closes #4857 ## Checklist before requesting a review - [ ] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. 
Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > pcli only --- crates/bin/pcli/src/command/tx.rs | 132 + crates/bin/pcli/src/command/view.rs | 8 + .../pcli/src/command/view/noble_address.rs | 52 + crates/core/keys/src/address.rs | 45 + crates/proto/src/gen/cosmos.app.v1alpha1.rs | 102 + crates/proto/src/gen/cosmos.auth.v1beta1.rs | 83 + .../proto/src/gen/cosmos.base.abci.v1beta1.rs | 285 ++ .../src/gen/cosmos.crypto.multisig.v1beta1.rs | 35 + crates/proto/src/gen/cosmos.tx.config.v1.rs | 20 + .../src/gen/cosmos.tx.signing.v1beta1.rs | 189 ++ crates/proto/src/gen/cosmos.tx.v1beta1.rs | 1774 ++++++++++++ crates/proto/src/gen/noble.forwarding.v1.rs | 1083 +++++++ .../proto/src/gen/proto_descriptor.bin.no_lfs | Bin 549864 -> 642967 bytes crates/proto/src/gen/tendermint.abci.rs | 2579 +++++++++++++++++ crates/proto/src/gen/tendermint.types.rs | 118 + crates/proto/src/lib.rs | 70 + deployments/scripts/protobuf-codegen | 1 + proto/rust-vendored/google/protobuf/any.proto | 164 ++ .../noble/forwarding/v1/account.proto | 22 + .../noble/forwarding/v1/genesis.proto | 11 + .../noble/forwarding/v1/packet.proto | 18 + .../noble/forwarding/v1/query.proto | 66 + .../noble/forwarding/v1/tx.proto | 29 + tools/proto-compiler/src/main.rs | 12 + 24 files changed, 6898 insertions(+) create mode 100644 crates/bin/pcli/src/command/view/noble_address.rs create mode 100644 crates/proto/src/gen/cosmos.app.v1alpha1.rs create mode 100644 crates/proto/src/gen/cosmos.auth.v1beta1.rs create mode 100644 crates/proto/src/gen/cosmos.base.abci.v1beta1.rs create mode 100644 crates/proto/src/gen/cosmos.crypto.multisig.v1beta1.rs create mode 100644 crates/proto/src/gen/cosmos.tx.config.v1.rs create mode 100644 crates/proto/src/gen/cosmos.tx.signing.v1beta1.rs create mode 100644 crates/proto/src/gen/cosmos.tx.v1beta1.rs create mode 100644 crates/proto/src/gen/noble.forwarding.v1.rs create mode 100644 crates/proto/src/gen/tendermint.abci.rs create 
mode 100644 proto/rust-vendored/google/protobuf/any.proto create mode 100644 proto/rust-vendored/noble/forwarding/v1/account.proto create mode 100644 proto/rust-vendored/noble/forwarding/v1/genesis.proto create mode 100644 proto/rust-vendored/noble/forwarding/v1/packet.proto create mode 100644 proto/rust-vendored/noble/forwarding/v1/query.proto create mode 100644 proto/rust-vendored/noble/forwarding/v1/tx.proto diff --git a/crates/bin/pcli/src/command/tx.rs b/crates/bin/pcli/src/command/tx.rs index 90c49d523d..9289d1fb5c 100644 --- a/crates/bin/pcli/src/command/tx.rs +++ b/crates/bin/pcli/src/command/tx.rs @@ -51,7 +51,16 @@ use penumbra_proto::{ ValidatorPenaltyRequest, }, }, + cosmos::tx::v1beta1::{ + mode_info::{Single, Sum}, + service_client::ServiceClient as CosmosServiceClient, + AuthInfo as CosmosAuthInfo, BroadcastTxRequest as CosmosBroadcastTxRequest, + Fee as CosmosFee, ModeInfo, SignerInfo as CosmosSignerInfo, Tx as CosmosTx, + TxBody as CosmosTxBody, + }, + noble::forwarding::v1::{ForwardingPubKey, MsgRegisterAccount}, view::v1::GasPricesRequest, + Message, Name as _, }; use penumbra_shielded_pool::Ics20Withdrawal; use penumbra_stake::rate::RateData; @@ -60,6 +69,8 @@ use penumbra_transaction::{gas::swap_claim_gas_cost, Transaction}; use penumbra_view::{SpendableNoteRecord, ViewClient}; use penumbra_wallet::plan::{self, Planner}; use proposal::ProposalCmd; +use tonic::transport::{Channel, ClientTlsConfig}; +use url::Url; use crate::command::tx::auction::AuctionCmd; use crate::App; @@ -258,6 +269,22 @@ pub enum TxCmd { #[clap(long)] use_compat_address: bool, }, + #[clap(display_order = 970)] + /// Register a Noble forwarding account. + RegisterForwardingAccount { + /// The Noble node to submit the registration transaction to. + #[clap(long)] + noble_node: Url, + /// The Noble IBC channel to use for forwarding. + #[clap(long)] + channel: String, + /// The Penumbra address or address index to receive forwarded funds. 
+ #[clap(long)] + address_or_index: String, + /// Whether or not to use an ephemeral address. + #[clap(long)] + ephemeral: bool, + }, /// Broadcast a saved transaction to the network #[clap(display_order = 1000)] Broadcast { @@ -319,6 +346,7 @@ impl TxCmd { TxCmd::Withdraw { .. } => false, TxCmd::Auction(_) => false, TxCmd::Broadcast { .. } => false, + TxCmd::RegisterForwardingAccount { .. } => false, } } @@ -326,6 +354,8 @@ impl TxCmd { // TODO: use a command line flag to determine the fee token, // and pull the appropriate GasPrices out of this rpc response, // the rest should follow + // TODO: fetching this here means that no tx commands + // can be run in offline mode, which is a bit annoying let gas_prices = app .view .as_mut() @@ -1333,7 +1363,109 @@ impl TxCmd { let transaction: Transaction = serde_json::from_slice(&fs::read(transaction)?)?; app.submit_transaction(transaction).await?; } + TxCmd::RegisterForwardingAccount { + noble_node, + channel, + address_or_index, + ephemeral, + } => { + let index: Result = address_or_index.parse(); + let fvk = app.config.full_viewing_key.clone(); + + let address = if let Ok(index) = index { + // address index provided + let (address, _dtk) = match ephemeral { + false => fvk.incoming().payment_address(index.into()), + true => fvk.incoming().ephemeral_address(OsRng, index.into()), + }; + + address + } else { + // address or nothing provided + let address: Address = address_or_index + .parse() + .map_err(|_| anyhow::anyhow!("Provided address is invalid."))?; + + address + }; + + let noble_address = address.noble_forwarding_address(channel); + + println!( + "registering Noble forwarding account with address {} to forward to Penumbra address {}...", + noble_address, address + ); + + let mut noble_client = CosmosServiceClient::new( + Channel::from_shared(noble_node.to_string())? + .tls_config(ClientTlsConfig::new())? 
+ .connect() + .await?, + ); + + let tx = CosmosTx { + body: Some(CosmosTxBody { + messages: vec![pbjson_types::Any { + type_url: MsgRegisterAccount::type_url(), + value: MsgRegisterAccount { + signer: noble_address.to_string(), + recipient: address.to_string(), + channel: channel.to_string(), + } + .encode_to_vec() + .into(), + }], + memo: "".to_string(), + timeout_height: 0, + extension_options: vec![], + non_critical_extension_options: vec![], + }), + auth_info: Some(CosmosAuthInfo { + signer_infos: vec![CosmosSignerInfo { + public_key: Some(pbjson_types::Any { + type_url: ForwardingPubKey::type_url(), + value: ForwardingPubKey { + key: noble_address.bytes(), + } + .encode_to_vec() + .into(), + }), + mode_info: Some(ModeInfo { + // SIGN_MODE_DIRECT + sum: Some(Sum::Single(Single { mode: 1 })), + }), + sequence: 0, + }], + fee: Some(CosmosFee { + amount: vec![], + gas_limit: 200000u64, + payer: "".to_string(), + granter: "".to_string(), + }), + tip: None, + }), + signatures: vec![vec![]], + }; + let r = noble_client + .broadcast_tx(CosmosBroadcastTxRequest { + tx_bytes: tx.encode_to_vec().into(), + // sync + mode: 2, + }) + .await?; + + // let r = noble_client + // .register_account(MsgRegisterAccount { + // signer: noble_address, + // recipient: address.to_string(), + // channel: channel.to_string(), + // }) + // .await?; + + println!("Noble response: {:?}", r); + } } + Ok(()) } } diff --git a/crates/bin/pcli/src/command/view.rs b/crates/bin/pcli/src/command/view.rs index 3d57d1333c..0bc07a2d85 100644 --- a/crates/bin/pcli/src/command/view.rs +++ b/crates/bin/pcli/src/command/view.rs @@ -2,6 +2,7 @@ use anyhow::Result; use address::AddressCmd; use balance::BalanceCmd; +use noble_address::NobleAddressCmd; use staked::StakedCmd; use transaction_hashes::TransactionHashesCmd; use tx::TxCmd; @@ -14,6 +15,7 @@ use self::auction::AuctionCmd; mod address; mod auction; mod balance; +mod noble_address; mod staked; mod wallet_id; @@ -28,6 +30,8 @@ pub enum ViewCmd { 
WalletId(WalletIdCmd), /// View one of your addresses, either by numerical index, or a random ephemeral one. Address(AddressCmd), + /// View the Noble forwarding address associated with one of your addresses, either by numerical index, or a random ephemeral one. + NobleAddress(NobleAddressCmd), /// View your account balances. Balance(BalanceCmd), /// View your staked delegation tokens. @@ -52,6 +56,7 @@ impl ViewCmd { ViewCmd::Auction(auction_cmd) => auction_cmd.offline(), ViewCmd::WalletId(wallet_id_cmd) => wallet_id_cmd.offline(), ViewCmd::Address(address_cmd) => address_cmd.offline(), + ViewCmd::NobleAddress(address_cmd) => address_cmd.offline(), ViewCmd::Balance(balance_cmd) => balance_cmd.offline(), ViewCmd::Staked(staked_cmd) => staked_cmd.offline(), ViewCmd::Reset(_) => true, @@ -91,6 +96,9 @@ impl ViewCmd { ViewCmd::Address(address_cmd) => { address_cmd.exec(&full_viewing_key)?; } + ViewCmd::NobleAddress(noble_address_cmd) => { + noble_address_cmd.exec(&full_viewing_key)?; + } ViewCmd::Balance(balance_cmd) => { let view_client = app.view(); balance_cmd.exec(view_client).await?; diff --git a/crates/bin/pcli/src/command/view/noble_address.rs b/crates/bin/pcli/src/command/view/noble_address.rs new file mode 100644 index 0000000000..6f696147cc --- /dev/null +++ b/crates/bin/pcli/src/command/view/noble_address.rs @@ -0,0 +1,52 @@ +use anyhow::Result; +use rand_core::OsRng; + +use penumbra_keys::{Address, FullViewingKey}; + +#[derive(Debug, clap::Parser)] +pub struct NobleAddressCmd { + /// The address to provide information about + #[clap(default_value = "0")] + address_or_index: String, + /// Generate an ephemeral address instead of an indexed one. + #[clap(short, long)] + ephemeral: bool, + /// The Noble IBC channel to use for forwarding. + #[clap(long)] + channel: String, +} + +impl NobleAddressCmd { + /// Determine if this command requires a network sync before it executes. 
+ pub fn offline(&self) -> bool { + true + } + + pub fn exec(&self, fvk: &FullViewingKey) -> Result<()> { + let index: Result = self.address_or_index.parse(); + + let address = if let Ok(index) = index { + // address index provided + let (address, _dtk) = match self.ephemeral { + false => fvk.incoming().payment_address(index.into()), + true => fvk.incoming().ephemeral_address(OsRng, index.into()), + }; + + address + } else { + // address or nothing provided + let address: Address = self + .address_or_index + .parse() + .map_err(|_| anyhow::anyhow!("Provided address is invalid."))?; + + address + }; + + let noble_address = address.noble_forwarding_address(&self.channel); + + println!("{}", noble_address); + + Ok(()) + } +} diff --git a/crates/core/keys/src/address.rs b/crates/core/keys/src/address.rs index e6dc61e918..3a136151b5 100644 --- a/crates/core/keys/src/address.rs +++ b/crates/core/keys/src/address.rs @@ -1,6 +1,7 @@ //! [Payment address][Address] facilities. use std::{ + fmt::Display, io::{Cursor, Read, Write}, sync::OnceLock, }; @@ -12,6 +13,7 @@ use f4jumble::{f4jumble, f4jumble_inv}; use penumbra_proto::{penumbra::core::keys::v1 as pb, serializers::bech32str, DomainType}; use rand::{CryptoRng, Rng}; use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; mod r1cs; pub use r1cs::AddressVar; @@ -214,6 +216,49 @@ impl Address { bech32str::Bech32, ) } + + /// Generate a Noble forwarding address. 
+ pub fn noble_forwarding_address(&self, channel: &str) -> NobleForwardingAddress { + NobleForwardingAddress { + channel: channel.to_string(), + recipient: format!("{}", self), + } + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct NobleForwardingAddress { + pub channel: String, + pub recipient: String, +} + +impl NobleForwardingAddress { + pub fn bytes(&self) -> Vec { + // Based on https://github.com/noble-assets/forwarding/blob/main/x/forwarding/types/account.go#L17 + let channel = self.channel.clone(); + let recipient = self.recipient.clone(); + let bz = format!("{channel}{recipient}").as_bytes().to_owned(); + let th = Sha256::digest("forwarding".as_bytes()); + let mut hasher = Sha256::new(); + hasher.update(th); + hasher.update(bz); + + // This constructs the account bytes for the Noble forwarding address + // Only use bytes 12 and on: + hasher.finalize()[12..].to_vec() + } +} + +impl Display for NobleForwardingAddress { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let addr_bytes = &self.bytes(); + + write!( + f, + "{}", + bech32str::encode(&addr_bytes, "noble", bech32str::Bech32) + ) + } } impl DomainType for Address { diff --git a/crates/proto/src/gen/cosmos.app.v1alpha1.rs b/crates/proto/src/gen/cosmos.app.v1alpha1.rs new file mode 100644 index 0000000000..de74556a7a --- /dev/null +++ b/crates/proto/src/gen/cosmos.app.v1alpha1.rs @@ -0,0 +1,102 @@ +/// ModuleDescriptor describes an app module. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ModuleDescriptor { + /// go_import names the package that should be imported by an app to load the + /// module in the runtime module registry. It is required to make debugging + /// of configuration errors easier for users. + #[prost(string, tag = "1")] + pub go_import: ::prost::alloc::string::String, + /// use_package refers to a protobuf package that this module + /// uses and exposes to the world. 
In an app, only one module should "use" + /// or own a single protobuf package. It is assumed that the module uses + /// all of the .proto files in a single package. + #[prost(message, repeated, tag = "2")] + pub use_package: ::prost::alloc::vec::Vec, + /// can_migrate_from defines which module versions this module can migrate + /// state from. The framework will check that one module version is able to + /// migrate from a previous module version before attempting to update its + /// config. It is assumed that modules can transitively migrate from earlier + /// versions. For instance if v3 declares it can migrate from v2, and v2 + /// declares it can migrate from v1, the framework knows how to migrate + /// from v1 to v3, assuming all 3 module versions are registered at runtime. + #[prost(message, repeated, tag = "3")] + pub can_migrate_from: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for ModuleDescriptor { + const NAME: &'static str = "ModuleDescriptor"; + const PACKAGE: &'static str = "cosmos.app.v1alpha1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.app.v1alpha1.{}", Self::NAME) + } +} +/// PackageReference is a reference to a protobuf package used by a module. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PackageReference { + /// name is the fully-qualified name of the package. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// revision is the optional revision of the package that is being used. + /// Protobuf packages used in Cosmos should generally have a major version + /// as the last part of the package name, ex. foo.bar.baz.v1. + /// The revision of a package can be thought of as the minor version of a + /// package which has additional backwards compatible definitions that weren't + /// present in a previous version. 
+ /// + /// A package should indicate its revision with a source code comment + /// above the package declaration in one of its files containing the + /// text "Revision N" where N is an integer revision. All packages start + /// at revision 0 the first time they are released in a module. + /// + /// When a new version of a module is released and items are added to existing + /// .proto files, these definitions should contain comments of the form + /// "Since: Revision N" where N is an integer revision. + /// + /// When the module runtime starts up, it will check the pinned proto + /// image and panic if there are runtime protobuf definitions that are not + /// in the pinned descriptor which do not have + /// a "Since Revision N" comment or have a "Since Revision N" comment where + /// N is \<= to the revision specified here. This indicates that the protobuf + /// files have been updated, but the pinned file descriptor hasn't. + /// + /// If there are items in the pinned file descriptor with a revision + /// greater than the value indicated here, this will also cause a panic + /// as it may mean that the pinned descriptor for a legacy module has been + /// improperly updated or that there is some other versioning discrepancy. + /// Runtime protobuf definitions will also be checked for compatibility + /// with pinned file descriptors to make sure there are no incompatible changes. 
+ /// + /// This behavior ensures that: + /// + /// * pinned proto images are up-to-date + /// * protobuf files are carefully annotated with revision comments which + /// are important good client UX + /// * protobuf files are changed in backwards and forwards compatible ways + #[prost(uint32, tag = "2")] + pub revision: u32, +} +impl ::prost::Name for PackageReference { + const NAME: &'static str = "PackageReference"; + const PACKAGE: &'static str = "cosmos.app.v1alpha1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.app.v1alpha1.{}", Self::NAME) + } +} +/// MigrateFromInfo is information on a module version that a newer module +/// can migrate from. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MigrateFromInfo { + /// module is the fully-qualified protobuf name of the module config object + /// for the previous module version, ex: "cosmos.group.module.v1.Module". + #[prost(string, tag = "1")] + pub module: ::prost::alloc::string::String, +} +impl ::prost::Name for MigrateFromInfo { + const NAME: &'static str = "MigrateFromInfo"; + const PACKAGE: &'static str = "cosmos.app.v1alpha1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.app.v1alpha1.{}", Self::NAME) + } +} diff --git a/crates/proto/src/gen/cosmos.auth.v1beta1.rs b/crates/proto/src/gen/cosmos.auth.v1beta1.rs new file mode 100644 index 0000000000..494ee21f5f --- /dev/null +++ b/crates/proto/src/gen/cosmos.auth.v1beta1.rs @@ -0,0 +1,83 @@ +/// BaseAccount defines a base account type. It contains all the necessary fields +/// for basic account functionality. Any custom account type should extend this +/// type for additional functionality (e.g. vesting). 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BaseAccount { + #[prost(string, tag = "1")] + pub address: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub pub_key: ::core::option::Option<::pbjson_types::Any>, + #[prost(uint64, tag = "3")] + pub account_number: u64, + #[prost(uint64, tag = "4")] + pub sequence: u64, +} +impl ::prost::Name for BaseAccount { + const NAME: &'static str = "BaseAccount"; + const PACKAGE: &'static str = "cosmos.auth.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.auth.v1beta1.{}", Self::NAME) + } +} +/// ModuleAccount defines an account for modules that holds coins on a pool. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ModuleAccount { + #[prost(message, optional, tag = "1")] + pub base_account: ::core::option::Option, + #[prost(string, tag = "2")] + pub name: ::prost::alloc::string::String, + #[prost(string, repeated, tag = "3")] + pub permissions: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +impl ::prost::Name for ModuleAccount { + const NAME: &'static str = "ModuleAccount"; + const PACKAGE: &'static str = "cosmos.auth.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.auth.v1beta1.{}", Self::NAME) + } +} +/// ModuleCredential represents a unclaimable pubkey for base accounts controlled by modules. +/// +/// Since: cosmos-sdk 0.47 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ModuleCredential { + /// module_name is the name of the module used for address derivation (passed into address.Module). 
+ #[prost(string, tag = "1")] + pub module_name: ::prost::alloc::string::String, + /// derivation_keys is for deriving a module account address (passed into address.Module) + /// adding more keys creates sub-account addresses (passed into address.Derive) + #[prost(bytes = "vec", repeated, tag = "2")] + pub derivation_keys: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, +} +impl ::prost::Name for ModuleCredential { + const NAME: &'static str = "ModuleCredential"; + const PACKAGE: &'static str = "cosmos.auth.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.auth.v1beta1.{}", Self::NAME) + } +} +/// Params defines the parameters for the auth module. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Params { + #[prost(uint64, tag = "1")] + pub max_memo_characters: u64, + #[prost(uint64, tag = "2")] + pub tx_sig_limit: u64, + #[prost(uint64, tag = "3")] + pub tx_size_cost_per_byte: u64, + #[prost(uint64, tag = "4")] + pub sig_verify_cost_ed25519: u64, + #[prost(uint64, tag = "5")] + pub sig_verify_cost_secp256k1: u64, +} +impl ::prost::Name for Params { + const NAME: &'static str = "Params"; + const PACKAGE: &'static str = "cosmos.auth.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.auth.v1beta1.{}", Self::NAME) + } +} diff --git a/crates/proto/src/gen/cosmos.base.abci.v1beta1.rs b/crates/proto/src/gen/cosmos.base.abci.v1beta1.rs new file mode 100644 index 0000000000..574a2519a4 --- /dev/null +++ b/crates/proto/src/gen/cosmos.base.abci.v1beta1.rs @@ -0,0 +1,285 @@ +/// TxResponse defines a structure containing relevant tx data and metadata. The +/// tags are stringified and the log is JSON decoded. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxResponse { + /// The block height + #[prost(int64, tag = "1")] + pub height: i64, + /// The transaction hash. 
+ #[prost(string, tag = "2")] + pub txhash: ::prost::alloc::string::String, + /// Namespace for the Code + #[prost(string, tag = "3")] + pub codespace: ::prost::alloc::string::String, + /// Response code. + #[prost(uint32, tag = "4")] + pub code: u32, + /// Result bytes, if any. + #[prost(string, tag = "5")] + pub data: ::prost::alloc::string::String, + /// The output of the application's logger (raw string). May be + /// non-deterministic. + #[prost(string, tag = "6")] + pub raw_log: ::prost::alloc::string::String, + /// The output of the application's logger (typed). May be non-deterministic. + #[prost(message, repeated, tag = "7")] + pub logs: ::prost::alloc::vec::Vec, + /// Additional information. May be non-deterministic. + #[prost(string, tag = "8")] + pub info: ::prost::alloc::string::String, + /// Amount of gas requested for transaction. + #[prost(int64, tag = "9")] + pub gas_wanted: i64, + /// Amount of gas consumed by transaction. + #[prost(int64, tag = "10")] + pub gas_used: i64, + /// The request transaction bytes. + #[prost(message, optional, tag = "11")] + pub tx: ::core::option::Option<::pbjson_types::Any>, + /// Time of the previous block. For heights > 1, it's the weighted median of + /// the timestamps of the valid votes in the block.LastCommit. For height == 1, + /// it's genesis time. + #[prost(string, tag = "12")] + pub timestamp: ::prost::alloc::string::String, + /// Events defines all the events emitted by processing a transaction. Note, + /// these events include those emitted by processing all the messages and those + /// emitted from the ante. Whereas Logs contains the events, with + /// additional metadata, emitted only by processing the messages. 
+ /// + /// Since: cosmos-sdk 0.42.11, 0.44.5, 0.45 + #[prost(message, repeated, tag = "13")] + pub events: ::prost::alloc::vec::Vec< + super::super::super::super::tendermint::abci::Event, + >, +} +impl ::prost::Name for TxResponse { + const NAME: &'static str = "TxResponse"; + const PACKAGE: &'static str = "cosmos.base.abci.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.base.abci.v1beta1.{}", Self::NAME) + } +} +/// ABCIMessageLog defines a structure containing an indexed tx ABCI message log. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AbciMessageLog { + #[prost(uint32, tag = "1")] + pub msg_index: u32, + #[prost(string, tag = "2")] + pub log: ::prost::alloc::string::String, + /// Events contains a slice of Event objects that were emitted during some + /// execution. + #[prost(message, repeated, tag = "3")] + pub events: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for AbciMessageLog { + const NAME: &'static str = "ABCIMessageLog"; + const PACKAGE: &'static str = "cosmos.base.abci.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.base.abci.v1beta1.{}", Self::NAME) + } +} +/// StringEvent defines en Event object wrapper where all the attributes +/// contain key/value pairs that are strings instead of raw bytes. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StringEvent { + #[prost(string, tag = "1")] + pub r#type: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "2")] + pub attributes: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for StringEvent { + const NAME: &'static str = "StringEvent"; + const PACKAGE: &'static str = "cosmos.base.abci.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.base.abci.v1beta1.{}", Self::NAME) + } +} +/// Attribute defines an attribute wrapper where the key and value are +/// strings instead of raw bytes. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Attribute { + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub value: ::prost::alloc::string::String, +} +impl ::prost::Name for Attribute { + const NAME: &'static str = "Attribute"; + const PACKAGE: &'static str = "cosmos.base.abci.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.base.abci.v1beta1.{}", Self::NAME) + } +} +/// GasInfo defines tx execution gas context. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GasInfo { + /// GasWanted is the maximum units of work we allow this tx to perform. + #[prost(uint64, tag = "1")] + pub gas_wanted: u64, + /// GasUsed is the amount of gas actually consumed. + #[prost(uint64, tag = "2")] + pub gas_used: u64, +} +impl ::prost::Name for GasInfo { + const NAME: &'static str = "GasInfo"; + const PACKAGE: &'static str = "cosmos.base.abci.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.base.abci.v1beta1.{}", Self::NAME) + } +} +/// Result is the union of ResponseFormat and ResponseCheckTx. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Result { + /// Data is any data returned from message or handler execution. It MUST be + /// length prefixed in order to separate data from multiple message executions. + /// Deprecated. This field is still populated, but prefer msg_response instead + /// because it also contains the Msg response typeURL. + #[deprecated] + #[prost(bytes = "vec", tag = "1")] + pub data: ::prost::alloc::vec::Vec, + /// Log contains the log information from message or handler execution. + #[prost(string, tag = "2")] + pub log: ::prost::alloc::string::String, + /// Events contains a slice of Event objects that were emitted during message + /// or handler execution. + #[prost(message, repeated, tag = "3")] + pub events: ::prost::alloc::vec::Vec< + super::super::super::super::tendermint::abci::Event, + >, + /// msg_responses contains the Msg handler responses type packed in Anys. + /// + /// Since: cosmos-sdk 0.46 + #[prost(message, repeated, tag = "4")] + pub msg_responses: ::prost::alloc::vec::Vec<::pbjson_types::Any>, +} +impl ::prost::Name for Result { + const NAME: &'static str = "Result"; + const PACKAGE: &'static str = "cosmos.base.abci.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.base.abci.v1beta1.{}", Self::NAME) + } +} +/// SimulationResponse defines the response generated when a transaction is +/// successfully simulated. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SimulationResponse { + #[prost(message, optional, tag = "1")] + pub gas_info: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub result: ::core::option::Option, +} +impl ::prost::Name for SimulationResponse { + const NAME: &'static str = "SimulationResponse"; + const PACKAGE: &'static str = "cosmos.base.abci.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.base.abci.v1beta1.{}", Self::NAME) + } +} +/// MsgData defines the data returned in a Result object during message +/// execution. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgData { + #[prost(string, tag = "1")] + pub msg_type: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "2")] + pub data: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for MsgData { + const NAME: &'static str = "MsgData"; + const PACKAGE: &'static str = "cosmos.base.abci.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.base.abci.v1beta1.{}", Self::NAME) + } +} +/// TxMsgData defines a list of MsgData. A transaction will have a MsgData object +/// for each message. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxMsgData { + /// data field is deprecated and not populated. + #[deprecated] + #[prost(message, repeated, tag = "1")] + pub data: ::prost::alloc::vec::Vec, + /// msg_responses contains the Msg handler responses packed into Anys. 
+ /// + /// Since: cosmos-sdk 0.46 + #[prost(message, repeated, tag = "2")] + pub msg_responses: ::prost::alloc::vec::Vec<::pbjson_types::Any>, +} +impl ::prost::Name for TxMsgData { + const NAME: &'static str = "TxMsgData"; + const PACKAGE: &'static str = "cosmos.base.abci.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.base.abci.v1beta1.{}", Self::NAME) + } +} +/// SearchTxsResult defines a structure for querying txs pageable +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SearchTxsResult { + /// Count of all txs + #[prost(uint64, tag = "1")] + pub total_count: u64, + /// Count of txs in current page + #[prost(uint64, tag = "2")] + pub count: u64, + /// Index of current page, start from 1 + #[prost(uint64, tag = "3")] + pub page_number: u64, + /// Count of total pages + #[prost(uint64, tag = "4")] + pub page_total: u64, + /// Max count txs per page + #[prost(uint64, tag = "5")] + pub limit: u64, + /// List of txs in current page + #[prost(message, repeated, tag = "6")] + pub txs: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for SearchTxsResult { + const NAME: &'static str = "SearchTxsResult"; + const PACKAGE: &'static str = "cosmos.base.abci.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.base.abci.v1beta1.{}", Self::NAME) + } +} +/// SearchBlocksResult defines a structure for querying blocks pageable +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SearchBlocksResult { + /// Count of all blocks + #[prost(int64, tag = "1")] + pub total_count: i64, + /// Count of blocks in current page + #[prost(int64, tag = "2")] + pub count: i64, + /// Index of current page, start from 1 + #[prost(int64, tag = "3")] + pub page_number: i64, + /// Count of total pages + #[prost(int64, tag = "4")] + pub page_total: i64, + /// Max count blocks per page + 
#[prost(int64, tag = "5")] + pub limit: i64, + /// List of blocks in current page + #[prost(message, repeated, tag = "6")] + pub blocks: ::prost::alloc::vec::Vec< + super::super::super::super::tendermint::types::Block, + >, +} +impl ::prost::Name for SearchBlocksResult { + const NAME: &'static str = "SearchBlocksResult"; + const PACKAGE: &'static str = "cosmos.base.abci.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.base.abci.v1beta1.{}", Self::NAME) + } +} diff --git a/crates/proto/src/gen/cosmos.crypto.multisig.v1beta1.rs b/crates/proto/src/gen/cosmos.crypto.multisig.v1beta1.rs new file mode 100644 index 0000000000..1d784f6978 --- /dev/null +++ b/crates/proto/src/gen/cosmos.crypto.multisig.v1beta1.rs @@ -0,0 +1,35 @@ +/// MultiSignature wraps the signatures from a multisig.LegacyAminoPubKey. +/// See cosmos.tx.v1betata1.ModeInfo.Multi for how to specify which signers +/// signed and with which modes. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MultiSignature { + #[prost(bytes = "vec", repeated, tag = "1")] + pub signatures: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, +} +impl ::prost::Name for MultiSignature { + const NAME: &'static str = "MultiSignature"; + const PACKAGE: &'static str = "cosmos.crypto.multisig.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.crypto.multisig.v1beta1.{}", Self::NAME) + } +} +/// CompactBitArray is an implementation of a space efficient bit array. +/// This is used to ensure that the encoded data takes up a minimal amount of +/// space after proto encoding. +/// This is not thread safe, and is not intended for concurrent usage. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CompactBitArray { + #[prost(uint32, tag = "1")] + pub extra_bits_stored: u32, + #[prost(bytes = "vec", tag = "2")] + pub elems: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for CompactBitArray { + const NAME: &'static str = "CompactBitArray"; + const PACKAGE: &'static str = "cosmos.crypto.multisig.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.crypto.multisig.v1beta1.{}", Self::NAME) + } +} diff --git a/crates/proto/src/gen/cosmos.tx.config.v1.rs b/crates/proto/src/gen/cosmos.tx.config.v1.rs new file mode 100644 index 0000000000..b79ab2e0fd --- /dev/null +++ b/crates/proto/src/gen/cosmos.tx.config.v1.rs @@ -0,0 +1,20 @@ +/// Config is the config object of the x/auth/tx package. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Config { + /// skip_ante_handler defines whether the ante handler registration should be skipped in case an app wants to override + /// this functionality. + #[prost(bool, tag = "1")] + pub skip_ante_handler: bool, + /// skip_post_handler defines whether the post handler registration should be skipped in case an app wants to override + /// this functionality. + #[prost(bool, tag = "2")] + pub skip_post_handler: bool, +} +impl ::prost::Name for Config { + const NAME: &'static str = "Config"; + const PACKAGE: &'static str = "cosmos.tx.config.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.config.v1.{}", Self::NAME) + } +} diff --git a/crates/proto/src/gen/cosmos.tx.signing.v1beta1.rs b/crates/proto/src/gen/cosmos.tx.signing.v1beta1.rs new file mode 100644 index 0000000000..c5b8f2f52f --- /dev/null +++ b/crates/proto/src/gen/cosmos.tx.signing.v1beta1.rs @@ -0,0 +1,189 @@ +/// SignatureDescriptors wraps multiple SignatureDescriptor's. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SignatureDescriptors { + /// signatures are the signature descriptors + #[prost(message, repeated, tag = "1")] + pub signatures: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for SignatureDescriptors { + const NAME: &'static str = "SignatureDescriptors"; + const PACKAGE: &'static str = "cosmos.tx.signing.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.signing.v1beta1.{}", Self::NAME) + } +} +/// SignatureDescriptor is a convenience type which represents the full data for +/// a signature including the public key of the signer, signing modes and the +/// signature itself. It is primarily used for coordinating signatures between +/// clients. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SignatureDescriptor { + /// public_key is the public key of the signer + #[prost(message, optional, tag = "1")] + pub public_key: ::core::option::Option<::pbjson_types::Any>, + #[prost(message, optional, tag = "2")] + pub data: ::core::option::Option, + /// sequence is the sequence of the account, which describes the + /// number of committed transactions signed by a given address. It is used to prevent + /// replay attacks. + #[prost(uint64, tag = "3")] + pub sequence: u64, +} +/// Nested message and enum types in `SignatureDescriptor`. +pub mod signature_descriptor { + /// Data represents signature data + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Data { + /// sum is the oneof that specifies whether this represents single or multi-signature data + #[prost(oneof = "data::Sum", tags = "1, 2")] + pub sum: ::core::option::Option, + } + /// Nested message and enum types in `Data`. 
+ pub mod data { + /// Single is the signature data for a single signer + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Single { + /// mode is the signing mode of the single signer + #[prost(enumeration = "super::super::SignMode", tag = "1")] + pub mode: i32, + /// signature is the raw signature bytes + #[prost(bytes = "vec", tag = "2")] + pub signature: ::prost::alloc::vec::Vec, + } + impl ::prost::Name for Single { + const NAME: &'static str = "Single"; + const PACKAGE: &'static str = "cosmos.tx.signing.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!( + "cosmos.tx.signing.v1beta1.SignatureDescriptor.Data.{}", Self::NAME + ) + } + } + /// Multi is the signature data for a multisig public key + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Multi { + /// bitarray specifies which keys within the multisig are signing + #[prost(message, optional, tag = "1")] + pub bitarray: ::core::option::Option< + super::super::super::super::super::crypto::multisig::v1beta1::CompactBitArray, + >, + /// signatures is the signatures of the multi-signature + #[prost(message, repeated, tag = "2")] + pub signatures: ::prost::alloc::vec::Vec, + } + impl ::prost::Name for Multi { + const NAME: &'static str = "Multi"; + const PACKAGE: &'static str = "cosmos.tx.signing.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!( + "cosmos.tx.signing.v1beta1.SignatureDescriptor.Data.{}", Self::NAME + ) + } + } + /// sum is the oneof that specifies whether this represents single or multi-signature data + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Sum { + /// single represents a single signer + #[prost(message, tag = "1")] + Single(Single), + /// multi represents a multisig signer + #[prost(message, tag = "2")] + Multi(Multi), + } + } + impl 
::prost::Name for Data { + const NAME: &'static str = "Data"; + const PACKAGE: &'static str = "cosmos.tx.signing.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!( + "cosmos.tx.signing.v1beta1.SignatureDescriptor.{}", Self::NAME + ) + } + } +} +impl ::prost::Name for SignatureDescriptor { + const NAME: &'static str = "SignatureDescriptor"; + const PACKAGE: &'static str = "cosmos.tx.signing.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.signing.v1beta1.{}", Self::NAME) + } +} +/// SignMode represents a signing mode with its own security guarantees. +/// +/// This enum should be considered a registry of all known sign modes +/// in the Cosmos ecosystem. Apps are not expected to support all known +/// sign modes. Apps that would like to support custom sign modes are +/// encouraged to open a small PR against this file to add a new case +/// to this SignMode enum describing their sign mode so that different +/// apps have a consistent version of this enum. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum SignMode { + /// SIGN_MODE_UNSPECIFIED specifies an unknown signing mode and will be + /// rejected. + Unspecified = 0, + /// SIGN_MODE_DIRECT specifies a signing mode which uses SignDoc and is + /// verified with raw bytes from Tx. + Direct = 1, + /// SIGN_MODE_TEXTUAL is a future signing mode that will verify some + /// human-readable textual representation on top of the binary representation + /// from SIGN_MODE_DIRECT. It is currently experimental, and should be used + /// for testing purposes only, until Textual is fully released. Please follow + /// the tracking issue + Textual = 2, + /// SIGN_MODE_DIRECT_AUX specifies a signing mode which uses + /// SignDocDirectAux. As opposed to SIGN_MODE_DIRECT, this sign mode does not + /// require signers signing over other signers' `signer_info`. 
It also allows + /// for adding Tips in transactions. + /// + /// Since: cosmos-sdk 0.46 + DirectAux = 3, + /// SIGN_MODE_LEGACY_AMINO_JSON is a backwards compatibility mode which uses + /// Amino JSON and will be removed in the future. + LegacyAminoJson = 127, + /// SIGN_MODE_EIP_191 specifies the sign mode for EIP 191 signing on the Cosmos + /// SDK. Ref: + /// + /// Currently, SIGN_MODE_EIP_191 is registered as a SignMode enum variant, + /// but is not implemented on the SDK by default. To enable EIP-191, you need + /// to pass a custom `TxConfig` that has an implementation of + /// `SignModeHandler` for EIP-191. The SDK may decide to fully support + /// EIP-191 in the future. + /// + /// Since: cosmos-sdk 0.45.2 + Eip191 = 191, +} +impl SignMode { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + SignMode::Unspecified => "SIGN_MODE_UNSPECIFIED", + SignMode::Direct => "SIGN_MODE_DIRECT", + SignMode::Textual => "SIGN_MODE_TEXTUAL", + SignMode::DirectAux => "SIGN_MODE_DIRECT_AUX", + SignMode::LegacyAminoJson => "SIGN_MODE_LEGACY_AMINO_JSON", + SignMode::Eip191 => "SIGN_MODE_EIP_191", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SIGN_MODE_UNSPECIFIED" => Some(Self::Unspecified), + "SIGN_MODE_DIRECT" => Some(Self::Direct), + "SIGN_MODE_TEXTUAL" => Some(Self::Textual), + "SIGN_MODE_DIRECT_AUX" => Some(Self::DirectAux), + "SIGN_MODE_LEGACY_AMINO_JSON" => Some(Self::LegacyAminoJson), + "SIGN_MODE_EIP_191" => Some(Self::Eip191), + _ => None, + } + } +} diff --git a/crates/proto/src/gen/cosmos.tx.v1beta1.rs b/crates/proto/src/gen/cosmos.tx.v1beta1.rs new file mode 100644 index 0000000000..8aeb187cda --- /dev/null +++ b/crates/proto/src/gen/cosmos.tx.v1beta1.rs @@ -0,0 +1,1774 @@ +/// Tx is the standard type used for broadcasting transactions. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Tx { + /// body is the processable content of the transaction + #[prost(message, optional, tag = "1")] + pub body: ::core::option::Option, + /// auth_info is the authorization related content of the transaction, + /// specifically signers, signer modes and fee + #[prost(message, optional, tag = "2")] + pub auth_info: ::core::option::Option, + /// signatures is a list of signatures that matches the length and order of + /// AuthInfo's signer_infos to allow connecting signature meta information like + /// public key and signing mode by position. + #[prost(bytes = "vec", repeated, tag = "3")] + pub signatures: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, +} +impl ::prost::Name for Tx { + const NAME: &'static str = "Tx"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// TxRaw is a variant of Tx that pins the signer's exact binary representation +/// of body and auth_info. This is used for signing, broadcasting and +/// verification. 
The binary `serialize(tx: TxRaw)` is stored in Tendermint and +/// the hash `sha256(serialize(tx: TxRaw))` becomes the "txhash", commonly used +/// as the transaction ID. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxRaw { + /// body_bytes is a protobuf serialization of a TxBody that matches the + /// representation in SignDoc. + #[prost(bytes = "vec", tag = "1")] + pub body_bytes: ::prost::alloc::vec::Vec, + /// auth_info_bytes is a protobuf serialization of an AuthInfo that matches the + /// representation in SignDoc. + #[prost(bytes = "vec", tag = "2")] + pub auth_info_bytes: ::prost::alloc::vec::Vec, + /// signatures is a list of signatures that matches the length and order of + /// AuthInfo's signer_infos to allow connecting signature meta information like + /// public key and signing mode by position. + #[prost(bytes = "vec", repeated, tag = "3")] + pub signatures: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, +} +impl ::prost::Name for TxRaw { + const NAME: &'static str = "TxRaw"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// SignDoc is the type used for generating sign bytes for SIGN_MODE_DIRECT. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SignDoc { + /// body_bytes is protobuf serialization of a TxBody that matches the + /// representation in TxRaw. + #[prost(bytes = "vec", tag = "1")] + pub body_bytes: ::prost::alloc::vec::Vec, + /// auth_info_bytes is a protobuf serialization of an AuthInfo that matches the + /// representation in TxRaw. + #[prost(bytes = "vec", tag = "2")] + pub auth_info_bytes: ::prost::alloc::vec::Vec, + /// chain_id is the unique identifier of the chain this transaction targets. 
+ /// It prevents signed transactions from being used on another chain by an + /// attacker + #[prost(string, tag = "3")] + pub chain_id: ::prost::alloc::string::String, + /// account_number is the account number of the account in state + #[prost(uint64, tag = "4")] + pub account_number: u64, +} +impl ::prost::Name for SignDoc { + const NAME: &'static str = "SignDoc"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// SignDocDirectAux is the type used for generating sign bytes for +/// SIGN_MODE_DIRECT_AUX. +/// +/// Since: cosmos-sdk 0.46 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SignDocDirectAux { + /// body_bytes is protobuf serialization of a TxBody that matches the + /// representation in TxRaw. + #[prost(bytes = "vec", tag = "1")] + pub body_bytes: ::prost::alloc::vec::Vec, + /// public_key is the public key of the signing account. + #[prost(message, optional, tag = "2")] + pub public_key: ::core::option::Option<::pbjson_types::Any>, + /// chain_id is the identifier of the chain this transaction targets. + /// It prevents signed transactions from being used on another chain by an + /// attacker. + #[prost(string, tag = "3")] + pub chain_id: ::prost::alloc::string::String, + /// account_number is the account number of the account in state. + #[prost(uint64, tag = "4")] + pub account_number: u64, + /// sequence is the sequence number of the signing account. + #[prost(uint64, tag = "5")] + pub sequence: u64, + /// Tip is the optional tip used for transactions fees paid in another denom. + /// It should be left empty if the signer is not the tipper for this + /// transaction. + /// + /// This field is ignored if the chain didn't enable tips, i.e. didn't add the + /// `TipDecorator` in its posthandler. 
+ #[prost(message, optional, tag = "6")] + pub tip: ::core::option::Option, +} +impl ::prost::Name for SignDocDirectAux { + const NAME: &'static str = "SignDocDirectAux"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// TxBody is the body of a transaction that all signers sign over. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxBody { + /// messages is a list of messages to be executed. The required signers of + /// those messages define the number and order of elements in AuthInfo's + /// signer_infos and Tx's signatures. Each required signer address is added to + /// the list only the first time it occurs. + /// By convention, the first required signer (usually from the first message) + /// is referred to as the primary signer and pays the fee for the whole + /// transaction. + #[prost(message, repeated, tag = "1")] + pub messages: ::prost::alloc::vec::Vec<::pbjson_types::Any>, + /// memo is any arbitrary note/comment to be added to the transaction. + /// WARNING: in clients, any publicly exposed text should not be called memo, + /// but should be called `note` instead (see ). + #[prost(string, tag = "2")] + pub memo: ::prost::alloc::string::String, + /// timeout is the block height after which this transaction will not + /// be processed by the chain + #[prost(uint64, tag = "3")] + pub timeout_height: u64, + /// extension_options are arbitrary options that can be added by chains + /// when the default options are not sufficient. If any of these are present + /// and can't be handled, the transaction will be rejected + #[prost(message, repeated, tag = "1023")] + pub extension_options: ::prost::alloc::vec::Vec<::pbjson_types::Any>, + /// extension_options are arbitrary options that can be added by chains + /// when the default options are not sufficient. 
If any of these are present + /// and can't be handled, they will be ignored + #[prost(message, repeated, tag = "2047")] + pub non_critical_extension_options: ::prost::alloc::vec::Vec<::pbjson_types::Any>, +} +impl ::prost::Name for TxBody { + const NAME: &'static str = "TxBody"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// AuthInfo describes the fee and signer modes that are used to sign a +/// transaction. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AuthInfo { + /// signer_infos defines the signing modes for the required signers. The number + /// and order of elements must match the required signers from TxBody's + /// messages. The first element is the primary signer and the one which pays + /// the fee. + #[prost(message, repeated, tag = "1")] + pub signer_infos: ::prost::alloc::vec::Vec, + /// Fee is the fee and gas limit for the transaction. The first signer is the + /// primary signer and the one which pays the fee. The fee can be calculated + /// based on the cost of evaluating the body and doing signature verification + /// of the signers. This can be estimated via simulation. + #[prost(message, optional, tag = "2")] + pub fee: ::core::option::Option, + /// Tip is the optional tip used for transactions fees paid in another denom. + /// + /// This field is ignored if the chain didn't enable tips, i.e. didn't add the + /// `TipDecorator` in its posthandler. 
+ /// + /// Since: cosmos-sdk 0.46 + #[prost(message, optional, tag = "3")] + pub tip: ::core::option::Option, +} +impl ::prost::Name for AuthInfo { + const NAME: &'static str = "AuthInfo"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// SignerInfo describes the public key and signing mode of a single top-level +/// signer. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SignerInfo { + /// public_key is the public key of the signer. It is optional for accounts + /// that already exist in state. If unset, the verifier can use the required + /// signer address for this position and lookup the public key. + #[prost(message, optional, tag = "1")] + pub public_key: ::core::option::Option<::pbjson_types::Any>, + /// mode_info describes the signing mode of the signer and is a nested + /// structure to support nested multisig pubkey's + #[prost(message, optional, tag = "2")] + pub mode_info: ::core::option::Option, + /// sequence is the sequence of the account, which describes the + /// number of committed transactions signed by a given address. It is used to + /// prevent replay attacks. + #[prost(uint64, tag = "3")] + pub sequence: u64, +} +impl ::prost::Name for SignerInfo { + const NAME: &'static str = "SignerInfo"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// ModeInfo describes the signing mode of a single or nested multisig signer. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ModeInfo { + /// sum is the oneof that specifies whether this represents a single or nested + /// multisig signer + #[prost(oneof = "mode_info::Sum", tags = "1, 2")] + pub sum: ::core::option::Option, +} +/// Nested message and enum types in `ModeInfo`. +pub mod mode_info { + /// Single is the mode info for a single signer. It is structured as a message + /// to allow for additional fields such as locale for SIGN_MODE_TEXTUAL in the + /// future + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Single { + /// mode is the signing mode of the single signer + #[prost(enumeration = "super::super::signing::v1beta1::SignMode", tag = "1")] + pub mode: i32, + } + impl ::prost::Name for Single { + const NAME: &'static str = "Single"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.ModeInfo.{}", Self::NAME) + } + } + /// Multi is the mode info for a multisig public key + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct Multi { + /// bitarray specifies which keys within the multisig are signing + #[prost(message, optional, tag = "1")] + pub bitarray: ::core::option::Option< + super::super::super::crypto::multisig::v1beta1::CompactBitArray, + >, + /// mode_infos is the corresponding modes of the signers of the multisig + /// which could include nested multisig public keys + #[prost(message, repeated, tag = "2")] + pub mode_infos: ::prost::alloc::vec::Vec, + } + impl ::prost::Name for Multi { + const NAME: &'static str = "Multi"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.ModeInfo.{}", Self::NAME) + } + } + /// sum is the oneof that specifies 
whether this represents a single or nested + /// multisig signer + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Sum { + /// single represents a single signer + #[prost(message, tag = "1")] + Single(Single), + /// multi represents a nested multisig signer + #[prost(message, tag = "2")] + Multi(Multi), + } +} +impl ::prost::Name for ModeInfo { + const NAME: &'static str = "ModeInfo"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// Fee includes the amount of coins paid in fees and the maximum +/// gas to be used by the transaction. The ratio yields an effective "gasprice", +/// which must be above some miminum to be accepted into the mempool. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Fee { + /// amount is the amount of coins to be paid as a fee + #[prost(message, repeated, tag = "1")] + pub amount: ::prost::alloc::vec::Vec, + /// gas_limit is the maximum gas that can be used in transaction processing + /// before an out of gas error occurs + #[prost(uint64, tag = "2")] + pub gas_limit: u64, + /// if unset, the first signer is responsible for paying the fees. If set, the specified account must pay the fees. + /// the payer must be a tx signer (and thus have signed this field in AuthInfo). + /// setting this field does *not* change the ordering of required signers for the transaction. + #[prost(string, tag = "3")] + pub payer: ::prost::alloc::string::String, + /// if set, the fee payer (either the first signer or the value of the payer field) requests that a fee grant be used + /// to pay fees instead of the fee payer's own balance. 
If an appropriate fee grant does not exist or the chain does + /// not support fee grants, this will fail + #[prost(string, tag = "4")] + pub granter: ::prost::alloc::string::String, +} +impl ::prost::Name for Fee { + const NAME: &'static str = "Fee"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// Tip is the tip used for meta-transactions. +/// +/// Since: cosmos-sdk 0.46 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Tip { + /// amount is the amount of the tip + #[prost(message, repeated, tag = "1")] + pub amount: ::prost::alloc::vec::Vec, + /// tipper is the address of the account paying for the tip + #[prost(string, tag = "2")] + pub tipper: ::prost::alloc::string::String, +} +impl ::prost::Name for Tip { + const NAME: &'static str = "Tip"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// AuxSignerData is the intermediary format that an auxiliary signer (e.g. a +/// tipper) builds and sends to the fee payer (who will build and broadcast the +/// actual tx). AuxSignerData is not a valid tx in itself, and will be rejected +/// by the node if sent directly as-is. +/// +/// Since: cosmos-sdk 0.46 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AuxSignerData { + /// address is the bech32-encoded address of the auxiliary signer. If using + /// AuxSignerData across different chains, the bech32 prefix of the target + /// chain (where the final transaction is broadcasted) should be used. + #[prost(string, tag = "1")] + pub address: ::prost::alloc::string::String, + /// sign_doc is the SIGN_MODE_DIRECT_AUX sign doc that the auxiliary signer + /// signs. 
Note: we use the same sign doc even if we're signing with + /// LEGACY_AMINO_JSON. + #[prost(message, optional, tag = "2")] + pub sign_doc: ::core::option::Option, + /// mode is the signing mode of the single signer. + #[prost(enumeration = "super::signing::v1beta1::SignMode", tag = "3")] + pub mode: i32, + /// sig is the signature of the sign doc. + #[prost(bytes = "vec", tag = "4")] + pub sig: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for AuxSignerData { + const NAME: &'static str = "AuxSignerData"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// GetTxsEventRequest is the request type for the Service.TxsByEvents +/// RPC method. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetTxsEventRequest { + /// events is the list of transaction event type. + /// Deprecated post v0.47.x: use query instead, which should contain a valid + /// events query. + #[deprecated] + #[prost(string, repeated, tag = "1")] + pub events: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// pagination defines a pagination for the request. + /// Deprecated post v0.46.x: use page and limit instead. + #[deprecated] + #[prost(message, optional, tag = "2")] + pub pagination: ::core::option::Option< + super::super::base::query::v1beta1::PageRequest, + >, + #[prost(enumeration = "OrderBy", tag = "3")] + pub order_by: i32, + /// page is the page number to query, starts at 1. If not provided, will + /// default to first page. + #[prost(uint64, tag = "4")] + pub page: u64, + /// limit is the total number of results to be returned in the result page. + /// If left empty it will default to a value to be set by each app. + #[prost(uint64, tag = "5")] + pub limit: u64, + /// query defines the transaction event query that is proxied to Tendermint's + /// TxSearch RPC method. 
The query must be valid. + /// + /// Since Cosmos SDK 0.48 + #[prost(string, tag = "6")] + pub query: ::prost::alloc::string::String, +} +impl ::prost::Name for GetTxsEventRequest { + const NAME: &'static str = "GetTxsEventRequest"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// GetTxsEventResponse is the response type for the Service.TxsByEvents +/// RPC method. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetTxsEventResponse { + /// txs is the list of queried transactions. + #[prost(message, repeated, tag = "1")] + pub txs: ::prost::alloc::vec::Vec, + /// tx_responses is the list of queried TxResponses. + #[prost(message, repeated, tag = "2")] + pub tx_responses: ::prost::alloc::vec::Vec< + super::super::base::abci::v1beta1::TxResponse, + >, + /// pagination defines a pagination for the response. + /// Deprecated post v0.46.x: use total instead. + #[deprecated] + #[prost(message, optional, tag = "3")] + pub pagination: ::core::option::Option< + super::super::base::query::v1beta1::PageResponse, + >, + /// total is total number of results available + #[prost(uint64, tag = "4")] + pub total: u64, +} +impl ::prost::Name for GetTxsEventResponse { + const NAME: &'static str = "GetTxsEventResponse"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// BroadcastTxRequest is the request type for the Service.BroadcastTxRequest +/// RPC method. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BroadcastTxRequest { + /// tx_bytes is the raw transaction. 
+ #[prost(bytes = "vec", tag = "1")] + pub tx_bytes: ::prost::alloc::vec::Vec, + #[prost(enumeration = "BroadcastMode", tag = "2")] + pub mode: i32, +} +impl ::prost::Name for BroadcastTxRequest { + const NAME: &'static str = "BroadcastTxRequest"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// BroadcastTxResponse is the response type for the +/// Service.BroadcastTx method. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BroadcastTxResponse { + /// tx_response is the queried TxResponses. + #[prost(message, optional, tag = "1")] + pub tx_response: ::core::option::Option< + super::super::base::abci::v1beta1::TxResponse, + >, +} +impl ::prost::Name for BroadcastTxResponse { + const NAME: &'static str = "BroadcastTxResponse"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// SimulateRequest is the request type for the Service.Simulate +/// RPC method. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SimulateRequest { + /// tx is the transaction to simulate. + /// Deprecated. Send raw tx bytes instead. + #[deprecated] + #[prost(message, optional, tag = "1")] + pub tx: ::core::option::Option, + /// tx_bytes is the raw transaction. + /// + /// Since: cosmos-sdk 0.43 + #[prost(bytes = "vec", tag = "2")] + pub tx_bytes: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for SimulateRequest { + const NAME: &'static str = "SimulateRequest"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// SimulateResponse is the response type for the +/// Service.SimulateRPC method. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SimulateResponse { + /// gas_info is the information about gas used in the simulation. + #[prost(message, optional, tag = "1")] + pub gas_info: ::core::option::Option, + /// result is the result of the simulation. + #[prost(message, optional, tag = "2")] + pub result: ::core::option::Option, +} +impl ::prost::Name for SimulateResponse { + const NAME: &'static str = "SimulateResponse"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// GetTxRequest is the request type for the Service.GetTx +/// RPC method. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetTxRequest { + /// hash is the tx hash to query, encoded as a hex string. + #[prost(string, tag = "1")] + pub hash: ::prost::alloc::string::String, +} +impl ::prost::Name for GetTxRequest { + const NAME: &'static str = "GetTxRequest"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// GetTxResponse is the response type for the Service.GetTx method. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetTxResponse { + /// tx is the queried transaction. + #[prost(message, optional, tag = "1")] + pub tx: ::core::option::Option, + /// tx_response is the queried TxResponses. 
+ #[prost(message, optional, tag = "2")] + pub tx_response: ::core::option::Option< + super::super::base::abci::v1beta1::TxResponse, + >, +} +impl ::prost::Name for GetTxResponse { + const NAME: &'static str = "GetTxResponse"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// GetBlockWithTxsRequest is the request type for the Service.GetBlockWithTxs +/// RPC method. +/// +/// Since: cosmos-sdk 0.45.2 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetBlockWithTxsRequest { + /// height is the height of the block to query. + #[prost(int64, tag = "1")] + pub height: i64, + /// pagination defines a pagination for the request. + #[prost(message, optional, tag = "2")] + pub pagination: ::core::option::Option< + super::super::base::query::v1beta1::PageRequest, + >, +} +impl ::prost::Name for GetBlockWithTxsRequest { + const NAME: &'static str = "GetBlockWithTxsRequest"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// GetBlockWithTxsResponse is the response type for the Service.GetBlockWithTxs +/// method. +/// +/// Since: cosmos-sdk 0.45.2 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetBlockWithTxsResponse { + /// txs are the transactions in the block. + #[prost(message, repeated, tag = "1")] + pub txs: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "2")] + pub block_id: ::core::option::Option< + super::super::super::tendermint::types::BlockId, + >, + #[prost(message, optional, tag = "3")] + pub block: ::core::option::Option, + /// pagination defines a pagination for the response. 
+ #[prost(message, optional, tag = "4")] + pub pagination: ::core::option::Option< + super::super::base::query::v1beta1::PageResponse, + >, +} +impl ::prost::Name for GetBlockWithTxsResponse { + const NAME: &'static str = "GetBlockWithTxsResponse"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// TxDecodeRequest is the request type for the Service.TxDecode +/// RPC method. +/// +/// Since: cosmos-sdk 0.47 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxDecodeRequest { + /// tx_bytes is the raw transaction. + #[prost(bytes = "vec", tag = "1")] + pub tx_bytes: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for TxDecodeRequest { + const NAME: &'static str = "TxDecodeRequest"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// TxDecodeResponse is the response type for the +/// Service.TxDecode method. +/// +/// Since: cosmos-sdk 0.47 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxDecodeResponse { + /// tx is the decoded transaction. + #[prost(message, optional, tag = "1")] + pub tx: ::core::option::Option, +} +impl ::prost::Name for TxDecodeResponse { + const NAME: &'static str = "TxDecodeResponse"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// TxEncodeRequest is the request type for the Service.TxEncode +/// RPC method. +/// +/// Since: cosmos-sdk 0.47 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxEncodeRequest { + /// tx is the transaction to encode. 
+ #[prost(message, optional, tag = "1")] + pub tx: ::core::option::Option, +} +impl ::prost::Name for TxEncodeRequest { + const NAME: &'static str = "TxEncodeRequest"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// TxEncodeResponse is the response type for the +/// Service.TxEncode method. +/// +/// Since: cosmos-sdk 0.47 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxEncodeResponse { + /// tx_bytes is the encoded transaction bytes. + #[prost(bytes = "vec", tag = "1")] + pub tx_bytes: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for TxEncodeResponse { + const NAME: &'static str = "TxEncodeResponse"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// TxEncodeAminoRequest is the request type for the Service.TxEncodeAmino +/// RPC method. +/// +/// Since: cosmos-sdk 0.47 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxEncodeAminoRequest { + #[prost(string, tag = "1")] + pub amino_json: ::prost::alloc::string::String, +} +impl ::prost::Name for TxEncodeAminoRequest { + const NAME: &'static str = "TxEncodeAminoRequest"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// TxEncodeAminoResponse is the response type for the Service.TxEncodeAmino +/// RPC method. 
+/// +/// Since: cosmos-sdk 0.47 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxEncodeAminoResponse { + #[prost(bytes = "vec", tag = "1")] + pub amino_binary: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for TxEncodeAminoResponse { + const NAME: &'static str = "TxEncodeAminoResponse"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// TxDecodeAminoRequest is the request type for the Service.TxDecodeAmino +/// RPC method. +/// +/// Since: cosmos-sdk 0.47 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxDecodeAminoRequest { + #[prost(bytes = "vec", tag = "1")] + pub amino_binary: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for TxDecodeAminoRequest { + const NAME: &'static str = "TxDecodeAminoRequest"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// TxDecodeAminoResponse is the response type for the Service.TxDecodeAmino +/// RPC method. +/// +/// Since: cosmos-sdk 0.47 +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxDecodeAminoResponse { + #[prost(string, tag = "1")] + pub amino_json: ::prost::alloc::string::String, +} +impl ::prost::Name for TxDecodeAminoResponse { + const NAME: &'static str = "TxDecodeAminoResponse"; + const PACKAGE: &'static str = "cosmos.tx.v1beta1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("cosmos.tx.v1beta1.{}", Self::NAME) + } +} +/// OrderBy defines the sorting order +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum OrderBy { + /// ORDER_BY_UNSPECIFIED specifies an unknown sorting order. 
OrderBy defaults + /// to ASC in this case. + Unspecified = 0, + /// ORDER_BY_ASC defines ascending order + Asc = 1, + /// ORDER_BY_DESC defines descending order + Desc = 2, +} +impl OrderBy { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + OrderBy::Unspecified => "ORDER_BY_UNSPECIFIED", + OrderBy::Asc => "ORDER_BY_ASC", + OrderBy::Desc => "ORDER_BY_DESC", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ORDER_BY_UNSPECIFIED" => Some(Self::Unspecified), + "ORDER_BY_ASC" => Some(Self::Asc), + "ORDER_BY_DESC" => Some(Self::Desc), + _ => None, + } + } +} +/// BroadcastMode specifies the broadcast mode for the TxService.Broadcast RPC +/// method. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum BroadcastMode { + /// zero-value for mode ordering + Unspecified = 0, + /// DEPRECATED: use BROADCAST_MODE_SYNC instead, + /// BROADCAST_MODE_BLOCK is not supported by the SDK from v0.47.x onwards. + Block = 1, + /// BROADCAST_MODE_SYNC defines a tx broadcasting mode where the client waits + /// for a CheckTx execution response only. + Sync = 2, + /// BROADCAST_MODE_ASYNC defines a tx broadcasting mode where the client + /// returns immediately. + Async = 3, +} +impl BroadcastMode { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + BroadcastMode::Unspecified => "BROADCAST_MODE_UNSPECIFIED", + BroadcastMode::Block => "BROADCAST_MODE_BLOCK", + BroadcastMode::Sync => "BROADCAST_MODE_SYNC", + BroadcastMode::Async => "BROADCAST_MODE_ASYNC", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "BROADCAST_MODE_UNSPECIFIED" => Some(Self::Unspecified), + "BROADCAST_MODE_BLOCK" => Some(Self::Block), + "BROADCAST_MODE_SYNC" => Some(Self::Sync), + "BROADCAST_MODE_ASYNC" => Some(Self::Async), + _ => None, + } + } +} +/// Generated client implementations. +#[cfg(feature = "rpc")] +pub mod service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Service defines a gRPC service for interacting with transactions. + #[derive(Debug, Clone)] + pub struct ServiceClient { + inner: tonic::client::Grpc, + } + impl ServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl ServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> ServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + ServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Simulate simulates executing a transaction for estimating gas usage. 
+ pub async fn simulate( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.tx.v1beta1.Service/Simulate", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("cosmos.tx.v1beta1.Service", "Simulate")); + self.inner.unary(req, path, codec).await + } + /// GetTx fetches a tx by hash. + pub async fn get_tx( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.tx.v1beta1.Service/GetTx", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("cosmos.tx.v1beta1.Service", "GetTx")); + self.inner.unary(req, path, codec).await + } + /// BroadcastTx broadcast transaction. + pub async fn broadcast_tx( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.tx.v1beta1.Service/BroadcastTx", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("cosmos.tx.v1beta1.Service", "BroadcastTx")); + self.inner.unary(req, path, codec).await + } + /// GetTxsEvent fetches txs by event. 
+ pub async fn get_txs_event( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.tx.v1beta1.Service/GetTxsEvent", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("cosmos.tx.v1beta1.Service", "GetTxsEvent")); + self.inner.unary(req, path, codec).await + } + /// GetBlockWithTxs fetches a block with decoded txs. + /// + /// Since: cosmos-sdk 0.45.2 + pub async fn get_block_with_txs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.tx.v1beta1.Service/GetBlockWithTxs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("cosmos.tx.v1beta1.Service", "GetBlockWithTxs")); + self.inner.unary(req, path, codec).await + } + /// TxDecode decodes the transaction. 
+ /// + /// Since: cosmos-sdk 0.47 + pub async fn tx_decode( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.tx.v1beta1.Service/TxDecode", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("cosmos.tx.v1beta1.Service", "TxDecode")); + self.inner.unary(req, path, codec).await + } + /// TxEncode encodes the transaction. + /// + /// Since: cosmos-sdk 0.47 + pub async fn tx_encode( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.tx.v1beta1.Service/TxEncode", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("cosmos.tx.v1beta1.Service", "TxEncode")); + self.inner.unary(req, path, codec).await + } + /// TxEncodeAmino encodes an Amino transaction from JSON to encoded bytes. 
+ /// + /// Since: cosmos-sdk 0.47 + pub async fn tx_encode_amino( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.tx.v1beta1.Service/TxEncodeAmino", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("cosmos.tx.v1beta1.Service", "TxEncodeAmino")); + self.inner.unary(req, path, codec).await + } + /// TxDecodeAmino decodes an Amino transaction from encoded bytes to JSON. + /// + /// Since: cosmos-sdk 0.47 + pub async fn tx_decode_amino( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/cosmos.tx.v1beta1.Service/TxDecodeAmino", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("cosmos.tx.v1beta1.Service", "TxDecodeAmino")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +#[cfg(feature = "rpc")] +pub mod service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with ServiceServer. + #[async_trait] + pub trait Service: Send + Sync + 'static { + /// Simulate simulates executing a transaction for estimating gas usage. 
+ async fn simulate( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// GetTx fetches a tx by hash. + async fn get_tx( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// BroadcastTx broadcast transaction. + async fn broadcast_tx( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// GetTxsEvent fetches txs by event. + async fn get_txs_event( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// GetBlockWithTxs fetches a block with decoded txs. + /// + /// Since: cosmos-sdk 0.45.2 + async fn get_block_with_txs( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// TxDecode decodes the transaction. + /// + /// Since: cosmos-sdk 0.47 + async fn tx_decode( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// TxEncode encodes the transaction. + /// + /// Since: cosmos-sdk 0.47 + async fn tx_encode( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// TxEncodeAmino encodes an Amino transaction from JSON to encoded bytes. + /// + /// Since: cosmos-sdk 0.47 + async fn tx_encode_amino( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// TxDecodeAmino decodes an Amino transaction from encoded bytes to JSON. + /// + /// Since: cosmos-sdk 0.47 + async fn tx_decode_amino( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /// Service defines a gRPC service for interacting with transactions. 
+ #[derive(Debug)] + pub struct ServiceServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl ServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for ServiceServer + where + T: Service, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/cosmos.tx.v1beta1.Service/Simulate" => { + #[allow(non_camel_case_types)] + struct SimulateSvc(pub Arc); + impl tonic::server::UnaryService + for SimulateSvc { + type Response = super::SimulateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::simulate(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = SimulateSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/cosmos.tx.v1beta1.Service/GetTx" => { + #[allow(non_camel_case_types)] + struct GetTxSvc(pub Arc); + impl 
tonic::server::UnaryService + for GetTxSvc { + type Response = super::GetTxResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_tx(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetTxSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/cosmos.tx.v1beta1.Service/BroadcastTx" => { + #[allow(non_camel_case_types)] + struct BroadcastTxSvc(pub Arc); + impl< + T: Service, + > tonic::server::UnaryService + for BroadcastTxSvc { + type Response = super::BroadcastTxResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::broadcast_tx(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = 
BroadcastTxSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/cosmos.tx.v1beta1.Service/GetTxsEvent" => { + #[allow(non_camel_case_types)] + struct GetTxsEventSvc(pub Arc); + impl< + T: Service, + > tonic::server::UnaryService + for GetTxsEventSvc { + type Response = super::GetTxsEventResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_txs_event(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetTxsEventSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/cosmos.tx.v1beta1.Service/GetBlockWithTxs" => { + #[allow(non_camel_case_types)] + struct GetBlockWithTxsSvc(pub Arc); + impl< + T: Service, + > tonic::server::UnaryService + for GetBlockWithTxsSvc { + type Response = super::GetBlockWithTxsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + 
&mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_block_with_txs(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = GetBlockWithTxsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/cosmos.tx.v1beta1.Service/TxDecode" => { + #[allow(non_camel_case_types)] + struct TxDecodeSvc(pub Arc); + impl tonic::server::UnaryService + for TxDecodeSvc { + type Response = super::TxDecodeResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::tx_decode(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = TxDecodeSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + 
send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/cosmos.tx.v1beta1.Service/TxEncode" => { + #[allow(non_camel_case_types)] + struct TxEncodeSvc(pub Arc); + impl tonic::server::UnaryService + for TxEncodeSvc { + type Response = super::TxEncodeResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::tx_encode(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = TxEncodeSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/cosmos.tx.v1beta1.Service/TxEncodeAmino" => { + #[allow(non_camel_case_types)] + struct TxEncodeAminoSvc(pub Arc); + impl< + T: Service, + > tonic::server::UnaryService + for TxEncodeAminoSvc { + type Response = super::TxEncodeAminoResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::tx_encode_amino(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = 
self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = TxEncodeAminoSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/cosmos.tx.v1beta1.Service/TxDecodeAmino" => { + #[allow(non_camel_case_types)] + struct TxDecodeAminoSvc(pub Arc); + impl< + T: Service, + > tonic::server::UnaryService + for TxDecodeAminoSvc { + type Response = super::TxDecodeAminoResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::tx_decode_amino(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = TxDecodeAminoSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + 
Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for ServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for ServiceServer { + const NAME: &'static str = "cosmos.tx.v1beta1.Service"; + } +} diff --git a/crates/proto/src/gen/noble.forwarding.v1.rs b/crates/proto/src/gen/noble.forwarding.v1.rs new file mode 100644 index 0000000000..1bfadd3576 --- /dev/null +++ b/crates/proto/src/gen/noble.forwarding.v1.rs @@ -0,0 +1,1083 @@ +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ForwardingAccount { + #[prost(message, optional, tag = "1")] + pub base_account: ::core::option::Option< + super::super::super::cosmos::auth::v1beta1::BaseAccount, + >, + #[prost(string, tag = "2")] + pub channel: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub recipient: ::prost::alloc::string::String, + #[prost(int64, tag = "4")] + pub created_at: i64, +} +impl ::prost::Name for ForwardingAccount { + const NAME: &'static str = "ForwardingAccount"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("noble.forwarding.v1.{}", Self::NAME) + } +} 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ForwardingPubKey { + #[prost(bytes = "vec", tag = "1")] + pub key: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for ForwardingPubKey { + const NAME: &'static str = "ForwardingPubKey"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("noble.forwarding.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GenesisState { + #[prost(map = "string, uint64", tag = "1")] + pub num_of_accounts: ::std::collections::HashMap< + ::prost::alloc::string::String, + u64, + >, + #[prost(map = "string, uint64", tag = "2")] + pub num_of_forwards: ::std::collections::HashMap< + ::prost::alloc::string::String, + u64, + >, + #[prost(map = "string, string", tag = "3")] + pub total_forwarded: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, +} +impl ::prost::Name for GenesisState { + const NAME: &'static str = "GenesisState"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("noble.forwarding.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RegisterAccountData { + #[prost(string, tag = "1")] + pub recipient: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub channel: ::prost::alloc::string::String, +} +impl ::prost::Name for RegisterAccountData { + const NAME: &'static str = "RegisterAccountData"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("noble.forwarding.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct 
RegisterAccountMemo { + #[prost(message, optional, tag = "1")] + pub noble: ::core::option::Option, +} +/// Nested message and enum types in `RegisterAccountMemo`. +pub mod register_account_memo { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct RegisterAccountDataWrapper { + #[prost(message, optional, tag = "1")] + pub forwarding: ::core::option::Option, + } + impl ::prost::Name for RegisterAccountDataWrapper { + const NAME: &'static str = "RegisterAccountDataWrapper"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!( + "noble.forwarding.v1.RegisterAccountMemo.{}", Self::NAME + ) + } + } +} +impl ::prost::Name for RegisterAccountMemo { + const NAME: &'static str = "RegisterAccountMemo"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("noble.forwarding.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryAddress { + #[prost(string, tag = "1")] + pub channel: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub recipient: ::prost::alloc::string::String, +} +impl ::prost::Name for QueryAddress { + const NAME: &'static str = "QueryAddress"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("noble.forwarding.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryAddressResponse { + #[prost(string, tag = "1")] + pub address: ::prost::alloc::string::String, + #[prost(bool, tag = "2")] + pub exists: bool, +} +impl ::prost::Name for QueryAddressResponse { + const NAME: &'static str = "QueryAddressResponse"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> 
::prost::alloc::string::String { + ::prost::alloc::format!("noble.forwarding.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryStats {} +impl ::prost::Name for QueryStats { + const NAME: &'static str = "QueryStats"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("noble.forwarding.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryStatsResponse { + #[prost(map = "string, message", tag = "1")] + pub stats: ::std::collections::HashMap<::prost::alloc::string::String, Stats>, +} +impl ::prost::Name for QueryStatsResponse { + const NAME: &'static str = "QueryStatsResponse"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("noble.forwarding.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryStatsByChannel { + #[prost(string, tag = "1")] + pub channel: ::prost::alloc::string::String, +} +impl ::prost::Name for QueryStatsByChannel { + const NAME: &'static str = "QueryStatsByChannel"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("noble.forwarding.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QueryStatsByChannelResponse { + #[prost(uint64, tag = "1")] + pub num_of_accounts: u64, + #[prost(uint64, tag = "2")] + pub num_of_forwards: u64, + #[prost(message, repeated, tag = "3")] + pub total_forwarded: ::prost::alloc::vec::Vec< + super::super::super::cosmos::base::v1beta1::Coin, + >, +} +impl ::prost::Name for QueryStatsByChannelResponse { + const NAME: &'static str = 
"QueryStatsByChannelResponse"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("noble.forwarding.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Stats { + #[prost(string, tag = "1")] + pub chain_id: ::prost::alloc::string::String, + #[prost(uint64, tag = "2")] + pub num_of_accounts: u64, + #[prost(uint64, tag = "3")] + pub num_of_forwards: u64, + #[prost(message, repeated, tag = "4")] + pub total_forwarded: ::prost::alloc::vec::Vec< + super::super::super::cosmos::base::v1beta1::Coin, + >, +} +impl ::prost::Name for Stats { + const NAME: &'static str = "Stats"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("noble.forwarding.v1.{}", Self::NAME) + } +} +/// Generated client implementations. +#[cfg(feature = "rpc")] +pub mod query_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct QueryClient { + inner: tonic::client::Grpc, + } + impl QueryClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl QueryClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> QueryClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + QueryClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + pub async fn address( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/noble.forwarding.v1.Query/Address", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("noble.forwarding.v1.Query", "Address")); + self.inner.unary(req, path, codec).await + } + pub async fn stats( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/noble.forwarding.v1.Query/Stats", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("noble.forwarding.v1.Query", "Stats")); + self.inner.unary(req, path, codec).await + } + pub async fn stats_by_channel( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/noble.forwarding.v1.Query/StatsByChannel", + ); + let mut req = request.into_request(); + req.extensions_mut() + 
.insert(GrpcMethod::new("noble.forwarding.v1.Query", "StatsByChannel")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +#[cfg(feature = "rpc")] +pub mod query_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with QueryServer. + #[async_trait] + pub trait Query: Send + Sync + 'static { + async fn address( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn stats( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn stats_by_channel( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + #[derive(Debug)] + pub struct QueryServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl QueryServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for QueryServer + where + T: Query, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/noble.forwarding.v1.Query/Address" => { + #[allow(non_camel_case_types)] + struct AddressSvc(pub Arc); + impl tonic::server::UnaryService + for AddressSvc { + type Response = super::QueryAddressResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::address(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = AddressSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = 
tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/noble.forwarding.v1.Query/Stats" => { + #[allow(non_camel_case_types)] + struct StatsSvc(pub Arc); + impl tonic::server::UnaryService + for StatsSvc { + type Response = super::QueryStatsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::stats(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = StatsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/noble.forwarding.v1.Query/StatsByChannel" => { + #[allow(non_camel_case_types)] + struct StatsByChannelSvc(pub Arc); + impl< + T: Query, + > tonic::server::UnaryService + for StatsByChannelSvc { + type Response = super::QueryStatsByChannelResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + 
::stats_by_channel(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = StatsByChannelSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for QueryServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for QueryServer { + const NAME: &'static str = "noble.forwarding.v1.Query"; + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgRegisterAccount { + #[prost(string, tag = "1")] + pub signer: ::prost::alloc::string::String, + 
#[prost(string, tag = "2")] + pub recipient: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub channel: ::prost::alloc::string::String, +} +impl ::prost::Name for MsgRegisterAccount { + const NAME: &'static str = "MsgRegisterAccount"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("noble.forwarding.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgRegisterAccountResponse { + #[prost(string, tag = "1")] + pub address: ::prost::alloc::string::String, +} +impl ::prost::Name for MsgRegisterAccountResponse { + const NAME: &'static str = "MsgRegisterAccountResponse"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("noble.forwarding.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgClearAccount { + #[prost(string, tag = "1")] + pub signer: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub address: ::prost::alloc::string::String, +} +impl ::prost::Name for MsgClearAccount { + const NAME: &'static str = "MsgClearAccount"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("noble.forwarding.v1.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MsgClearAccountResponse {} +impl ::prost::Name for MsgClearAccountResponse { + const NAME: &'static str = "MsgClearAccountResponse"; + const PACKAGE: &'static str = "noble.forwarding.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("noble.forwarding.v1.{}", Self::NAME) + } +} +/// Generated client implementations. 
+#[cfg(feature = "rpc")] +pub mod msg_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct MsgClient { + inner: tonic::client::Grpc, + } + impl MsgClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl MsgClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> MsgClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + MsgClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. 
+ /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + pub async fn register_account( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/noble.forwarding.v1.Msg/RegisterAccount", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("noble.forwarding.v1.Msg", "RegisterAccount")); + self.inner.unary(req, path, codec).await + } + pub async fn clear_account( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/noble.forwarding.v1.Msg/ClearAccount", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("noble.forwarding.v1.Msg", "ClearAccount")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +#[cfg(feature = "rpc")] +pub mod msg_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with MsgServer. 
+ #[async_trait] + pub trait Msg: Send + Sync + 'static { + async fn register_account( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn clear_account( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + #[derive(Debug)] + pub struct MsgServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl MsgServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for MsgServer + where + T: Msg, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/noble.forwarding.v1.Msg/RegisterAccount" => { + #[allow(non_camel_case_types)] + struct RegisterAccountSvc(pub Arc); + impl tonic::server::UnaryService + for RegisterAccountSvc { + type Response = super::MsgRegisterAccountResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::register_account(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = RegisterAccountSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/noble.forwarding.v1.Msg/ClearAccount" => { + #[allow(non_camel_case_types)] + struct 
ClearAccountSvc(pub Arc); + impl tonic::server::UnaryService + for ClearAccountSvc { + type Response = super::MsgClearAccountResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::clear_account(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ClearAccountSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for MsgServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + 
impl tonic::server::NamedService for MsgServer { + const NAME: &'static str = "noble.forwarding.v1.Msg"; + } +} diff --git a/crates/proto/src/gen/proto_descriptor.bin.no_lfs b/crates/proto/src/gen/proto_descriptor.bin.no_lfs index 049d2776782e15931731ee25bc163b11d42ee9ca..5127703ec3ff420ed8031f1a9a75fde97737de06 100644 GIT binary patch delta 86371 zcmdSC3w&KibuWDO-sc=iTaqPh+md{2OWVisTQAFT96L@NSytk=9obGwc*sZ6v2*#xwkdSy0SUKBAi>ZBg@imxA-oa@B_Xt56QG51OL_MJTqqDI?Jd2~66gp0n(zN# zGqd-RESbP}dw;*rzt}lz&#W~wYu2n;vu4fA4?nZ2{H}Eg_rE_9cgxp(FS+Wjr=Pc; zdir_$&v!e|ce>^K-shz5Z)++y#jBd_^xpe7U$$gNf@T&3i(*;IsBkbUlYZ z)yEC7Rc=iRC0*h<{QZ2RYsX_=;*{)!>7)}=lTl)3>RgBa&g4-cQ{LbySmo>8`q7;V|L7ozs<6n zQ=fI8{C4A4&Hi_?wFz;ldz>?Sl zx(`LE&)yx^a1DDf{-_n}aqHZ8_0#_hG(vamYI$l`i*@|-cm2*f2Y=)En|SJX)+HMY zyZ&a4+cuWZk4{>~dW+jm$Z zZeG3ldX$uoS~qSkcQ3J5Ej~S!KQk3logV&o%ew#R^)-LD+D&@#wpiiA|1^?3|AEZX zy1hoqR>$_9|9qycuFDA1npoHQpJ%SV{sAQ=@ocPMrS5C=`+QTTU&u}uvc>FFspx0? zVs30IJKEu&%AL)Qc1~oc#!BOUaXK^Q=TG^i++;S&o*i}1zjwGd@TW3U`C@h?KQ-zi zwV0on5hnQiN`9{B=cY!p(^>p8RU!=tnLm;N;n7S1z^D(rk?b@IpU9og`svZkxwPx& z3x0YsKUEq}cWE+(>_n!N9ra6j|2V2qEM+FALEA3{3i(N(gynYm zgPEzZtPq-BhR!~ln?&U%&iR`(Hr{CkRUF(MliojKV&j$auRptCB(Nu5KmHuX3 zi;7fTHqyXl^+#uVt)HDJqEBYmAfQ+leUvp%4N$L()u8htHDK*b1$s;k!0Carkjsgz z-@C)_&reRzod03g3!)C)6v^GM-}ie=#~Mb?>i?*a3ds=e2D3W)%Gn+5Z@Pf>x;c~% z#cqS5pb7-}P}%npP6clR$n-2xtK@5hF)vG_PK`2NRmuB`4y-JrQhHSTFCsY={kBE- zTPYj$l#JX_4Db>ZPZ$3g%}$gu9KA=+mByj(^$;s@L|&Nj@Omh<;+0!E{K?$ZOetHu za&ymy*@F^zrgzuRN$R==+J~Jhg|07+o?m%-Lo|;ZIlVH#tTUc1KnX&JWuW!)!#8C| zN*(?ge9ek(ES3tnsWJ9~C>^TH;>;;1c|ToD`|F{jb0e6#kVOx_4b;spH5?{~5tKdZ z51$iKQ!|spC=9AxXYC05Gdau>!eE&OihlMi=aeF_GDYf7C{oOenF)i_!SYDlhD^w{{CY}4*RF_1t{JCh4|?$#WY)-!{Cd^QMqrf zpyn%LE>(S0$Jyi*wJtNdHu%%SR{H%AJb_yVXHP57nK1yKkM{=nLS9hj|dbiZk7 zOUudGOHzMj`Nw1`4mrd5m)%w|7kBxGW{M?0Kx0HkJGX4!+Oc)pWt@Td7xQ1<-!$hV zm^U&pQ_S6x?eY)jOW7SH3&lB{pP_VQATcAcfQd$+-IcS&&Y{mzN zPHHJW36m!aF=J0>{ONo#R|1P@2~1_jFf{P{BqTh@7j;f0!bTNC%2Ah!5`j3GFJvJ) z{#1Ud^VQix-WU6WLMuHeCy6%9tC@}={`zTdumdx!q3 
z`n}>qMW35~u+R@Thy4I^+_@RnxauPd{cv+*({NJk*e&kAE^}9wpgXdK$;RANsauv6 z-P4&uX0ljre052Lp+7$bE3-ILJSvFZ7B?|Gksmo-Zn`(Ir2Oss6SXH* z%Aor0?R2ZNx8z_rj$~VH-&$YW>v!pDpcY+faWkNEXw(qH+vV1P-#Mt`e4*75^i}0s z?@TOj(^;crZ@XJ{OSVviifWCQpSvf~8eFeHozx7)43MGD!>F|#X9 z=2TQXy~}O5-f&JgH0{<;&kPTp&Yl|*wxhAm`kKL7d_0gncbrS*9o0`i)NcI{f76@X zdhjnGKQ(@%>tv>bz^+25I;ZTUW{2dii|F@E3K8@}(0LLk(2&NBJAtQYK-S<3Bf= zE1lyQ9}Xx-MVSTEBv4n~!g|PR+o`vGqZE@KATu;MThv3ev@%7F>)YBj8j==;Xco{G z*^OpIGtEmG2xyCf5YP6Uh7ivNUW47FbK1abfV8OMY~VG7XaE{RGzVynwo&8`&>FpE z8qHx&pzQ=-vFu+GTWO_w{9t!9kRql|EYio6}P4@7XfTs_5U?dp#evasXCR0^h`{-uT>1ZCKwl_4M$(o(Hh z00^~jfiL4)DH2I3OwT){NC=%j4Qf)&1ciqk1|tXnQT}}PlT3D z1)Rb}c!W)^@thTQeY)R2gW_li?#q`lhl-fmvgI#4R@Gd7<0leL!OX-&_ttIOdtDI* zs|LJTQCp|lSHl)nK0pr8ZYK@=Fjy4k&h){M;_T4T4TZV`4tuMx{oP~Bukfq$I+ohjx<#HyQ_$jC&A zQ8j|!Vw#r5=}ssL@1Y5eGV{Cr-_8Wk-=r^Hf%y+?dM&C%xkW9&>NT3xfKbIX)oLsN z0;|`oY;zyArNWTU4!d(8^%i@@nl$lN)l3&0n68ct_BG2ahQWG6e()t%_}PB=w6bEVGVY^V5xHiR6dcImJU4NV;xwC3@_~GB=!XZZK1XQ2K|D93R-><9E^T+ugs<2O3ePGMI$MI$(*G{6755^GnuDKGfFRF!@ly zI#r%?xxEH%*(8471qZ5tCTV+^bT5+#a>{b-Bu4rb_9f+^)A8m2ZNH*c=}$+7+ZA5B zNCO~XU6Eex?ncw$e%%$@W2G+hXIWd(WWZjRW?(R(_0R&C7ckIiSZIBN3B}n}y;SNR zN8PVRjRICat}0KMm1kO?u6(>oDo>D=k5fza>nRTaYTpmfHdO~8(C@ENdK3_9-`~*a zj-&PoFTO8!&`Q0+ue4scVh$i@GsBQuO6eT)K`RdA8W)=fVu@*i4c(i~K;XK<{G_TI znLs7>RWFg+CRp43_6n&V0MvHBmNP)exW773<^e!dcYkWR#zWeH5Rc~nfe??g&jE0e zZBEGSb0D=mH2**JEz4aV8Q$HQ;gOtL0~kkll-PwtD_6ej?u1|d;S-5A{!>@pduO7F z-|Fi`c+Jw`1)|F<<`9U5Ow6tDvA&W!@CMx4;p|v$N-YLjs{}i}P~p3o zRSYYTa0O8yuBArCvm>X6N@rWEg+}{AH2RtE_*oFB8VS^IascipEP@LpP;+q(>`^&| zM2$u}ewGU|tRZzfTp=-QJy+Rl15=|??*O%^29|2X8pm!G?hMBs0<4D8$z-MEt1A4--YQgK&c;VLpLs z0J8ZJcyuDNO}~n)2+>xg@W-~v1~_yFeb5F4J@p3 z4V+%&*6Ez0RnqraAu1IHQT6O3D_vk}&|7WRswSQOjQC*PFwNiV)r^KYvUa+6ddOsP zkmX2x(8V{CW0hOSQ4(f}BMVmH{HC*{|I&6FbtU(O%I9)7POG)mDqnqHVpAXhmS<22 z&{IRXQBk6(uE6_7y@PIJxVjyZMQzczP%67qlzB8`srCC{Lx`$Ac-OeDtbD~zB^9u#b5=h2hRXxZwR((DXTpLRK|6K_4o`E7f9lHS>rCE;>~3y zhQ_nGvGI~9U1zXfeQlZ8D__&2VSkF&Ex-3ui5s@uVY^M5fsr=#JU7k>Tl%$bP=0%t 
zxyf;vw9AX$l1P=Gc_y*+@Bz1RA_GOE^wdzrT(i8AbJE~`(NdHg-PT1!jlOy(${&6* z(GtYrYpLzMaIQL(D-LJJGq>dO1x;Y_@VQrKrm$GbhRiIc2i--wJzBGSi(DI8oc^l1 zLHzQ1ARchxh!iZkprHM^E{%8`nFb|mIvq+`sO{&HO>b};biqcQmskE-Vp-7O)|(0m z2hz>%68%A|U8Q8x@CvJ04N^wVINl>$%k>5`7*0IVCV!Uq~! z>631&slpuXS^n`S;;H@3Zc=SF2$SjuHRLUE6G~HawAK$QAU)(R)71K?c1>MGLzg$* zlSl;$NmW82c_;$@B-ns5)M^GblzyAzHfw5gbfyO%5uIeE*?uw?-;Qf1Bph$0vvfc@ zXf1zwCtOSr0oK(+6@scq_v#DSIB-^MyU|E0wWu{fb7dwDh63yDacd_dfy6Msn9%O1 zl-f!>azT$-4sAG1733KemJ}7%u2fi$yJRYRwlox}mm%rgs`4A(m{_)S17{^pFPNvX zvCkc;*6Q++Hzk^bH&ilp*7V?MmeIqhS$51_qUnszVj8=zw)Z6rY|o$@e(lYb<}%E= zD8m-wSNXo@5=+wmr?~dj8n=2-9390hY{HuCS%z-VZBAuU+dH3KO1DTcJ798RLXOa| z2O!qG4654R`HW(XE~bB=ZU4)R?ax71+dH2bsLU8KCX-VBw0+nY0#I2MjH+4p650rUl9CSu9YiAG6SyZy|7Eo`rz2*w-&eea@@~ zu3WGZ5VDxHW~~G~8O9f*J4>-Ey0bo2E(H>eNmwi}{j|n03X4;Gwot$(q7|UPnn4An z*VA~^d7+(VuN!798^qpGIn}!1X)D5|uAy}alQ$z;v}RC#S1u+v&u}pjL<bv@(LdPKaETmvm(cRfe?~vyvMkus5Kk^l2+% zQmE|aT`{ORVb%cujaCdaUS#k0m0}*je_5iNDc2PmxeKlsw88XD1PD zJE-x~-^5n9DZm)y=zj*bkT$Hy^5}Dkp7K?n#;#7A>td5>=nRbd?5KF2oWY7O*wL$I zIvl>EdW{pp6;-bKO{*NJR(ObFX8jO*f~|PEk7@>^4;6+hCzjA~W#y)^=Yh@9bFG{( zu*Ym*E{G?=mwbL=NDVH|1~r3q`r`lxT>ABCS&OMY3a1o~7R{~y|1m3lkF3wN-Y|}> zV{TJu!Vb;I^4AF3g|qbhQr~z>VD{|QXefFwb6ditxLU=Bw3X%h5{Bl|kb*4c4e4k& zD6^Hr0im9<*{uiXOC1mUxI9(-P>YOE8>-cj(uRc{OgLZ}*YG#^)q0P!ngA^+>J6Pz6!I6n9?aMObv^5@X8!+l4O?L9(nE{3WWZbu&zh;C^yt(#@J zVEkRUc1%BPRc4=MeRxN(;)mC2Muqq+>fLt1q!JZ03$x3(H|g}6DZ*8N|6>az+gZT?8Kk~xPfsC!Ua|L%;#|lu19i(H%nzt=UneXC&B_{ zTsgapS^7z4d_!rzqJm=j)ow!64poSFv?$dusQBA^rR!qHHb(Tyh2#01s#s z>ZNqMcSZSc?@Dx*vv(yLf)I(SI7GU|tv3AkuBwc(QwCq^0x1q>EM2vV$U9i7>d1+5 ztcbxX9H|~`Cs?wQj*DhTF^X7Mi1ek!r7eCNH19p&%bnP|2W z5A`41H*om)P~Y+6eIRZ3{?c_D7(>8mOotG_ zr4t|ElI=p5y7sd&HFa@slowp%CS*D1&6g3*o?=OoQw#fg6gp}ni@*hI!_*6F6L2CG z-9@rKwVi#mvn5BYkfmwwbieVqNxMM>yIMQXm*%vpE5%B`PmC;zJbaJ4a$qJL|Sxo@~I zn;r_7W&C;z7BDND6fk=^Vd#hX0`B8`H@Qt08nrKdyF4}D-uFVb@}%ok7{fsxu3hZ7 zd-IWy{;kNY^QL~Ki><=L=3#Q4WnjX-tk_Kt^g3()t)bM#++wm(Z|p=>%q;V=vY5H0 
zgc*Ce$$!ye!mP}>d&7Z;3#z&y$b<>rBnv>npxRppM6SQ~3cyD6dyv%Sud_%J|7YsO)bFj&1M zzD*_BF~o(o#7!7DGq$v)O&U1bvh^~JhNMz9Xkao{Zv zYst{VW0wUCd@M--@;ky0nB701WYvb7hE^6v)##lfYZq zw$?x>Ze@su;KsHPtqN#urge4|(AvVrRROInL_=h5dpx*YldJ|_dwj8hfY;u(&OpFx z-?~lr9U{{GxIwD{njbd@25Nxjw_T#qYJld4Xt>5;RfvY87=T*cxO5z4h*k%*)o~Ry8MEtvwz_Sz5D5SRZ*_=Q zj~g69yn5hW5?`r{s|VgCZM_Bp+9e^{BE*r0Xp4ZhCT`AKECSk^wl0mf2xx0Uvp?@fv`)7I-o|(FN;qC`5)JWBu3bSpS&B`d2_0FN9p^E5x>Migj8- zfDxaYaFC+b4fIKh4qHj6It|gGBVg5Oh>jXHG}UQ{j>Su;7IAjK>5i$eVGg}+M5wYD zL^N-XU8ahmZrv=!Fi05n?6uWl5&#$@y(lGMPpUBCUR{g|6YgD>lJ+?yz9qzC< zqX!Vi=Z@sn5-3kdBIkGL$a#MFS4Ph7ShGvm6la^Ru+^{xfO=n{>kWu$t5twLAfR1w znHtUUzy_aohImw|J8h-28Ltm$^%h-svT3KmBXFgCgHCdwD6h0t^n2W-5xv1?TyGTZLTnCpI9O&BWXx<8Da<|Nl& zqt0ikkn4W<4LL9oN$oj%LNxZs9^E5IVj9t?|5VKNo)8Z`G7#cXatFfVNOPb@&9Dg( z+<_1cEVw2_qu^d+Cxy=m>4R%(5WpyK`q?EV=0@{H9c&rsb$oqjU z*NG?)5_vyx=}}DGs#VGb5ZZMprL;I86mlpm#6cm4!a}qZA3`DEa01rT(bzFzaDse0nrLK` z61-N&xh}A`v)+V|cAcFt5ZZfPoyZl_P~3HkE+=mU`qCY!>2(b5c$|1{-jR17rag$8xQyq<*eg^u zszKsL=u%drn$>u@K4l01)p)tC1|Vd7d9~6&fI#l$^^1gDjTe7a?4%+``{Y&npk)oo z4F$F`c>salkfsL+Ifts%hyet8LrtE{d$41MW33?V#A&t+Ja*bjt;Jap1c=KOJUXMHZWGCAVD(>XFH2o2qC1(#rA!nUT%|NsRF{Crc{LtHWAGr70y);H&-hY2Km*ly zt*$;G(0?svVx5NSzqX~-{T;|@18Nw*)v~vyrhSCAa_!98 znA?bBl^`Y48ope{d6&eI{dUVXv@x#{G}VNC1`snFhy~1yvTnCltkc9Xw*d^x!sG@p zEKAL9_!5={;&-4IG&apBfa<2V23dC8VXeSvePI}v2X{m~NO(c~b(TG?Q?XPgxZHrK z*Xt~p#|k$A+}B0*N}yh^v$p0n4-$$8uVcLwR}x|&BhT?E%@vS;eN@vVYWn(XqpHIC zLGp?oU0f2yy`IG#5sqSVyvwpzruukESOJk}B4bd{>TG7Y=~kyXP=UU5;!t&qV1=?$ zh2lWLFa{GWKum3_aafuIT+St`9dGoKziY+biV?<}>#)BO|6L11j9uG^Ar`;evQ;es zf%^be*9L%5%H2i;0Rs1Kt0g6+07NNwTXHy;q>y-zWgpRrn4k&P8BkJNaGL{q4tark z4^cG;?%!ke92Q<9Rk$DE)8<3Eo{;Z*BjPsd2~gFu(WbQDt3?pRS(AIM79}MBQP#at zS(NsBqq4NL-^-$|6P6(Ro2UnZel;&a6OjLt*}`cIU7Uh0sqfSZ_%2tyME$2?#4%y= zG>-1BMjHk+iK^}bh#J1h$Ra=~Qjg_EHts%DAkfU=7yxW=x%9rzi~(c@?tNwqG(zdd z?>9pL5Euhc4FP~ay+10JL*RZh1ds~G+;4^ez(9?G`^^|&4z3cBNsoa>GXBkG3;;su z08LFf1m0|h08)YaW)^o;*o-0Y77l^k^A7?4bPWz0D;}56Jep|V6*7~u4^&eOW&bTk 
z_8V=={#z*f*FyH2yyWj&vBxc|cl&&{CJ_653s)#DLM5BvtW4sf$q!nN z^aWrf%T<#cJxv@v@3i1>p%eoGHvld*Ao9M`YF}?a)a#v=ELMpcAoYO=m%|01Qua-p zW&yS%Qb{OtxI7TyVlX@yp|TSIstYrlMEW1J+AqP@JR6y?%R4x^pxGEuRiltn> zD=L<9`K|~Ta`{kHEF72wO=~HZ4_WOSbg>l6hayz;*uxPj#WDbFs!X|jII1$`^5F;< znIDO8;m9N?(+wIx8GFQP->8eFj6D*eLdG7AP~prZxLEQAO>}1hZeOQyDL9WtxEKw8 zV8yrVL^w1Fn$}_^0WfTBHBkS+x{Ur3_AM3(?~YL6)Fi0pVk83f-BvrUj8T0Hzb4=i z*Y)Cm7~%4g8GxS7izNcUb{?r`@4>YRFhs5O;_r!2;oKy+Oy|W)0bsi_0h;XE0J!go zaItuJZ-fgECqaXYMFPMO7cNeKVX?5eAB#}o6&cVkpSD(`~ncD?~hO$z4!-$2)7Zq zABb={Lw+E_MK~8gv{uvuy7ohstUJ-saFzmsM^w6l-qKkhhN-0bP(+@W{D>9%C~npD zc!w9xKd9S$u)NES@SYV|RY3oU1&0ZR$fIs~(!zR@v;koR0aTad0z}>?E!g2Y6?J^l z!jh8n{bDcqF)Q}8p6?eU=f^BDdE@cLr23Qv?`%ECKR}QLtjGdH=BF%pPDvIZ$UbGk zL(7Hq5-yJNq=n0TgvkO#6qYc2?9cK09hQi zY5*hipIGo*>134mC%n0r99YU}{7)^odsHIUAp})bktnIUssMrdr&f#dlP;w>{A`5E zSrMQ?CE;fS3octlnJV(x2p7;NEnG&V5;=1NRFln8nYlm7PSUB88TS#X8bd>VDnjMN z4bY%cLw?GtRwBAIkeT~amf+g)W-s~YR_t?{iOtCR=SCEo$;8iCa6z(6KoD1e%De%H zyq~e)SJkPg(PxYnYi3hFYr(Us61lsZ6@L3D~RkpsFO#h3hnq)da)p{g?_Fq_V8!KWq_0M0ZdfM?8FZq{N>~D1I zTafiHm2}19Eo}YgjR*k(S%9i90V40`jRXM%)z4Fc4xmOYEb|MNy(zUpUE#$0G&8s! 
zv-b+WJLu-sCVm>R(lgmKR&m(T&5U-rX+1t?4Nuk?)KE0YnW8CyU$9mxK|v~t{(`k( zqw7o2V8$0w^n&Dq7MmPG9z&0{{i3yUm2e2Dp!Y@6BXe5_eF^k*BBc_bVlEPa`Xy_n z5($8kIYdRLe>p;>R00ePF2(Z8Q8PGke>o}^nZFX@!uw6opmN^+N>nW8?XN_rkgcyq zsGPR}uFwrg(*F6XwQ{Y2f&0}67Zdl_B3v4L08O!+x4&ksRK^jm+5+m=B2-A}Ut4yE zrdthEfQHAMxBps7rT%NNIdT8BwSk)%cD&U~{+$*3rk281Wc@n}cRSG%Y=smizi!#z zu+;hnFt~~jY6$`m&HK7pDFFoX*R7@&sRck3^K}cir%;dpqK?5gtfX4+5T<)N{tXNJ zF&td{@+0+S4HrOme3_U0mKFOKUA1MP{w)ihM~cQW4v=pftpf-x2B=yL5P83CH7Uad zAjO&}k8mmB02QN=D5)VXCH(J+O9@}b;r<<KzY z8z33{^xccoqtqjdp+~9zZqzhN{dbi_V2Sx;utVHk!Mg;jqMDPceh#(t^LHQ3BJn*Wa9Fzb}XK+yd1MZhI zsVMFrEcoCRk5JeTBUBE`0I9OnETfVl>;0g zxJUUzswC?X4iLEiN?eZc6i4{KU2H@m2k0S-sR~Q`H#5SK3UdDzkpuM8kQ`1}TrkC^ z#7}}eh8{=wPot)Bg#R>Z8pQtvGs06c!T~D500`6<%m4=n)E9_K-UGx|Ffsj|_y4yQ@07uddnLX2FU{ZugysV@UBq$!OEd0~iUNPB#;zS-?j`@zirrx= z&*O6B{7?F6s1lY36zUf(8&RF8+H#ps0jg#JM6+HrmMK8szG$t!)MQ2}FIrb#Wk4|b zMQhh9r3J{W+VrAz^($0a6w6y}dza2mtM^ufDkyVlxeS6^?bS-#0|fC~Z4l?gzZ`=g zd7EwDt{t0zA$Is+DoXFmZMOE(BNav6X0KLm5P+a{8)>OV()W0~E%Sa*P1>rax7&K+ z=NIAr?Y5ryS9rkLbyvwduD^-9FT;+G!t!k~gf|Y&)2x(j@34jKdI^qpB7JG^^+4Ba4YR6Z4$-C{? 
zy}EiUk@If5nh{R%_)1pq9>Zlo;2S_y!IgHv9=^xMHXg|$8Myb@OO)RLAWFH%_HjuO zbYq*Be3KpfeO*c$a=yuw(#BHmv-M5~AW#LUq!S?W-e>EX1t6&2XJg}O5kr;Q5OtY+ zvu(fKR_;B($o^)#s@}y>877nXc`VM-J^qO)c|nDo#)&&z#XXG69erN(81BO%33n*U z&4M^6;8rOLAPEw+(FbVN^Y)Tpsk>Phf*fyQA*=jjW=k(K!pkuDcnWuH@yZFKgR%K) zsIpIhQ}F>*-(uq;$R14;@LNgL_xt9pX>egxDF(o)2}|Jw@KzGNLK6l1evd>qVLs$Z zIXJ-K9WPF2N3b8-UTR zg6j|1;uW#uzL$K+j(t#*^^x--yP6SJ@wiX258HAvWr7ff0zfqs03z?hHqOd$y!jY^ zAp5Y50I^QNMd~BKO}G0JiH<#wJY-!?P0Bs?Foh_IYWVuHSBd-~h!#9z-)-9;&|@4ha=+WI!o{8(M}z8`D+xzQ4T7ejB*ykQT{#_)2oYth`gUINt;2gRFzh+9S9Ye zqoPQKLmF5(stfHcA3*l~B+DVa%1b_O$3Cpb-YVpM+^$xxzf}~XCv5Gk1cX)sRPqZD zwRyrA2>^i_JYnmVFkrC#30s_(HcY?dlXmPAx|B4sK50rxvy_k8+KB=PBOIV|*8)V| zkJ{Sl01#9^YQyQkaggRX_?T@!qsKuS`WM+hW>+mmdXniR{uH*0rSSqI13oCJmJi^@ zQ}!YpPSqI5@N^{xr4}EQI1mGVJAA0X0Q_;m=mCeBgM$}eA`}Yt$BA+kSiagzK5NH5 zWm~;H7w+HTWZwwZT69RS29;-FU5O%G&6Yf8+iD*H5V#LesaJr=`<%T*Ey)3bP0!gY zl+m)9)%m22@Hc8MKs~Z z?e;4~oG$T_e`d%2yYA>qKwBrEZn8)8H}K~&TQQB88mgrJ}-ZXvdGFBSTe1^* z+`2Av6pn9aaGNcgkBcGYDWZxdVdCT|809XAj^%ZP-^{tY=!-toDD$kr3{O|WF>#RZ z4A2Ov2z&}A5txMVBb*K^=}GfJp&Lu^JTkM4J?2l z_GLSzHlhK7*q2F+OHqJWihkMN@HOZou0;7ky5=jeal$XM7W%3k-|QYoU#*3I){gxT z^wl-~050i|j>pSUsqjX2+)~~tZhe0`lLOx}#bSOW2OdT@>I;W7gVv&+f5$}?8MKxR z`ns+6-2j0>02PA(BJbC2+~F<$6ToQH*KOH&BQ8?E5#e$j1<>GWz zdIA#+0jgpMK+ydc8>jJ9sw@Nkg-hb&QVvpo%z3BRKPu1hFd&Uj^>EkBfBAYIjKq{v z%6Cn{aAidDhAZ*fK{5NvE4XuZ95av7@_6+M7etskus=c8)PW=6$7Jf24Mfx*1I1Y$e9;xd!Mui5OfegH3b7i-WTjPH5&j# z7rtQQRXjExU^UzzFW4LRN*@xIK77I6bgjtc21?J*Yk@r?dR~aCv-sh z>L9aQ9bM;5tn+P-_RRr8odK#k1C&%T{=~oNNG*cvqB{gaE<~t1{D4zAMTM=nYY3;sO*g03|l` z4Jxl4?|=)=iM>ttR0ne2?ckIg^=b#}caNjv{{W$<0IF3wK;#YXad77v#R)K)eUF1P zXyj%G%Y36_Z%-|gVa+22>hYP-1XSEaMvz5LB>(^gQ40V-P< z!r+w9zR_8}&BfIwC>M>p&#@Qd7z5RbItak$CzXjQ^?;3&9~qe`6!4<1rpqn`ni`uR zf=b-y;K+q4l|6c&gUJyWWS|JgOTO8$&pUmoV2hN3S9dTe^3ffc@o>L6OVkfuE>t9g z=Rigoq9BSQ-|XlBV}K~~&CY6N{s2U~&pSAQM{9?mjMejjqg}uJB+-@U9UQ{@2<`6< zx;@Gc-aL8~r^4lhno8wrLWaZs5PKW5$VdkKfhFiyY-p+XPDb;XUgYH#dMd%(T)-c= 
z%VZ)l+cCTIsQK(Sfo2^wuQ+wCPJ(w|y7hAHdPb$u6R5T63313dSlFnZ;6_XthmYNT z(2|{AP4WRJ_NZfBFfm^0`f>}qf9%}UNae)X2{u3A!06Zk7Ijj>A9UZxKx?AwgzT4Y`#?1`wnkbhLLEAgcDD1OFYT+)hf+yGU70x%7VNJ6}34 zaCmBuq$(x=D#`(_DTgECT~Rq45$|F-xGNpyK;9o>IcXp7@eSjhAWY}V-)ojtra`6| zly#sO@~r7Y5qTU!Ck*;m3w=NjlYXz?R~XJ=5dp`Cp(v5e9r{hN{1jYKc=H(RNZk7l znK0#X-yI}Wk9LZnKkVqJEr3!0itCc%;#oNRSRNcxVR;qY!(M4A!$2HEo$>+%L!I*S zh@;>ito%Dm5&Y9r*sJSU?|el$8oXR5KrNl48IS1mO=j zje+=D2}|xxJ9V+pARly8=ok{o+YdTPiMJyN+z&d7BqoO-P(SFbyG)~^TRs%Emaef6 zIl8s*lmYcaQDYMf=lW38T4=_{9d(KWgt;mCxRaFKCxXCz+^H9hKoF>pJ8O70l#Ie| z;}cP9xxM&A)Y>YbKH)T)#&Wmu35ShEI4N+UV?V476GFiKu#=P-mLPCH>@>-)A3@-L z*y$8g2_T{jKH{iw5kf%yh?A7*gCI~pg69gU!w3TPBhCg100D?S-6sRb-mg=+&Htp6 zlvM;%f%~MhWW7lR?vu`zy}GrC+5KpQ${^j3MyRzw{b+=WD+vGs6%IWRuJe+ADI&x`X#Ra$K!dYIyS3fA1&xV(jCM2`E zq!cf{7BXjsCh}u)882?aEo6c-2lHdN5h{+aMLgfc%Y)bB!9H^2g2ry{;`DzAdkk36 z18;W=uOEEhx7+0kv*>$qP{^?o^kV5NdF8jaa@Li>HNB{xGgyOLf^U#Zg!u|acxAA< zmA4A;3EUv;;W#GV1*t7Rx8A{CKW-nyy}u>cRk&Spa#~(Y!o9!Y2i$6B?nJJaW-W9l zvU&nS4xMLl@8C?j@STbrm0Rp=MTmRmC0gTfgaB>4IBZkv#A1lKbYl zU2^rpRWO%ChR@tY_e-iW>(X1?8k0+|k_>S5xVXsD{chE@nIe~`b9&oCXU7-nZ1O5T zXgZrcU~WonuDE`@ZnxcP=_|-*Gx;#N9QC>4@=NM-T-smHu2tqb9w+4MAmzS~RxJza z@Er?38L!~B$j0wku6GPiJx+)LY*O(Qf(Wh%JR4fuyJ}(nYW}DvkiYmXJb}!H3(N3| z7%IYxdu<=@JMm`m1&1RFue_`GYPaf8acmC^Ar2!piFY)~J*j*qsyIexP^cQnaJ&^n zyJ<(V)e=d%%W`XupEX3b9}YQBy>OBCXpD-^xqzqk0z8#+@w$A&F+9pXGJgDQQMHQi zO_jJp!DB@;Q|cOHTq7@^`uWAf^`9z9iH-u~nJTC!zEBOzx0TwGuTF6qiKeRbqQCu4TWtTmTh$e5Pn^%BmjUY3ll z4j;Ha!gSuIRvp77f-W!GsEPH!(vDZZmwStvdeQdLpx($n0-c~E)5?d*- z&gJ|%mY#)~5pm6E&otNGh3rK37R)XWmCi~8M?U0!;8F`G3?J^{R#@e(f0@- zWPpoaXcoG!_a8fQn44m_Y93Mj_@nM2tH&Op{I2r0;7w~Dbx6izRk!g4QiAPR!*Gq@ zG{OU+9GYsQP5B|puYPB8MW8NnkmZ5X2&cj&f@FD)XJ4Lb5r;y>UX84=Pz9bj$FczD z(LF5PG(;qQEMArjxI0>1Yyb$H^_FbIen~a)s{ffc^mt1zbAvTf9=d3W-GZwQ4)gt0 zUZ*eKF7lustu_T>v50)8KrB3*C5X~h#bcp?rB$=LQV0TQhU>R6+^NEj!6=_6R++I1 zL^=r3XC~_qr&-@sBg+n_86I543S0xy3Q8@c1f8ec)Y$7gh2A_D62!s}*-Xu_HgQ@* zdRU7%t)TZRsTndZx0j}_6V~Q~nbP!(>_)P!6-!Gzl8oLT!xDNuY$q)p8@llNGPla; 
z_?YUP6?Pg{1u>l{!Bhon9cQ_|KMG47RAo61LH(9g1@~SAEA8pj0acfmP!+26(XbX5 zsRZJ=ot4;4Q|v}yr?V29X&OS6R_Z%)u-3$y$yvF|+=hp+PG{xnyq1(?^$V1gm6se1 zKhW3YtQ?#Yj{uigPCL41i{Cfz4yIw)LRGyEy^knR&@c?cD$QvR`wpuzryYH_O&Er{ zt%8_t#ACsn2MqD`Zs>_uCf#@pP(x1rISolH>zviHUqzC=7o(3agE2KdYb7=A|;~Nlt41OeMFw{KMEaLCt^` z?r7^^0XjGF4%?(KcyR;|_^@ z!kg*QcrjBFoM*Z<;y#dTG~I`Ttfk;Oj8_d&1$* zO9p#t)$LZYw0C+|t<&;_rM%O#(Ohs?hY{A(iJPuvo~RSYveVPG-+ehIVJxpNja_M_ zj`(VY6qw~;#Y8;sJPa|D)f1!+Gcl1NeS%CK3-RDA3Ky7jDR$1~H?+!D{VKLq_3Yhf z4ME{Uwry+KA#FpN1V=mDT5%(| zkVWyA>nowKzXGb4hg4a$%k^X1NCVZ&K{dl;$JjveoSpVW>bMcqaAzDb1n~085S1mJ zpC4v`8;2pIZ@431x_fknwufKD`4PyKY@@^FsK*V8K>K&<-UNi++{xR&ICla><#x8H zn;-$9aywXt$P=t6U?9Uu_dvsBH1fWW0acv4Vjp;h`+%f#iowOW2G=g0TyK&lI^@g& z#3Hb!0Kl08TDSor;{h$)fY7D`Fhj{1hnzncqH#S zH~zBNq?OufEL`16G>$h&pWOJE;+6wwn(6#>Ku-+MHQ?nGDHTQ0_fatGL#J zxJR2LgK1=*#Cu{C<^)I5h}{HXhFLJXc+7y{6opjntNtpkn`r^%Q^H2HgrNwQp$9S` z6hDHx<)wH`Qe*ak)bRg?NlN#Y>*s?DMIx#k=d3ZUM{yr0^yrwjKmehpW7+}%gqn_R zRCmAv0vE?R_KO-NBs0`l=RS8*8iKqN_U6<{c649Z({l_h_NWn7)3fMnbSSeFA@ zMQsV;x>J->)G`K$X$U(|b)bwDQO+yD2W+*Sjs1>=bv3vzI)!FKK=%w)wzywH%>vCJ z$6&dhS5}d)O`uS^LS>*;8<&M=F*i`PRYA3#)3bgRSBL~m!mG7;P(|wim#7jZ|EkEp z*VqirOc21>d5vxtAW(me*3N)H@-@)T3JpoG4beE{%oTvR9F%E#tqKTeuLasP*nx=D z+pX9=0WN77hRdpwC(2{R%Q&grbxN1tSGi=t8UZ>EZ>VERGg&mJT=C3gGl<>R%mRlP z<;N|`+bx;$0iZwbz|9P6B)+~{EG2$egY#GF!qWJixT`JIsTFcAJ0UaNXsC8&p5d8f zXj-LcQ4ZrSaEU2qcwmPh#=xCctvZ2(RMg{6YjF!R2h0uQ=T38B0YGf5-f6AqRuuy% z$t%0u(^4@YX7Fweq%?jW*Ui9TD9&!mh*;KV5zNVoAqOl5WKkg-073k` zxq+re9mYD(D`+}|9#Y?myMIz`5j|Fru{{iWf)|B#HMAk$%x-9tC2uu%1JzK%0m96~ zi`A$I)(d4;b&W5ZU##lLOb`*pgBPU?PBzMvrek`5g(84vioj$5P+y>fR1{HW?dkoi zA@p_}*iSWA)Lx>OSm}@>3@Sl3AsG-0G~aH)uu}|U9K+i!SaxfLCWHVNu`PzT^HAn2 zCS!V%q|abKRPutB88VuRa{c>`h!?c!-$7n%5~9fafCZn=%D^}08!p0TT*JidhU>h< z>;bD*g=W^s(V+*dW)ELLeZi55oj z*vyqjh7~-_`$#B{aO1}xG4kjtc@%gx8s<_S-_0Ia8ub9>QPEWmKqfRFxm4T@K+u1; zg*6*a#wADtEqFIOfQR;6_V{}c;gVVvk<`_kS1N%3^1v|UDV6WB^r1$ig8F-SQ4gik zrBpt~3R5cKH9D)r^5RknEEEAWQv~by7+atkKskJj4WLxwX%-JxfWZ-YF{wl@ULuNo 
zh9;%*{qV$Vnv}}-^LYCPp$UojAepkfLMlz?%@Rxq0qTmphAsv3gAqF<^oHy}3{`;2 zBJ<;x-3f1`5zLD-o3aT6)3ub%$E{iwCoD0n0P)JSVhm;Tacfh@a8g3Kx40jwb~|-k z#Yo}Y?&(s#8#|_@9A2yo!z;`O9b(a@w>gm?*}KBjDA1xMNMGhIIz&3h@b(_wmMmmF zyg|%o@B~ngr`8QteAyBI;=}D0o{(&Ss}fT~X?L#F$18ZTq2GixXCX6$2)5#oL@%m0 zCF=(pBmp^##~9Q&%Kb8IB26yg!;IWD6$qPqZ$q#^x#6m+Rf|-u^}|8`S$^g_){63* zwmY?WBU$ZeC1QA9UTt&n=O63+EH_qfr5?BZ*{u=>fHIk)#wi7w@k7oW%#LM7&h_D0 z)4U3;$AaltHVa)Q;Txs1UAJrw7|McRmaaqCc;Nokah;kik~w}w6dFiVQboWiLpI-rFZkl`?gV>2v>c2sE`e&JESm^qcj zIU+C@72tO~8_xH`ReU+T2xeNA(=(dkG&C2(ejxKLFnkGkN`_licaY%(X{^Gn$$d5P zPzBmKD%)&ev2u|`Y zuG8babheu^1iv9i`t=e?r9bGAzAO?@lb}PcQWZ+$qB^7e6C8aH(x==d$IRg0lP!)E za=XpZ8 z)Q1DvbKbQ&eu7G)ws*^~SAi}rpHzHC*?&{jiXinV$Bpx&cN{PCg1-|gwxhOp$8SIx zZ6<}iF+9&Jzr5?*gs2wLakahM=MW8$3kpR-M-_bfzF_|8WaRZd9k}P0oA?Mzv>G=q!1{S5S8W~aH}!8WpGO=wY}Y@3G>e>o4e*H zlEIJw;$U9@}^A;h{rE_6!W2IDG8r zK>xmL_6_X8E89!l#waP+vky;_^1 zcvID~a?igf;|iJu?MYA~a;Kbj}}}flCp} zGaGPTQo1&a=pS>T7EZL%DT@$v%;X?h$c|yr&7=BiNu@4fp@>N-nE7CSQvLVKH0mSZ zqj(P65|db*Pfw##m?EgKvu81H(xpSc=FIdo9AR?O*B63n{!|n-qT2Au&tQo?kvolf zM&kiZKdC3pRX8jLn?O@omlUwZky3D6m>!cN=AH@v=%Ak&!wEn555&<8FAJQgpotwt zWSYvJ@p) z2w^7pfyopvZty{2auP)ZMbvkOPIk;s@{~+tq8?r${gljN+?saOkL0jI)XjY_vaegT2f;bWvkIqS?nB1 z6H^FwfnwX9{D_{aUA9?82dSGY%q|s-vjC4G9zTobt2)Vqi3?dYgetYb%KsT0@2DZ5 zHY`qlXjfE9k7TiC%n390O2`8xKy(R7C2;0Kb_&O5CNoo=g=~N`5PS-PTPfI#hz63j zhalDYY1LP_`#(3uQybx6lRR)L70`&Nj})K8zYhu0CJA;qDEkHM>BH8VP*x0{JH3gL zjnPJPO1Y=-lDS-VH$79pLls~qH?%v@!SLPt$8|M9SY27dI4wvlJpD)I2W(WHo7aH} z1-NX<2{|5A{=#Eb%gg@1Cu;+JTxj#=?b~`n-qW?*rnaoeY;qf`wzn(Z!%|vnFHhZF z$@@rMz7(N=nT>vdRP4zG*q$i$&78#;NAsWsv@JQ8hd6657aJKhkKIXqmJh=W~s`e!`6|f zI~jm4+#Fo1e42#7rL{P!cPAoD0K!P$XkU^#S0N1-%p6KohtV^5%M)+ojL&pHN^G7)K>G`XoBT1EvQ&qwy3l975JL+SSiCA?bMF& z+*%gw?8<^tAqyRsFN}4goMYYi+zC3Ec>0wejRJmQT_EIY-cgef0$DP16!I;Z0s?VL zh)>}@7V^uYz$|IN9q3uM12x0GIK7P-EQGNu#&wH~d(?+j>G03xXQ2N%2%-C@aaRH6 z13kl>Jbt!6KXocM7MxT{ew>?^2+S}AXC6 zO(85zjbxb)3lMFc+svq8&-%y8r~0dw1Y5g$jk@Hf2R-PgAKdvKY3mciLTuBcp662ZGG*MqK0*8W*lB 
zOk53DuqF}%vkmq}cIrJMH5Ki|HU1D+u4kcac47tlA9@AbAH!=}{|kW?QeprDHfo9* zO_*MB4+Yk$ShmVum0EX)iduOE$Y<@d&lfO1M@;~)&Gjd2hgytC_<+pPM$Nz}3n2*S z7777u42Hsn%)mZrF*|X}X#Z&_e8?!YzpR%zibnF-*~LU6N`jjSglnDVATqi4@575g6=@G3T#p<+5sGr4Og_+ z;yNSt23D;=yH-Cs$Q3R2g|4ltHHqk|YjH`?6O6D!2o1v=np2Nhurv`@$nMaU3t`RB z%!oLT5z!^OPFq&c?!rzLhc4bndDC4iMy-+q7*9&yV|-&!XZ*1oI-e#SOyHv7yzx&k zyid$A2*U)19pXixddXeYJZeJ|nxP_s4(iP;6%lk0yHY$+X*n2DN9>!N0Ny~^q^ppw zup33ZG--zh))DO0jMLlT$dk+0#cxQYaBVFtZOxecR9095+fkoq1rxC02|i%=pVQy0 za;C;;u|kYw>~_vAcoD91lA=)bb#Qa=8ZN-}Z5&IsrdC5xxqz*t$zdShNQ-wxXc{6f z$8cIOghBII{o)|{fX+V#6Jd)k1nJi&n^Tvly3H;_s*4h2q*Rtd@wy)88BHma_v`DL zA{d2Sk3QhNWoU^L!;P^w#!n`2HHKOw8%EA%h3MUdnkGGSW1WXM+-gOX40;6^)U9Sa zBedWMtpb^pjV9od9vn2>YIJK13AE%D$wk=W4Zol*uV`3Y`30?c1z5H%EErjak|~_B zkLs^Wm)L2eWK~#~3sQlhPFK?qI2V;i-VNczf5jJ+H-tm~*J|#;A2<@vB~sgz@ARU) z3%e{*hZK%LB+Ox1Bj~52x+Tydo|8@P#wMR}D9jRQx=G>gfox-KtQU)630xks{a`Jd zs9LBrRyKKQB5ua6+KFYh)he`Dk{Xi?D19U(9QR{oSTZ=eA*e*4Ihs9JiVsKv8U4H zc;xz*?9_X9W7WF!{C>nrHzJ*X>GJw`RVql|i;4C4Sr0c(4(CVDX}`}hxl#+&sod6Ntr+mu<0MyZT8_Zh_BPMc>sUD9>{f0%{Z`w> zNnDDJ*MswPI_Mf*sZIyJD*VJVb-1rRFQQZfza@T?Qw)30&yjAdFP<~pjiEn)oCvoKjV)^g;b~E= zCmw{Fa$%?V4%a2mw2aUxUfVmL-ZX6+lnxR3(5~8W* zc!Z&vnBXtdVEUaX9HTobynIhrRazIiBrFv7;bQp1!_{gF;bj_3|CQy|!`GuEl7@3R z-(9sexXP{ONEjN;kLbyya(lx}K*SQ4k=BNTRYaoJHd`Yc5Q};%ahU^#MA$W?LFVeM z5uAWVjt0@db%Xr9d)ap5M#MVm&O!>*! zX88%Dp$h(cy7FP5Rfn#8H@1@>d1RA6_o+pCeQcSPx)LtX&_XF;Q3=P8TE_Z-9$*N(Ut zxs@`7*_=y>kUAm|89WY6YND69zu{K;9#Bdk2t=d~;c^$wQn^$|FJRFk2wZE}crIsK z!^i-d!mVLs0Ag>PRy;!S1zd*0$N)dqX{A1CL-=5NvBxqvDIpFuuDl_Vs{Pa0FOkNp z#-Wwjvxo^<$Quq+5jyfHln1)LPyqf51)9{*Fod92N;a^}b^#*Ta{-R>05x>IpTvD~ zIUKHgb#{H}><(4)4JS!g#iGz@cAOyr1-MNm-d}mb;jtTsA9y^&+ZpGh0V0TW$JT&q zYFb?{k`6lPpPiI5Wvn^8Mk+(afbsY3(PNO3=Oeaiiv9wwhgKudDI@Gq)~aH%*!{yZ zr{E+}jWPa1bbvCQz-}dYfss6Vy!95J%8iB|0|%iBLstf)Ld+pc6pzZR*5@_>q3>5? 
zE9Dlc8j{vwXDJtT&;s=f=2mbjl=iTWzfKjoW?eTcJYt>P2qlgtRB;_Ppk$hbm)-^2(0M9NWDJ5NJUUyPwZ+d zwcm6&rHBqc@pOjcNr%fDC8Hb$;s%L4>w0TS%@{1Q2VvniK_uh=*cM3ZD`!h5JbmlNmU2TwrDX>2h6r;NmU2Twm?#`4;jbv1gPXTh$_ydnz>~GQT_iYsk19c zIo*Z|Uct!^ox@saK?Q`~*#<#Bd$T0uCs7V}EMLTwC_a-2;V<%i|2c#JQc7f4X@f8Hd_3Fz&jIimie&m{^ z2%4{G9paTsdPW9il)FpY4mKW{M7g_aczr*)$ul6kz`xf*xDf++P3)wVy4eVKxbQUv zzeFj=_EWf-6>UsH;y}1O)?c+aKqxy6CD!E{+@VUbm(zXwE)@I!3aQ7{WK_`mwAicD zgZs4DtJ8z~Aol%IE!6QK-29ilWH&!0LwANAWjI&}v*@69_~RBh3h_bh@zDu3ry7H-}2VHh)L zooXo361Yn;k0=u=8blrFpo59*DTL%qPGjF0cY<&{D-(~@xsxiBj4bM89-;n%ITh(8 zG(*{S;Q_;3FgC^DUqep`&S-9QY8^^aTdt@o?DGAgUn>Q-iQmn?-GA{;ZW$*6#l#XYn{ z$8Zz<2v6xSe`LH<`Kzn%D+Grk@Xr9v+eA=+=`z5py>rcSj+uIeF(U>HTOkiwF}R2l z3aXH{a~|Zj6rS};unw1xnk=k&-ZG;F-9r*74#t8dve!)6JLTOM{QbmTdM0Z7@` z{k4;=SSvA7Jh_f zVvC;gk{2at=aheYro@V#BKy3~CQ6XbDLg8_FSdxYaiwsUd-OLW7UcAtVFh2`=`^4! zyJTikHw0$vml$Gp5DN-z7Q$a}_ft%Q+P%A{4jwpo`%Y1|f991X5VtzBhjm6m&Kd?4 zW!S0OiZaN9F6mi-c!P)LPWIn^RPbouxQGDUbOo`@GC7?%65nhZhI3Wdr000(K0EF{J&uJwrP=eF`iCYg!7YXk4nTVa#s={UC=@qHD zG+r&taNG=zVXgrjZf$_U#%Gt9XQ=@4rXg4(nKIe>65bci1uJ+Smf;qxm1F7bQH|&C zWHGiWY)giL&oPi=SNK;4BJebjG2c-@3&qWLbSDqmiz$W5gZ5%dp^{iGuHF)A3J2S- zi+|r?gm#aT8s@(wm!jnVD@GCv5YvK^%VGq#2^r*pCw8%pi+}~^7@Mp5lua6fQ{lzMj&_YcR1>)UZ_e^mHQ3+2i*rG=A9O~rM@@SZEc zJdZgyU6_@ODTCP*z=_5eIm;JGF$#0 zyE(eoHE(xz)NApYh70N558F|TxgCC4XvW{4 zi1E04R=*Iv=!+SBN$6(fQ(0Y%z~9xh!4l&_v+{6~k+9Uz;W}++&N*V0km*#E=k2Ju z^mCrUv|5?Ls{-_?6z5nDV+XCV;bC@e?i4;*>QER;P9BqGhNnCx$to~Q=TS=$9Q;DW zy-}1$kyZGGh>PPJ9ZLB6q#T(SjLR7lY+Hpa7i`@V28~SMWqOauf_k+X1WW8#9s*eU zyN{_4pP&X{_?(AHdj%lUqQS%qfP+|j|XzYtX z2dQb08{I7}<1ze4qV8Uj_PVeX#g9eBkr!vzCR7oi0$QPs?@-j8Q1&TPEq(<>Fe!UW z;!+UmJt__pkI4Yi>NNdqIwLCdETW{XLPQI@V|W9H5`_w=KeqE1F_fxDy|hgTPEC@%rPU&1Stu^r6ZbvU6husb^>dJgT5 zn&1jQja{2+H8p9edxhCiV=8YseEbv(T8-bpX2tTBG6nI+PejEnFUE|{%}}eGNL{H+ zpVk9dqYDgu!Viq`iD>Mq<_}=*6Vb#Z7Jb^L*_+~+x#^6fgyi8+J0j^kl0N5g^B%kbjz(V z0i_)jRQcVgxW4*uleeapY^|wsoP_CaCPCl`yf#c+w;fsGq1{!$60>+l?!*O5Vo0dv z4>q&a3(JC)8&V&jEJ?u^b^mVEx2*Yt0n0HFW-JQ-T^$t9$c&qyE}x5X&%l(Pik#@@ 
z#z#=*=Fs#C20A@A2K5!<8pMo9ti#I3E{iSv8D!1 zp;@o61cs?2X(BWpo2he?PP~)0L~Pv=kD~4|FCl!Ec}2uj*Nw%IZOOnv9X}XA=+5Tm zW<6_E5Sh=3H%Wy{u$D^c^HKh(XqEu4-s#!z=hOw-O;-G01KXT!?=ovgn)CwV=_W0L z_RmMXF4q}f8S?Ydx=qGZD1!3ON1Nr}Y_Vnt7xPq9I>iR<7C;uPh+llFBmJTfwFy71 ze@*;4gPIQiSm7CFg=A+!5~OsGeuw?crP|g2i6w)7PBXjkv&nV;ho|WFRg=m98kLxj zUI#{Filh7eIZhNA4mn15hnx#4^T!b(Dcy8LP=BnT55JL{BDEr6N@@x`u7KcMo;~gg~2S19uDJCmA3FGEtN~cR1C? z9fc~*tHKW1uOeC28!mgqc3XNeMe%~7c+uY`Tor1VYttrF4x6N$ICH|ucBbbO@SyF| zOCK0yQjIk~Qyfab2iOB40ed>Tz8GSz@D=QRQ88B>GL@s{i_y?tPaFB}&;F@3AKnwc z$Dx<{Pe=LNsy}V$P4NW=g&Z(3K7%DM|2MoQmI~41!@=dlE_$Zq+SsA9(WZjH7jEEo z0W^D~u&3?V(i#{82>LAFRSE(M%{jbUk513x8g-0|ooQ&~cu6{5-UEr5r@F4^;5+6; zY!9PXP&X5Dd66@1hpO5Z(liziPM`GIn7Z}EB?NhNlkEw7KOGUS+96mWN}i60SM7`7 z`{`)eevj{h!}rtC$UX~ALyV-aM!9dm9`|SOZ>Y*0i_@trgOSadO(q`(NeahicGiwH zPVX)EHQxJRxtu6s(pQ6wq@YNpXQTYr;6rOJ%ZIdtjODltO<+2-NsBO@X9Lr5L6e>h zOot*9eKs&1iU~9QTHrdA9oP9<;5zz6T!%U^9i07%IRwC8k5FRWARrVKH84u&(h&}_ zutA{q6s%5wz+^jf=D$pZyqY3 zQ94YS^(Y=S49mT!;GpCjP=gvS_y)tQ9osu2Ya_U-T*`T+k@X$Q>FI^dC%rqQQJP{Q zeJAQ(=ClyRDkAfySO!W6?~M}>g>B74+qZ2F!9l`TzoFYP<*vQD6v((Li}&ec^K-c7 zIWOx(z&TB>yB@nybd@5CW}q7P$c|f;y}^S}ya^r8!U=1(<l1FiQIcr%RMc zWWZZf5^ug~+xfzIaZGW=Avo=Ub__aA07umS83QX~ z-`ZJn(IDM*d(49r0TTXzI0@mHw9URB4Y>@AE~wP^Bg|J64c3B6{d0uN;1++8xl}hA zmdalP-6@ieCan-1xD1YT$6)^)ttWZxI(9K-(EZO5?t%(QZ}Fvm5akD}W10>QqQeyA}VYW{3nO_Wc8KP4Nc0q1PWo{R62N$2X6}t1tE9z=BK3?k?xbv9&sS zVs7rlY~#q`-iZuS~zb|iZJonMAip!04({FWMc}@NA$&hmU z%;Mb_7A&~*l#AtGsitX@WG38W31)n%rpg_S-8;%V6Z=icPs|Zi`P&I?5G8xR@o27n;#bG47OQ^a~`LS?flb>Rn7TqAU;!C4&&m zqu+OJdGn=+VjkHXIId8ii7R`886%MSg~0m)*s{JX<;4rmLxmixy0e+-bAK~kQO4HY z`b%W-DoKV{1QYf0{@zznK@rfk#2yL_z)Dc)&+uo$*shDjO|x0+`QcT8lmDIYKxcYzno{`88IsTud}(+kpMbx!*9!{vOc zHy-`1u2m3W@geL&>yN~JMuI6|>tArrQ<`rZ)-~lH2J2Oihx&zhgz@(%7U8{cUbb^% zltqn?e7dVV z$bQQ1f?{K9SNG23>UiMp3o^`r5MPkC>o>%M&Dx$qUmQy7*K45tXR-s)G9iWC^HHEy zqrO?AhNgbhEJdn=B~vLyUK0<8>K0-l%nJ{+X@y)k@|uOYGd60^RMKNV-N7&Qo$<1^ zc780QN{BUGGSzFp?SakCUj#9X>Cm`aV}vnZsA)pmYc+05TR(0mfSd4=RK4uE 
zwqhOKHMMX5)R8^+wZ%-r*E{{P`|w`EC81ePf7zGp=ePW(H{i*Uavw1z9Q&$!rtaFk zZ}0BIcXx`#G#U@Lec$u4yY{|xFsdr8%yO7nYwN@J9b_N@+VUaUe`L3PDs20e9vt`F zp<8*q0zx^SQt9Ms6h3-1SF=tcO$U=!rc~eX*dKlM@qDem3bPJNaY<;l#Mho&@Wbtl zHdS65cNy2HtnP3+d46KKb*gDHx(s*s72F!u*V~}0D_3L&4P$c1C6ligqk5#0#J%YP z;J}&YL(FuX`5xxIp#y*|)jSky42o6tk#;DO>8*IiENIq07WW$HPU7eYLFW7OLC{jo zLvW=*u%gliL1K=qI_>do8=zs9S5`K5m|k0*0LP{mQ`N(=*B8Z607eUl+#U43;2 zJUCb;jng(E%J9m|K$NNR{1EMJsp--4d=R;nNg+xd%=+bE_APRKR=RSz59Awb7P>|f zd5U32SnzrNFmoy9Hk<#LIhd`pKQVVH<#yPg*vOQ9T@0tX`O^hUZF4ZY%Mx{y`|qx( zy0_+ertF-2-oF!5hxT&6pHD1Xx0ETS&5*5{zXE3O#!@z6|EMh9g+81H?zRNPT=)m_ z!-eW?=%E}R<(}pKke3kA=B%?;8%qu4%^x(9r2+{7J2CDdRj~#c!13OfK*In@pM42T z43P7gYnp1JsTwQ}mExswj}<@q*-zB{Pgfa;TiD+<*sYQ}I-2s&>rV7ee}2QFR< z4>38{J2%Q8+oGbR^EM0pHR<>T#$S_;FKg#DjPIb4rCPmslb?9OhNrrI zdX)_id}H}dh3Y{KpXJmk2)IN^8Z|uw6bUCO@d5>4&K@KLIASU?`WhpnwHsP-tj0)a z?Mr|)KCmH_0Bd9Mr5>ze082mJVPJ9HpU7`75D_VGSvRz|6=^C~qcZ}dp2(HW)*QG#^vb}I!3WTx`UY7zv+QQ=X#Tx?mw<2=nbO^6Ed$~1 z;=TZ3*+XbQIS5ge9mpSa5N3F8x=_ypVGCqZqzYt%@Q5-P9!Mc1|9s&<3ZYWX97rMT z2EsdvcLoT%J%sj?GIhZ34(Dc-sUx$q8XtRUd0pZe?8My{7WP~;bquyWVzH?0ADYYN zK0QqEc=928H{E+l*zgu?H)S)OKJErz)C^s?8s_uG15z`^k6Fjp&L^a@yKVxPNa^`Z zwE{agE8q}f$r}q}p1mk%B(D@ET&5e@+Bkk7|6z91y*vQ0j4E-?+>42XFM-Z`k6T3A?W_X*j8Z7kL(*YL$0y$X!1y)~7q5kd2d=p`lmP32>+bMiMFQ)A>kouT+fc-- zebyFuFP*;HXNRFgr?2+eVJOk=)x1E;99n#~FpY8ekMJG_nEuVe{`QvoP@$pKCv)Ht zda<_El-p9v^GJ36J|e7QB`iIBtg~6+=|sHl2~&NglSN<2aKRlLb!A~Xb;lNmp2kf3 z=NwtFcr%U1e!ldsME)f+X(Msd86S=s-b$kx9t4SLq1jOL6qy~~8F}&vl7c<4H2IUK zLq}q!e`exL<-$p^@VQ#k`xg7G1Ww~peq^_T*NRNvk2NgLb3g`2~WNl5Sw~q^c z+NBJqJM}3@&gTbwm(~kUX+H{Ej4EBP&;2;>WQVhS5j9m~@&5AK;ZCfvG4O|ozON^+ zo#G10;D+2lCm8lP zzi4}yXZfLNo&#eC1$wK%TNvqr9>+rwk@vA^8d-z71}F?xiPG!^o}U^^UUd<=A7=FBS%VzSK-CTqu~|6J$C7?BbFmJ z=w6D_iHjr3P~S@j7*}=)f_LzSt@J}(+7rrEw1)IUubY{l+w8Jfsz<#X827?GUlG$F z168Lr^+!VyI(s}?U5nogf9Eq_v8|r0p0DlOe~6&yyYJq=&xs2yy0-mCq-hH@hIv%m zS5x3=9dv{x-a;4F?;noEvQ1(%t-JMOdIUiCRD*i({17Q*%WFMUf?+z$}?BP*Nkr})v;`De`JLXJ|}GXFt} 
zB%IQGDZ*_(6kWO1xUGuJ_d^lBTn7wL+G0Mvta_u++RowpHLS~=2kmCOpI~pUUm3d4 z(s4a=+LKxIVED=?ACU!u%)#-F`goN4&yc~(JXWV)-M#4&thQWLgj0DueLTWZ{!TzO zho@gx*1KEHFwpF3J!bWHh+%{ABsP9B@Jp=%C2%>NkoiCwh)MtoVbEdfD8y*L9<9B~ zf0hl!ugl0+(t)`R{U_yN+Q@ELkcjMu_9bsd(qR~}cv`v)OqS=9qB1b8WqB;qC*`ZI zHXAHlN`5oSeF{X8H&?Iftrt7{sRQlFN8~r7?qPEVHy4plDi_uD*a8xBCQ#o6SA_CR z@FiWjDNwz!_CuuFa+!%enb8j`Oh?_$nA5pod@@4f7Xdgoj87_4)ghJ`i~rwo7XN!1 zPk*lK>iuDR8_ca6TL%2UBbyxswZ-1OQv9@XuhlYJ#qVc#!a9q2>NtX@NC%$|1W%Eb z`m_jMD`f`rpZOOs3L3Ifv~-NZnah6$J~3(YrOYs&5ur(#K`myCkZac~a(-yhQdiM{b?kk%~9-opJw7KT=`(Yt{$y1zDV}(WaYlmGe zYe#FhJ1NrjQxQ(%S~`kMw)9$De+j>W4}Ko=jdW~&A>wxH z`Vcz45OKZbP0lhbelg1ZHRzbX1ReiW-cY+dIvme$Lxl_ozZi9|j<01&vDd2ijveY z#;X+nl0p4fA%mhbFyfe0a7-~I1XRa=71bP>xs-{2rKY>% zKA$u3nFt5t?de*KH2(a2sc#pF5$T9;VNe~A&jcx(ufXt(r0jXt1H-{*quke7kK;kv z*m&cL?)9T#Uj_|imYp=-cd}f&S|)YR>Lim0jI~&$_?!-i%ZtFfs06uD%aI#Jx_B-e z1{8tpIUNSdt=d~D{cV){4v-y9A=|yFdwh92WS(glP@8(`@o#k$h-dT)sBgFP4EN%3 z+<3+=s(>u&IG#a~4!#|D21P*nws?k+wWLz|UX=SMk1SJ;wj;}a#MH5QMj2em61x3f zK-Ln$g;m;>hODq(Gi0h8xZ}E>L}>w-G;v^3q`&V6Few7&_XVaf*HW)^cvA3eS?59V~a>Jx--%Cnr9E4H2Y4lpz*%{#6WKs>CpWtC~{R+%C_zbWsJ ze2T#OraY_sW5C*9DZMqHds{vlte#<+9bBf`wIgj!bFnj@E9U&}W@)AL{ya~C4mX)08*RAB ze7XdhWGN%OKj3Dmpm{Q^6eXac=vIm%b)O6?MG?X)JxFR8Q)k!PX1w5GwCWvp`r((zzpK%s@}(t!lbY z%pI_D<+ivNaq$T008dS0Oz2GbQsV|Uh8tqykgG$M5oK9AHZv8|(*H;XyTM~erq3KX zeG-%Z#hD|zh+@syVi4HU+nuiBZPYW{vVQz(Gr!gM#DnbvHn#lFXxUxWxSPBJClzDZ zO!)ey(sF%cJdo)_{gNuI0`8?Svwd8F|(Un%yi*~C*Z*$#R38HnO{bCQ(Nu=V4*{Pv>m!sliiYkJE=uSPsY zcMD^*C|i_BdSae~59ohN-w$KSVc%{Z_qlV_gp%TgHbDX+CRDuVcs6QD!C02zOQ#0@*3~`yV8oJNeQl7OK4i*g!^3jq&0&7*e4@a z&hPiy8Enp-)px*}!E zac(lPnz9X{d`ASX-Z#%MbPgs`+PtHm)lI_|NJ`^E^H;6zrKAJus zBE1(zdqaL&g!jVeX!>Xe21kjv^boM_tfmV6! zpPa+#&yO0~1b;{P3Zd1c*#l?i&K?&Srfzfu*b)7%D;FiiL_xf@2}=PRa3@tQG3Dya z^gKy|$YB(wKT*4l<<|S?v_&&`uy%es7&|u0b&aKR0gyr;*2Q_4gQ)EMHa$E(Uq!f! 
z=J|!YO z%dYRO+KUzxroFa))m5I}^c1@uZP7ijEySCRxtB-P1GV;}+g8(UM~Mysw8M^s?0sG# zw-}zyiEKA^uUO@uk2p=QLd&zwc$>d$G-`ie8fom>mM{1C?L84U-teBTYT_Si(a{6w zRcLMAJOH^%Cto;3B#Q6$1C^5|d{^lK+x*}+VGHAcJexs5Ibsj#EYECluZBz5t9$L> z2rf-7Sgay!`vjGzr(XjBd2(XcJ9a&b_W`Vm5FBlzi=%V6kiqZ4xw(m>)ARiII?gd* zxLXW5%!X#5IPvGA_9f0#ym*{D@sRcD$B{8~I=UU$&BN4fca&-JDGR|1E=`|@@L<}1 zt0CxNW)Dk799!fW+DC10Oe@{Ee!8r1ZFkC%Jrh02t15wfY|A$1WBDuLs3aJLfMf#6 zKNkHBoSEaewn|tdx37o|q=SYv5Q70DifQP0s5izi9-(tiStx7userwMbp zm@26(urymn&*q@Hw51rxN!Xm*&MvwdyUK#t*m3I|qclE8hVc>1Q?l#SW|iWRi7@Hn zQF0!ij%#)A<@!S)h9$LmXgN+N?yLx6gP}=j$%#SM4Q4=${}QHJY12+0t=Bk$vL_MWEA$olyS*Rt~s zK?Z5!x2(fT$4W*uu_K$CZboQwex)f@G@af>UOE?9)j;un^BB0EpCE4AnQtQ>B}<(@ z&oEt4;>FC&RF4G?+5e>VJ~NgqYK!5VO+}A8M>VZjX>aQ{TY#Fl56JdP77uP6I^C0K z0Q@+*+IiTLi2}B))3fB|JvOZ@3gSarLgQ^LyHYTop0R$s*yC3|1H7a`LV3=dm_C*Y z;$lR?084tBg~wYNChAUih2<1%h7jfnPL*u7hR_6HVKZs7A3q6#xICOW#iOuX zIS)9e2`f;i0D~Z!c|(cqf#*;l-Zn%oScD0agclU0thowg7R7(ucckl=MIMmw>qz40 z3@d(;!N`_FOGg@~nS%Q-KdyzY> z4Xc}9OI%f#Y&qpijjQ^~eVz%nT<_gA+P1H-f2lC5Gk#~S-tl(1b>dDNz7CWU`vDBh z-RCh!38?SG_uhX?K;3g3rY)34<0JOWW1pCw0)oWZ)M2J~e&zrV!&oK{xE;k0hP zS@#gX`?@)0)4~{(=s-!~VcLczcBEx}cNgX6ZCyQ`W!J*JWyR*NqwFDBuyWKWT=G0B zA>zVY7v*LMe^*q zAUOvuVI7|4-6657cL7h!JNLfj7gy(=7;Xv}ndr@Iw6i&LE%Idh+S2J+My?UmURt%l#K>~ck*R#`T-2eC7*x`J_T7K4qBYT)BNzs6ECwYWLx58R^@dwM*wB<2pBi} z7f)tORrPfbE(6r~~4x^2MTQG|i*-T3BzHrCmOWl#NyYcN)h2h2nx0HJmvbfnU$P~e4@bJ3S#5p1v-g=J;Y_m??8eAp2@Jr+HPWG`^@Q~hP^I$)@y;grERoi z0*q|ZZ02t7x+C+Gofm|eQq-LD@)$dFLQ%^VhF`};U-yvvyR3s`7baF^4#aQ2?U(Fe z*_D19Ff#(&5z24;2N0*CbwRVi^wUr4lZxR}Qpq3mUH<-`c5#lXtfxmEs>R{_vK^Uo zY}gAu?i?E)(IaKZWIkMT)<=B97;Lz{Y&)lQx4N-NFok6V1$6zQxmuoI={2KxgrV_T z?$dXmQf0==(~fnvEKfUDAfx5=l?s}uj-&2x5n!pXqPT@KNwb47S~&bZ!-a3 zl@IyWW%|D=oq>eSRU_+CMH@Y>8XNcRD2%4<3bY$d+lf?1N6bx+A?1-UO0!ouja)6a zQ3;G%xL=bR5s!WBHC=2RTSL)nA~Iq835?{T0xIhb{w@}APwOO{)G%eTzgf;T`8#aM zB2NV>hi`RHAyN4|h6|O$x3&3ecqzsl+EIv#aiad|b6x&!-=`x3m3vKj2Tg3q-(o9` zVvohzknam60Na3fyr{q_{Wh$rhZ3$R8`j=zYsI(jZ)mMHth>qgO3Sf4ChLhcm3%Sg 
zQS9y_V>~9hZbbuQbnBpjJB+KHHIwReJT*<%`i^_|9n4i3VVtb8z9yZF(}~#;aif{Y zT_dBaJLg`Zv!!_+APagO8q9%Udnk;#^&REvgv7vkW1CnQrRsIx{iQ{WL>O?FmaQJe?$WZgXE=NVJ6n0F|5bPK0^%J!p?)BObTPfo zoVJqW1%wT*8-4&bpd$?byfC4gK*dSx(+ZfVQ@nx3YHi^yTA!fGFxwn0i z1ZFmd3Fv;GXW`#;##9 zDCxxV+tc%5ZTjoEjA#)HEaDCM>unJWei2m;-*%d@XNh{oY*iHL%D9*uRgUdZ0KS|2 za+tj^39Dp*g7t8cm-_rBqRG24sZKrwN++%*D44`@4oHk>p+>$oa-nb=g zCYrpZ$L-sKd3)WGHX~3bDXs2vptK~oD%df7YmY-CC5F3|b_R65z<$4V?3K3P`PThS z?e|;9@AZ>ladzb&@O9;`wJU{2?pnKg9N#FgA_>K`TOz6!c8%TdyDhqIca7hd-mm|n z6t6im#}1wJ3J(ZN_iIl@;ksc}CYn`H;#V(oM=7~WN@MqHyH+N@7R5_%bD9)7-tGAJ zzd$&@tZcp_R{m_1v9n%055>x8+%uod96NdTB;v72j6G95JnFk2lrY=vn&U^N7fm=U zO!fF*b}!a9#e+?_54r0OIbqptCee}-mOpnEe-0Uc&vo}-a{|@Wxua+wPEQ)DHjAY! zE{tcfdLdhI65W3*IEijqMhW0j#j>apr0zy8}+4+dZ?I`u9?Ic31P5HGpiRXZ%@X604!0Pdhi<{nJK z9WP`G-!ai)^G#$MPjJt^>p{P{7WQ+VUYzQqx8`rn?vMCA>~4O&+b-!nQA*?v-TU3p z(tY@?@+0wpIh}>xXO!Gt236O*Fz|Py-JSJ4%^csq$-wWo?$TeYyW+~FlWaV5TUV`4 z>^M{K@62BxrS$@2h77B>wx!~8L`wf_d?Vx^eN#oF3e zCg}z)HaBo77dJO>2|Gkb`VeF}zbR}|HgGxJccRDg$olXNdMuA`Y;Mr*w3%)N-D$HZ zf~vdIcG{ucX*&+Op0u6b&wJ8#+MzvZGwslx@r~)x@e`ki`WpH>U&)$wS*P-D=+i!y5 z-eewy@yz^`>$3V_TzNEGAbAm+S2DE zw4N-Xt;!ObOSj3{0FN!Fyx9mX<;`MzWq7QkPlzHzS~TlUn>j*jISRTt3OsM_%`I*6 z=3czUbfJxiYExX2?}s6Z$rd#7l2~Cp;nkKbpwL$wbC)X>A<(|!@@ARpeJko|IT5UV zUK}&#KIo-53bY->^U-s2S`J>3-nVErxPC)Q+}|3BS9c$#h}Y{Oqhv4c3-u*&Y+dQ(S*Vkmq0%g-{T__Fcmb!a3$D4XBUNA8Zo)b> zw!5eJdrv2;b5pLL>fLeN{8q`xQavV1Rkp;A?HaJX1QzXd>{$;P{#>7ohnhg_xrlyI zvS5nk1i8@ZK?9oWUy0&X9h#&QM?d0D;_N|eTJ6>&wOP*-fI~CdZ>x8kF}&r+7b=S# z4#&pZC(5;GOXDLG(-)W({c2u@aCY|=3C+f-9 zdujiX$=9c{{3Dff4qppI+3Q3#&E>9+y+8%vY3vy69_$Fjf;L3u-BH|cbl4=f&OE<4 zY+AFrYK%=xFH}0U*O>YB%9(v=% zYkH#(CB76MoLIsZ24xH1`1=fn`}6|4>#qm zcEd@NvI((7$iNF*wwq2jM22}`<0_90zESfkj|@r-ca;}&@S>4&)YjBOgLSX9{^S~s z^3o|(DbU@d?~W4PP4>8FZAx@E$+Nb=kW+JEOa2O{XQxEFEonQg@s_lmkh{ePu^>xc zrMNY}qbOpe7ZKxDuidi@dWEf|hw}Omv`nWOim@63uZcN(=12=jM@D0#z^q3?2nN)-h;S@kzP#9WbG!%vo;go20 zlSf>MLh5r#Jay)qX$CsX8 z%1q@Z#HIo=V=A2{vxF9|UOG%Tq}0$K(Lvtw#rj?xofb~)#fmiC^)3rrrV~l5j2=2k 
zR>sTQdI>{s`_m{c@U3zx9%x5G=hObuX;ddW9?Q*&Nq{|!S~(goX_?cy%k_}0RlWMf z)Cyx{&hi`DHH%p(po8||E8ekrEDB{~Ql)TNxe8rnf!Le<5d)A6gz z;KL$%6(I9+8eUQ7ba~{a@C_O+-+s5Lq1IHSUH9(U za1{1&H4IEc!}S^0Pi>PNn+FnhalhaE4z7N0I!1{e`w2BAra}LD!VPy@am|5z5~89B zuz{ANaUi|iGP%KU@*2|^OovmSgX!H?Ok=Qlw;jsr9f!KZdmDJ<5!jeV^!Tl}hjZK= zpu@g7ta^E_>dp)evK%8m&BETaR`@f{JwdJT(6}e46-~Q2pjqK3Z*O-i{iyCKXJu%1 hjlJcp^t0e2?N$PY4rKlu&DHJX5@Q?fUL82o{{g@gZcYFI delta 681 zcmZvY!D|yi6vp@MZj#M}K(aIygM>+J%?8p#^HfwfZTW%k)<#BQ~tF_$ujzyDtYc$?-@tVIzBMI- z`HZxD<2I${Qxe=FwzWSA?znznxkF;qShfr0mH&0Ztu9=FOVL}HJ#uK+l-A-6*sE#@ozhIb%}`(?hnU&p6?R>=SwUXKOP4G+uQ}R!6RuXixMIrh=l31qDCugLH diff --git a/crates/proto/src/gen/tendermint.abci.rs b/crates/proto/src/gen/tendermint.abci.rs new file mode 100644 index 0000000000..2cb2c6e801 --- /dev/null +++ b/crates/proto/src/gen/tendermint.abci.rs @@ -0,0 +1,2579 @@ +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Request { + #[prost( + oneof = "request::Value", + tags = "1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17" + )] + pub value: ::core::option::Option, +} +/// Nested message and enum types in `Request`. 
+pub mod request { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Value { + #[prost(message, tag = "1")] + Echo(super::RequestEcho), + #[prost(message, tag = "2")] + Flush(super::RequestFlush), + #[prost(message, tag = "3")] + Info(super::RequestInfo), + #[prost(message, tag = "5")] + InitChain(super::RequestInitChain), + #[prost(message, tag = "6")] + Query(super::RequestQuery), + #[prost(message, tag = "7")] + BeginBlock(super::RequestBeginBlock), + #[prost(message, tag = "8")] + CheckTx(super::RequestCheckTx), + #[prost(message, tag = "9")] + DeliverTx(super::RequestDeliverTx), + #[prost(message, tag = "10")] + EndBlock(super::RequestEndBlock), + #[prost(message, tag = "11")] + Commit(super::RequestCommit), + #[prost(message, tag = "12")] + ListSnapshots(super::RequestListSnapshots), + #[prost(message, tag = "13")] + OfferSnapshot(super::RequestOfferSnapshot), + #[prost(message, tag = "14")] + LoadSnapshotChunk(super::RequestLoadSnapshotChunk), + #[prost(message, tag = "15")] + ApplySnapshotChunk(super::RequestApplySnapshotChunk), + #[prost(message, tag = "16")] + PrepareProposal(super::RequestPrepareProposal), + #[prost(message, tag = "17")] + ProcessProposal(super::RequestProcessProposal), + } +} +impl ::prost::Name for Request { + const NAME: &'static str = "Request"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestEcho { + #[prost(string, tag = "1")] + pub message: ::prost::alloc::string::String, +} +impl ::prost::Name for RequestEcho { + const NAME: &'static str = "RequestEcho"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestFlush {} +impl ::prost::Name for RequestFlush { + const NAME: &'static str = "RequestFlush"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestInfo { + #[prost(string, tag = "1")] + pub version: ::prost::alloc::string::String, + #[prost(uint64, tag = "2")] + pub block_version: u64, + #[prost(uint64, tag = "3")] + pub p2p_version: u64, + #[prost(string, tag = "4")] + pub abci_version: ::prost::alloc::string::String, +} +impl ::prost::Name for RequestInfo { + const NAME: &'static str = "RequestInfo"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestInitChain { + #[prost(message, optional, tag = "1")] + pub time: ::core::option::Option<::pbjson_types::Timestamp>, + #[prost(string, tag = "2")] + pub chain_id: ::prost::alloc::string::String, + #[prost(message, optional, tag = "3")] + pub consensus_params: ::core::option::Option, + #[prost(message, repeated, tag = "4")] + pub validators: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "5")] + pub app_state_bytes: ::prost::alloc::vec::Vec, + #[prost(int64, tag = "6")] + pub initial_height: i64, +} +impl ::prost::Name for RequestInitChain { + const NAME: &'static str = "RequestInitChain"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, 
::prost::Message)] +pub struct RequestQuery { + #[prost(bytes = "vec", tag = "1")] + pub data: ::prost::alloc::vec::Vec, + #[prost(string, tag = "2")] + pub path: ::prost::alloc::string::String, + #[prost(int64, tag = "3")] + pub height: i64, + #[prost(bool, tag = "4")] + pub prove: bool, +} +impl ::prost::Name for RequestQuery { + const NAME: &'static str = "RequestQuery"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestBeginBlock { + #[prost(bytes = "vec", tag = "1")] + pub hash: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "2")] + pub header: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub last_commit_info: ::core::option::Option, + #[prost(message, repeated, tag = "4")] + pub byzantine_validators: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for RequestBeginBlock { + const NAME: &'static str = "RequestBeginBlock"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestCheckTx { + #[prost(bytes = "vec", tag = "1")] + pub tx: ::prost::alloc::vec::Vec, + #[prost(enumeration = "CheckTxType", tag = "2")] + pub r#type: i32, +} +impl ::prost::Name for RequestCheckTx { + const NAME: &'static str = "RequestCheckTx"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestDeliverTx { + #[prost(bytes = "vec", tag = "1")] + pub tx: 
::prost::alloc::vec::Vec, +} +impl ::prost::Name for RequestDeliverTx { + const NAME: &'static str = "RequestDeliverTx"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestEndBlock { + #[prost(int64, tag = "1")] + pub height: i64, +} +impl ::prost::Name for RequestEndBlock { + const NAME: &'static str = "RequestEndBlock"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestCommit {} +impl ::prost::Name for RequestCommit { + const NAME: &'static str = "RequestCommit"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +/// lists available snapshots +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestListSnapshots {} +impl ::prost::Name for RequestListSnapshots { + const NAME: &'static str = "RequestListSnapshots"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +/// offers a snapshot to the application +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestOfferSnapshot { + /// snapshot offered by peers + #[prost(message, optional, tag = "1")] + pub snapshot: ::core::option::Option, + /// light client-verified app hash for snapshot height + #[prost(bytes = "vec", tag = "2")] + pub app_hash: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for 
RequestOfferSnapshot { + const NAME: &'static str = "RequestOfferSnapshot"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +/// loads a snapshot chunk +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestLoadSnapshotChunk { + #[prost(uint64, tag = "1")] + pub height: u64, + #[prost(uint32, tag = "2")] + pub format: u32, + #[prost(uint32, tag = "3")] + pub chunk: u32, +} +impl ::prost::Name for RequestLoadSnapshotChunk { + const NAME: &'static str = "RequestLoadSnapshotChunk"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +/// Applies a snapshot chunk +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestApplySnapshotChunk { + #[prost(uint32, tag = "1")] + pub index: u32, + #[prost(bytes = "vec", tag = "2")] + pub chunk: ::prost::alloc::vec::Vec, + #[prost(string, tag = "3")] + pub sender: ::prost::alloc::string::String, +} +impl ::prost::Name for RequestApplySnapshotChunk { + const NAME: &'static str = "RequestApplySnapshotChunk"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestPrepareProposal { + /// the modified transactions cannot exceed this size. + #[prost(int64, tag = "1")] + pub max_tx_bytes: i64, + /// txs is an array of transactions that will be included in a block, + /// sent to the app for possible modifications. 
+ #[prost(bytes = "vec", repeated, tag = "2")] + pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + #[prost(message, optional, tag = "3")] + pub local_last_commit: ::core::option::Option, + #[prost(message, repeated, tag = "4")] + pub misbehavior: ::prost::alloc::vec::Vec, + #[prost(int64, tag = "5")] + pub height: i64, + #[prost(message, optional, tag = "6")] + pub time: ::core::option::Option<::pbjson_types::Timestamp>, + #[prost(bytes = "vec", tag = "7")] + pub next_validators_hash: ::prost::alloc::vec::Vec, + /// address of the public key of the validator proposing the block. + #[prost(bytes = "vec", tag = "8")] + pub proposer_address: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for RequestPrepareProposal { + const NAME: &'static str = "RequestPrepareProposal"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RequestProcessProposal { + #[prost(bytes = "vec", repeated, tag = "1")] + pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + #[prost(message, optional, tag = "2")] + pub proposed_last_commit: ::core::option::Option, + #[prost(message, repeated, tag = "3")] + pub misbehavior: ::prost::alloc::vec::Vec, + /// hash is the merkle root hash of the fields of the proposed block. + #[prost(bytes = "vec", tag = "4")] + pub hash: ::prost::alloc::vec::Vec, + #[prost(int64, tag = "5")] + pub height: i64, + #[prost(message, optional, tag = "6")] + pub time: ::core::option::Option<::pbjson_types::Timestamp>, + #[prost(bytes = "vec", tag = "7")] + pub next_validators_hash: ::prost::alloc::vec::Vec, + /// address of the public key of the original proposer of the block. 
+ #[prost(bytes = "vec", tag = "8")] + pub proposer_address: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for RequestProcessProposal { + const NAME: &'static str = "RequestProcessProposal"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Response { + #[prost( + oneof = "response::Value", + tags = "1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18" + )] + pub value: ::core::option::Option, +} +/// Nested message and enum types in `Response`. +pub mod response { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Value { + #[prost(message, tag = "1")] + Exception(super::ResponseException), + #[prost(message, tag = "2")] + Echo(super::ResponseEcho), + #[prost(message, tag = "3")] + Flush(super::ResponseFlush), + #[prost(message, tag = "4")] + Info(super::ResponseInfo), + #[prost(message, tag = "6")] + InitChain(super::ResponseInitChain), + #[prost(message, tag = "7")] + Query(super::ResponseQuery), + #[prost(message, tag = "8")] + BeginBlock(super::ResponseBeginBlock), + #[prost(message, tag = "9")] + CheckTx(super::ResponseCheckTx), + #[prost(message, tag = "10")] + DeliverTx(super::ResponseDeliverTx), + #[prost(message, tag = "11")] + EndBlock(super::ResponseEndBlock), + #[prost(message, tag = "12")] + Commit(super::ResponseCommit), + #[prost(message, tag = "13")] + ListSnapshots(super::ResponseListSnapshots), + #[prost(message, tag = "14")] + OfferSnapshot(super::ResponseOfferSnapshot), + #[prost(message, tag = "15")] + LoadSnapshotChunk(super::ResponseLoadSnapshotChunk), + #[prost(message, tag = "16")] + ApplySnapshotChunk(super::ResponseApplySnapshotChunk), + #[prost(message, tag = "17")] + PrepareProposal(super::ResponsePrepareProposal), + #[prost(message, tag = "18")] + 
ProcessProposal(super::ResponseProcessProposal), + } +} +impl ::prost::Name for Response { + const NAME: &'static str = "Response"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +/// nondeterministic +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseException { + #[prost(string, tag = "1")] + pub error: ::prost::alloc::string::String, +} +impl ::prost::Name for ResponseException { + const NAME: &'static str = "ResponseException"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseEcho { + #[prost(string, tag = "1")] + pub message: ::prost::alloc::string::String, +} +impl ::prost::Name for ResponseEcho { + const NAME: &'static str = "ResponseEcho"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseFlush {} +impl ::prost::Name for ResponseFlush { + const NAME: &'static str = "ResponseFlush"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseInfo { + #[prost(string, tag = "1")] + pub data: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub version: ::prost::alloc::string::String, + #[prost(uint64, tag = "3")] + pub app_version: u64, + #[prost(int64, tag = "4")] + pub 
last_block_height: i64, + #[prost(bytes = "vec", tag = "5")] + pub last_block_app_hash: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for ResponseInfo { + const NAME: &'static str = "ResponseInfo"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseInitChain { + #[prost(message, optional, tag = "1")] + pub consensus_params: ::core::option::Option, + #[prost(message, repeated, tag = "2")] + pub validators: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "3")] + pub app_hash: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for ResponseInitChain { + const NAME: &'static str = "ResponseInitChain"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseQuery { + #[prost(uint32, tag = "1")] + pub code: u32, + /// bytes data = 2; // use "value" instead. 
+ /// + /// nondeterministic + #[prost(string, tag = "3")] + pub log: ::prost::alloc::string::String, + /// nondeterministic + #[prost(string, tag = "4")] + pub info: ::prost::alloc::string::String, + #[prost(int64, tag = "5")] + pub index: i64, + #[prost(bytes = "vec", tag = "6")] + pub key: ::prost::alloc::vec::Vec, + #[prost(bytes = "vec", tag = "7")] + pub value: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "8")] + pub proof_ops: ::core::option::Option, + #[prost(int64, tag = "9")] + pub height: i64, + #[prost(string, tag = "10")] + pub codespace: ::prost::alloc::string::String, +} +impl ::prost::Name for ResponseQuery { + const NAME: &'static str = "ResponseQuery"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseBeginBlock { + #[prost(message, repeated, tag = "1")] + pub events: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for ResponseBeginBlock { + const NAME: &'static str = "ResponseBeginBlock"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseCheckTx { + #[prost(uint32, tag = "1")] + pub code: u32, + #[prost(bytes = "vec", tag = "2")] + pub data: ::prost::alloc::vec::Vec, + /// nondeterministic + #[prost(string, tag = "3")] + pub log: ::prost::alloc::string::String, + /// nondeterministic + #[prost(string, tag = "4")] + pub info: ::prost::alloc::string::String, + #[prost(int64, tag = "5")] + pub gas_wanted: i64, + #[prost(int64, tag = "6")] + pub gas_used: i64, + #[prost(message, repeated, tag = "7")] + pub events: ::prost::alloc::vec::Vec, + #[prost(string, tag = "8")] + 
pub codespace: ::prost::alloc::string::String, + #[prost(string, tag = "9")] + pub sender: ::prost::alloc::string::String, + #[prost(int64, tag = "10")] + pub priority: i64, + /// mempool_error is set by CometBFT. + /// ABCI applictions creating a ResponseCheckTX should not set mempool_error. + #[prost(string, tag = "11")] + pub mempool_error: ::prost::alloc::string::String, +} +impl ::prost::Name for ResponseCheckTx { + const NAME: &'static str = "ResponseCheckTx"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseDeliverTx { + #[prost(uint32, tag = "1")] + pub code: u32, + #[prost(bytes = "vec", tag = "2")] + pub data: ::prost::alloc::vec::Vec, + /// nondeterministic + #[prost(string, tag = "3")] + pub log: ::prost::alloc::string::String, + /// nondeterministic + #[prost(string, tag = "4")] + pub info: ::prost::alloc::string::String, + #[prost(int64, tag = "5")] + pub gas_wanted: i64, + #[prost(int64, tag = "6")] + pub gas_used: i64, + /// nondeterministic + #[prost(message, repeated, tag = "7")] + pub events: ::prost::alloc::vec::Vec, + #[prost(string, tag = "8")] + pub codespace: ::prost::alloc::string::String, +} +impl ::prost::Name for ResponseDeliverTx { + const NAME: &'static str = "ResponseDeliverTx"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseEndBlock { + #[prost(message, repeated, tag = "1")] + pub validator_updates: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "2")] + pub consensus_param_updates: ::core::option::Option, + #[prost(message, repeated, tag = "3")] + pub 
events: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for ResponseEndBlock { + const NAME: &'static str = "ResponseEndBlock"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseCommit { + /// reserve 1 + #[prost(bytes = "vec", tag = "2")] + pub data: ::prost::alloc::vec::Vec, + #[prost(int64, tag = "3")] + pub retain_height: i64, +} +impl ::prost::Name for ResponseCommit { + const NAME: &'static str = "ResponseCommit"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseListSnapshots { + #[prost(message, repeated, tag = "1")] + pub snapshots: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for ResponseListSnapshots { + const NAME: &'static str = "ResponseListSnapshots"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseOfferSnapshot { + #[prost(enumeration = "response_offer_snapshot::Result", tag = "1")] + pub result: i32, +} +/// Nested message and enum types in `ResponseOfferSnapshot`. 
+pub mod response_offer_snapshot { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Result { + /// Unknown result, abort all snapshot restoration + Unknown = 0, + /// Snapshot accepted, apply chunks + Accept = 1, + /// Abort all snapshot restoration + Abort = 2, + /// Reject this specific snapshot, try others + Reject = 3, + /// Reject all snapshots of this format, try others + RejectFormat = 4, + /// Reject all snapshots from the sender(s), try others + RejectSender = 5, + } + impl Result { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Result::Unknown => "UNKNOWN", + Result::Accept => "ACCEPT", + Result::Abort => "ABORT", + Result::Reject => "REJECT", + Result::RejectFormat => "REJECT_FORMAT", + Result::RejectSender => "REJECT_SENDER", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNKNOWN" => Some(Self::Unknown), + "ACCEPT" => Some(Self::Accept), + "ABORT" => Some(Self::Abort), + "REJECT" => Some(Self::Reject), + "REJECT_FORMAT" => Some(Self::RejectFormat), + "REJECT_SENDER" => Some(Self::RejectSender), + _ => None, + } + } + } +} +impl ::prost::Name for ResponseOfferSnapshot { + const NAME: &'static str = "ResponseOfferSnapshot"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseLoadSnapshotChunk { + #[prost(bytes = "vec", tag = "1")] + pub chunk: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for ResponseLoadSnapshotChunk { + const NAME: &'static str = "ResponseLoadSnapshotChunk"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseApplySnapshotChunk { + #[prost(enumeration = "response_apply_snapshot_chunk::Result", tag = "1")] + pub result: i32, + /// Chunks to refetch and reapply + #[prost(uint32, repeated, tag = "2")] + pub refetch_chunks: ::prost::alloc::vec::Vec, + /// Chunk senders to reject and ban + #[prost(string, repeated, tag = "3")] + pub reject_senders: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// Nested message and enum types in `ResponseApplySnapshotChunk`. 
+pub mod response_apply_snapshot_chunk { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Result { + /// Unknown result, abort all snapshot restoration + Unknown = 0, + /// Chunk successfully accepted + Accept = 1, + /// Abort all snapshot restoration + Abort = 2, + /// Retry chunk (combine with refetch and reject) + Retry = 3, + /// Retry snapshot (combine with refetch and reject) + RetrySnapshot = 4, + /// Reject this snapshot, try others + RejectSnapshot = 5, + } + impl Result { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Result::Unknown => "UNKNOWN", + Result::Accept => "ACCEPT", + Result::Abort => "ABORT", + Result::Retry => "RETRY", + Result::RetrySnapshot => "RETRY_SNAPSHOT", + Result::RejectSnapshot => "REJECT_SNAPSHOT", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNKNOWN" => Some(Self::Unknown), + "ACCEPT" => Some(Self::Accept), + "ABORT" => Some(Self::Abort), + "RETRY" => Some(Self::Retry), + "RETRY_SNAPSHOT" => Some(Self::RetrySnapshot), + "REJECT_SNAPSHOT" => Some(Self::RejectSnapshot), + _ => None, + } + } + } +} +impl ::prost::Name for ResponseApplySnapshotChunk { + const NAME: &'static str = "ResponseApplySnapshotChunk"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponsePrepareProposal { + #[prost(bytes = "vec", repeated, tag = "1")] + pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, +} +impl ::prost::Name for ResponsePrepareProposal { + const NAME: &'static str = "ResponsePrepareProposal"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResponseProcessProposal { + #[prost(enumeration = "response_process_proposal::ProposalStatus", tag = "1")] + pub status: i32, +} +/// Nested message and enum types in `ResponseProcessProposal`. +pub mod response_process_proposal { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum ProposalStatus { + Unknown = 0, + Accept = 1, + Reject = 2, + } + impl ProposalStatus { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + ProposalStatus::Unknown => "UNKNOWN", + ProposalStatus::Accept => "ACCEPT", + ProposalStatus::Reject => "REJECT", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNKNOWN" => Some(Self::Unknown), + "ACCEPT" => Some(Self::Accept), + "REJECT" => Some(Self::Reject), + _ => None, + } + } + } +} +impl ::prost::Name for ResponseProcessProposal { + const NAME: &'static str = "ResponseProcessProposal"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CommitInfo { + #[prost(int32, tag = "1")] + pub round: i32, + #[prost(message, repeated, tag = "2")] + pub votes: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for CommitInfo { + const NAME: &'static str = "CommitInfo"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExtendedCommitInfo { + /// The round at which the block proposer decided in the previous height. + #[prost(int32, tag = "1")] + pub round: i32, + /// List of validators' addresses in the last validator set with their voting + /// information, including vote extensions. 
+ #[prost(message, repeated, tag = "2")] + pub votes: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for ExtendedCommitInfo { + const NAME: &'static str = "ExtendedCommitInfo"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +/// Event allows application developers to attach additional information to +/// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. +/// Later, transactions may be queried using these events. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Event { + #[prost(string, tag = "1")] + pub r#type: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "2")] + pub attributes: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for Event { + const NAME: &'static str = "Event"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +/// EventAttribute is a single key-value pair, associated with an event. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EventAttribute { + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub value: ::prost::alloc::string::String, + /// nondeterministic + #[prost(bool, tag = "3")] + pub index: bool, +} +impl ::prost::Name for EventAttribute { + const NAME: &'static str = "EventAttribute"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +/// TxResult contains results of executing the transaction. +/// +/// One usage is indexing transaction results. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TxResult { + #[prost(int64, tag = "1")] + pub height: i64, + #[prost(uint32, tag = "2")] + pub index: u32, + #[prost(bytes = "vec", tag = "3")] + pub tx: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "4")] + pub result: ::core::option::Option, +} +impl ::prost::Name for TxResult { + const NAME: &'static str = "TxResult"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +/// Validator +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Validator { + /// The first 20 bytes of SHA256(public key) + #[prost(bytes = "vec", tag = "1")] + pub address: ::prost::alloc::vec::Vec, + /// PubKey pub_key = 2 \[(gogoproto.nullable)=false\]; + /// + /// The voting power + #[prost(int64, tag = "3")] + pub power: i64, +} +impl ::prost::Name for Validator { + const NAME: &'static str = "Validator"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +/// ValidatorUpdate +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ValidatorUpdate { + #[prost(message, optional, tag = "1")] + pub pub_key: ::core::option::Option, + #[prost(int64, tag = "2")] + pub power: i64, +} +impl ::prost::Name for ValidatorUpdate { + const NAME: &'static str = "ValidatorUpdate"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +/// VoteInfo +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct VoteInfo { + #[prost(message, optional, tag = "1")] + pub validator: 
::core::option::Option, + #[prost(bool, tag = "2")] + pub signed_last_block: bool, +} +impl ::prost::Name for VoteInfo { + const NAME: &'static str = "VoteInfo"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExtendedVoteInfo { + #[prost(message, optional, tag = "1")] + pub validator: ::core::option::Option, + #[prost(bool, tag = "2")] + pub signed_last_block: bool, + /// Reserved for future use + #[prost(bytes = "vec", tag = "3")] + pub vote_extension: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for ExtendedVoteInfo { + const NAME: &'static str = "ExtendedVoteInfo"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Misbehavior { + #[prost(enumeration = "MisbehaviorType", tag = "1")] + pub r#type: i32, + /// The offending validator + #[prost(message, optional, tag = "2")] + pub validator: ::core::option::Option, + /// The height when the offense occurred + #[prost(int64, tag = "3")] + pub height: i64, + /// The corresponding time where the offense occurred + #[prost(message, optional, tag = "4")] + pub time: ::core::option::Option<::pbjson_types::Timestamp>, + /// Total voting power of the validator set in case the ABCI application does + /// not store historical validators. 
+ /// + #[prost(int64, tag = "5")] + pub total_voting_power: i64, +} +impl ::prost::Name for Misbehavior { + const NAME: &'static str = "Misbehavior"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Snapshot { + /// The height at which the snapshot was taken + #[prost(uint64, tag = "1")] + pub height: u64, + /// The application-specific snapshot format + #[prost(uint32, tag = "2")] + pub format: u32, + /// Number of chunks in the snapshot + #[prost(uint32, tag = "3")] + pub chunks: u32, + /// Arbitrary snapshot hash, equal only if identical + #[prost(bytes = "vec", tag = "4")] + pub hash: ::prost::alloc::vec::Vec, + /// Arbitrary application metadata + #[prost(bytes = "vec", tag = "5")] + pub metadata: ::prost::alloc::vec::Vec, +} +impl ::prost::Name for Snapshot { + const NAME: &'static str = "Snapshot"; + const PACKAGE: &'static str = "tendermint.abci"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.abci.{}", Self::NAME) + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum CheckTxType { + New = 0, + Recheck = 1, +} +impl CheckTxType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + CheckTxType::New => "NEW", + CheckTxType::Recheck => "RECHECK", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "NEW" => Some(Self::New), + "RECHECK" => Some(Self::Recheck), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum MisbehaviorType { + Unknown = 0, + DuplicateVote = 1, + LightClientAttack = 2, +} +impl MisbehaviorType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + MisbehaviorType::Unknown => "UNKNOWN", + MisbehaviorType::DuplicateVote => "DUPLICATE_VOTE", + MisbehaviorType::LightClientAttack => "LIGHT_CLIENT_ATTACK", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNKNOWN" => Some(Self::Unknown), + "DUPLICATE_VOTE" => Some(Self::DuplicateVote), + "LIGHT_CLIENT_ATTACK" => Some(Self::LightClientAttack), + _ => None, + } + } +} +/// Generated client implementations. +#[cfg(feature = "rpc")] +pub mod abci_application_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct AbciApplicationClient { + inner: tonic::client::Grpc, + } + impl AbciApplicationClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl AbciApplicationClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> AbciApplicationClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + AbciApplicationClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. 
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + pub async fn echo( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/tendermint.abci.ABCIApplication/Echo", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("tendermint.abci.ABCIApplication", "Echo")); + self.inner.unary(req, path, codec).await + } + pub async fn flush( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/tendermint.abci.ABCIApplication/Flush", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("tendermint.abci.ABCIApplication", "Flush")); + self.inner.unary(req, path, codec).await + } + pub async fn info( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/tendermint.abci.ABCIApplication/Info", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("tendermint.abci.ABCIApplication", "Info")); + self.inner.unary(req, path, codec).await + } 
+ pub async fn deliver_tx( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/tendermint.abci.ABCIApplication/DeliverTx", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("tendermint.abci.ABCIApplication", "DeliverTx")); + self.inner.unary(req, path, codec).await + } + pub async fn check_tx( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/tendermint.abci.ABCIApplication/CheckTx", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("tendermint.abci.ABCIApplication", "CheckTx")); + self.inner.unary(req, path, codec).await + } + pub async fn query( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/tendermint.abci.ABCIApplication/Query", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("tendermint.abci.ABCIApplication", "Query")); + self.inner.unary(req, path, codec).await + } + pub async fn commit( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, 
tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/tendermint.abci.ABCIApplication/Commit", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("tendermint.abci.ABCIApplication", "Commit")); + self.inner.unary(req, path, codec).await + } + pub async fn init_chain( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/tendermint.abci.ABCIApplication/InitChain", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("tendermint.abci.ABCIApplication", "InitChain")); + self.inner.unary(req, path, codec).await + } + pub async fn begin_block( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/tendermint.abci.ABCIApplication/BeginBlock", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("tendermint.abci.ABCIApplication", "BeginBlock"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn end_block( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| 
{ + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/tendermint.abci.ABCIApplication/EndBlock", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("tendermint.abci.ABCIApplication", "EndBlock")); + self.inner.unary(req, path, codec).await + } + pub async fn list_snapshots( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/tendermint.abci.ABCIApplication/ListSnapshots", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("tendermint.abci.ABCIApplication", "ListSnapshots"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn offer_snapshot( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/tendermint.abci.ABCIApplication/OfferSnapshot", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("tendermint.abci.ABCIApplication", "OfferSnapshot"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn load_snapshot_chunk( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + 
tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/tendermint.abci.ABCIApplication/LoadSnapshotChunk", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "tendermint.abci.ABCIApplication", + "LoadSnapshotChunk", + ), + ); + self.inner.unary(req, path, codec).await + } + pub async fn apply_snapshot_chunk( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/tendermint.abci.ABCIApplication/ApplySnapshotChunk", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "tendermint.abci.ABCIApplication", + "ApplySnapshotChunk", + ), + ); + self.inner.unary(req, path, codec).await + } + pub async fn prepare_proposal( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/tendermint.abci.ABCIApplication/PrepareProposal", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("tendermint.abci.ABCIApplication", "PrepareProposal"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn process_proposal( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner 
+ .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/tendermint.abci.ABCIApplication/ProcessProposal", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("tendermint.abci.ABCIApplication", "ProcessProposal"), + ); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +#[cfg(feature = "rpc")] +pub mod abci_application_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with AbciApplicationServer. + #[async_trait] + pub trait AbciApplication: Send + Sync + 'static { + async fn echo( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn flush( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn info( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn deliver_tx( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn check_tx( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn query( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn commit( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn init_chain( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn begin_block( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn end_block( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + 
async fn list_snapshots( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn offer_snapshot( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn load_snapshot_chunk( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn apply_snapshot_chunk( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn prepare_proposal( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn process_proposal( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + #[derive(Debug)] + pub struct AbciApplicationServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl AbciApplicationServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for AbciApplicationServer + where + T: AbciApplication, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/tendermint.abci.ABCIApplication/Echo" => { + #[allow(non_camel_case_types)] + struct EchoSvc(pub Arc); + impl< + T: AbciApplication, + > tonic::server::UnaryService for EchoSvc { + type Response = super::ResponseEcho; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::echo(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = EchoSvc(inner); + let codec = tonic::codec::ProstCodec::default(); 
+ let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/tendermint.abci.ABCIApplication/Flush" => { + #[allow(non_camel_case_types)] + struct FlushSvc(pub Arc); + impl< + T: AbciApplication, + > tonic::server::UnaryService for FlushSvc { + type Response = super::ResponseFlush; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::flush(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = FlushSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/tendermint.abci.ABCIApplication/Info" => { + #[allow(non_camel_case_types)] + struct InfoSvc(pub Arc); + impl< + T: AbciApplication, + > tonic::server::UnaryService for InfoSvc { + type Response = super::ResponseInfo; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + 
::info(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = InfoSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/tendermint.abci.ABCIApplication/DeliverTx" => { + #[allow(non_camel_case_types)] + struct DeliverTxSvc(pub Arc); + impl< + T: AbciApplication, + > tonic::server::UnaryService + for DeliverTxSvc { + type Response = super::ResponseDeliverTx; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::deliver_tx(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = DeliverTxSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + 
max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/tendermint.abci.ABCIApplication/CheckTx" => { + #[allow(non_camel_case_types)] + struct CheckTxSvc(pub Arc); + impl< + T: AbciApplication, + > tonic::server::UnaryService + for CheckTxSvc { + type Response = super::ResponseCheckTx; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::check_tx(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CheckTxSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/tendermint.abci.ABCIApplication/Query" => { + #[allow(non_camel_case_types)] + struct QuerySvc(pub Arc); + impl< + T: AbciApplication, + > tonic::server::UnaryService for QuerySvc { + type Response = super::ResponseQuery; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::query(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; 
+ let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = QuerySvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/tendermint.abci.ABCIApplication/Commit" => { + #[allow(non_camel_case_types)] + struct CommitSvc(pub Arc); + impl< + T: AbciApplication, + > tonic::server::UnaryService + for CommitSvc { + type Response = super::ResponseCommit; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::commit(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CommitSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/tendermint.abci.ABCIApplication/InitChain" => { + #[allow(non_camel_case_types)] + struct InitChainSvc(pub Arc); + impl< + T: 
AbciApplication, + > tonic::server::UnaryService + for InitChainSvc { + type Response = super::ResponseInitChain; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::init_chain(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = InitChainSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/tendermint.abci.ABCIApplication/BeginBlock" => { + #[allow(non_camel_case_types)] + struct BeginBlockSvc(pub Arc); + impl< + T: AbciApplication, + > tonic::server::UnaryService + for BeginBlockSvc { + type Response = super::ResponseBeginBlock; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::begin_block(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let 
inner = inner.0; + let method = BeginBlockSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/tendermint.abci.ABCIApplication/EndBlock" => { + #[allow(non_camel_case_types)] + struct EndBlockSvc(pub Arc); + impl< + T: AbciApplication, + > tonic::server::UnaryService + for EndBlockSvc { + type Response = super::ResponseEndBlock; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::end_block(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = EndBlockSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/tendermint.abci.ABCIApplication/ListSnapshots" => { + #[allow(non_camel_case_types)] + struct ListSnapshotsSvc(pub Arc); + impl< + T: AbciApplication, + > tonic::server::UnaryService + for ListSnapshotsSvc { + type Response = super::ResponseListSnapshots; + type Future = BoxFuture< + tonic::Response, + 
tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_snapshots(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ListSnapshotsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/tendermint.abci.ABCIApplication/OfferSnapshot" => { + #[allow(non_camel_case_types)] + struct OfferSnapshotSvc(pub Arc); + impl< + T: AbciApplication, + > tonic::server::UnaryService + for OfferSnapshotSvc { + type Response = super::ResponseOfferSnapshot; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::offer_snapshot(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = OfferSnapshotSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = 
tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/tendermint.abci.ABCIApplication/LoadSnapshotChunk" => { + #[allow(non_camel_case_types)] + struct LoadSnapshotChunkSvc(pub Arc); + impl< + T: AbciApplication, + > tonic::server::UnaryService + for LoadSnapshotChunkSvc { + type Response = super::ResponseLoadSnapshotChunk; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::load_snapshot_chunk(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = LoadSnapshotChunkSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/tendermint.abci.ABCIApplication/ApplySnapshotChunk" => { + #[allow(non_camel_case_types)] + struct ApplySnapshotChunkSvc(pub Arc); + impl< + T: AbciApplication, + > tonic::server::UnaryService + for ApplySnapshotChunkSvc { + type Response = super::ResponseApplySnapshotChunk; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + 
request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::apply_snapshot_chunk( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ApplySnapshotChunkSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/tendermint.abci.ABCIApplication/PrepareProposal" => { + #[allow(non_camel_case_types)] + struct PrepareProposalSvc(pub Arc); + impl< + T: AbciApplication, + > tonic::server::UnaryService + for PrepareProposalSvc { + type Response = super::ResponsePrepareProposal; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::prepare_proposal(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = PrepareProposalSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = 
tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/tendermint.abci.ABCIApplication/ProcessProposal" => { + #[allow(non_camel_case_types)] + struct ProcessProposalSvc(pub Arc); + impl< + T: AbciApplication, + > tonic::server::UnaryService + for ProcessProposalSvc { + type Response = super::ResponseProcessProposal; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::process_proposal(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ProcessProposalSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for AbciApplicationServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: 
self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for AbciApplicationServer { + const NAME: &'static str = "tendermint.abci.ABCIApplication"; + } +} diff --git a/crates/proto/src/gen/tendermint.types.rs b/crates/proto/src/gen/tendermint.types.rs index 8ea4e48fe9..dd05ee65c4 100644 --- a/crates/proto/src/gen/tendermint.types.rs +++ b/crates/proto/src/gen/tendermint.types.rs @@ -502,3 +502,121 @@ impl ::prost::Name for Block { ::prost::alloc::format!("tendermint.types.{}", Self::NAME) } } +/// ConsensusParams contains consensus critical parameters that determine the +/// validity of blocks. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ConsensusParams { + #[prost(message, optional, tag = "1")] + pub block: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub evidence: ::core::option::Option, + #[prost(message, optional, tag = "3")] + pub validator: ::core::option::Option, + #[prost(message, optional, tag = "4")] + pub version: ::core::option::Option, +} +impl ::prost::Name for ConsensusParams { + const NAME: &'static str = "ConsensusParams"; + const PACKAGE: &'static str = "tendermint.types"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.types.{}", Self::NAME) + } +} +/// BlockParams contains limits on the block size. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlockParams { + /// Max block size, in bytes. 
+ /// Note: must be greater than 0 + #[prost(int64, tag = "1")] + pub max_bytes: i64, + /// Max gas per block. + /// Note: must be greater or equal to -1 + #[prost(int64, tag = "2")] + pub max_gas: i64, +} +impl ::prost::Name for BlockParams { + const NAME: &'static str = "BlockParams"; + const PACKAGE: &'static str = "tendermint.types"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.types.{}", Self::NAME) + } +} +/// EvidenceParams determine how we handle evidence of malfeasance. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EvidenceParams { + /// Max age of evidence, in blocks. + /// + /// The basic formula for calculating this is: MaxAgeDuration / {average block + /// time}. + #[prost(int64, tag = "1")] + pub max_age_num_blocks: i64, + /// Max age of evidence, in time. + /// + /// It should correspond with an app's "unbonding period" or other similar + /// mechanism for handling [Nothing-At-Stake + /// attacks](). + #[prost(message, optional, tag = "2")] + pub max_age_duration: ::core::option::Option<::pbjson_types::Duration>, + /// This sets the maximum size of total evidence in bytes that can be committed in a single block. + /// and should fall comfortably under the max block bytes. + /// Default is 1048576 or 1MB + #[prost(int64, tag = "3")] + pub max_bytes: i64, +} +impl ::prost::Name for EvidenceParams { + const NAME: &'static str = "EvidenceParams"; + const PACKAGE: &'static str = "tendermint.types"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.types.{}", Self::NAME) + } +} +/// ValidatorParams restrict the public key types validators can use. +/// NOTE: uses ABCI pubkey naming, not Amino names. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ValidatorParams { + #[prost(string, repeated, tag = "1")] + pub pub_key_types: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +impl ::prost::Name for ValidatorParams { + const NAME: &'static str = "ValidatorParams"; + const PACKAGE: &'static str = "tendermint.types"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.types.{}", Self::NAME) + } +} +/// VersionParams contains the ABCI application version. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct VersionParams { + #[prost(uint64, tag = "1")] + pub app: u64, +} +impl ::prost::Name for VersionParams { + const NAME: &'static str = "VersionParams"; + const PACKAGE: &'static str = "tendermint.types"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.types.{}", Self::NAME) + } +} +/// HashedParams is a subset of ConsensusParams. +/// +/// It is hashed into the Header.ConsensusHash. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct HashedParams { + #[prost(int64, tag = "1")] + pub block_max_bytes: i64, + #[prost(int64, tag = "2")] + pub block_max_gas: i64, +} +impl ::prost::Name for HashedParams { + const NAME: &'static str = "HashedParams"; + const PACKAGE: &'static str = "tendermint.types"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("tendermint.types.{}", Self::NAME) + } +} diff --git a/crates/proto/src/lib.rs b/crates/proto/src/lib.rs index 42bd785867..d48882a2c2 100644 --- a/crates/proto/src/lib.rs +++ b/crates/proto/src/lib.rs @@ -276,6 +276,76 @@ pub mod tendermint { pub mod p2p { include!("gen/tendermint.p2p.rs"); } + + pub mod abci { + include!("gen/tendermint.abci.rs"); + } +} + +pub mod noble { + pub mod forwarding { + pub mod v1 { + include!("gen/noble.forwarding.v1.rs"); + } + } +} + +pub mod cosmos { + pub mod base { + pub mod v1beta1 { + include!("gen/cosmos.base.v1beta1.rs"); + } + + pub mod query { + pub mod v1beta1 { + include!("gen/cosmos.base.query.v1beta1.rs"); + } + } + + pub mod abci { + pub mod v1beta1 { + include!("gen/cosmos.base.abci.v1beta1.rs"); + } + } + } + + pub mod auth { + pub mod v1beta1 { + include!("gen/cosmos.auth.v1beta1.rs"); + } + } + + pub mod bank { + pub mod v1beta1 { + include!("gen/cosmos.bank.v1beta1.rs"); + } + } + + pub mod tx { + pub mod v1beta1 { + include!("gen/cosmos.tx.v1beta1.rs"); + } + + pub mod config { + pub mod v1 { + include!("gen/cosmos.tx.config.v1.rs"); + } + } + + pub mod signing { + pub mod v1beta1 { + include!("gen/cosmos.tx.signing.v1beta1.rs"); + } + } + } + + pub mod crypto { + pub mod multisig { + pub mod v1beta1 { + include!("gen/cosmos.crypto.multisig.v1beta1.rs"); + } + } + } } #[cfg(feature = "rpc")] diff --git a/deployments/scripts/protobuf-codegen b/deployments/scripts/protobuf-codegen index 2765293cb0..9f9b2a7c2d 100755 --- a/deployments/scripts/protobuf-codegen +++ 
b/deployments/scripts/protobuf-codegen @@ -20,6 +20,7 @@ buf dep update penumbra # Pull our vendored cosmos/IBC proto defs so we can get reflection for service definitions. # The penumbra dependencies will override some of these. +buf export buf.build/noble-assets/forwarding:5a8609a6772d417584a9c60cd8b80881 --output rust-vendored/ buf export buf.build/cosmos/cosmos-sdk:e7a85cef453e4b999ad9aff8714ae05f --output rust-vendored/ buf export buf.build/cosmos/ibc:7ab44ae956a0488ea04e04511efa5f70 --output rust-vendored/ diff --git a/proto/rust-vendored/google/protobuf/any.proto b/proto/rust-vendored/google/protobuf/any.proto new file mode 100644 index 0000000000..58b511583a --- /dev/null +++ b/proto/rust-vendored/google/protobuf/any.proto @@ -0,0 +1,164 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +import "gogoproto/gogo.proto"; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "types"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... 
+// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. 
+ // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; + + option (gogoproto.typedecl) = false; + option (gogoproto.goproto_stringer) = false; + option (gogoproto.gostring) = false; + option (gogoproto.stringer) = false; +} + +option (gogoproto.goproto_registration) = false; diff --git a/proto/rust-vendored/noble/forwarding/v1/account.proto b/proto/rust-vendored/noble/forwarding/v1/account.proto new file mode 100644 index 0000000000..0fc9cdba7b --- /dev/null +++ b/proto/rust-vendored/noble/forwarding/v1/account.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package noble.forwarding.v1; + +import "cosmos/auth/v1beta1/auth.proto"; +import "gogoproto/gogo.proto"; + +option go_package = "github.com/noble-assets/forwarding/x/forwarding/types"; + +message ForwardingAccount { + cosmos.auth.v1beta1.BaseAccount base_account = 1 [(gogoproto.embed) = true]; + + string channel = 2; + string recipient = 3; + int64 created_at = 4; +} + +message ForwardingPubKey { + option (gogoproto.goproto_stringer) = false; + + bytes key = 1; +} diff --git a/proto/rust-vendored/noble/forwarding/v1/genesis.proto b/proto/rust-vendored/noble/forwarding/v1/genesis.proto new file mode 100644 index 0000000000..bcbbc9f448 --- /dev/null +++ 
b/proto/rust-vendored/noble/forwarding/v1/genesis.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package noble.forwarding.v1; + +option go_package = "github.com/noble-assets/forwarding/x/forwarding/types"; + +message GenesisState { + map num_of_accounts = 1; + map num_of_forwards = 2; + map total_forwarded = 3; +} diff --git a/proto/rust-vendored/noble/forwarding/v1/packet.proto b/proto/rust-vendored/noble/forwarding/v1/packet.proto new file mode 100644 index 0000000000..875de708e0 --- /dev/null +++ b/proto/rust-vendored/noble/forwarding/v1/packet.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package noble.forwarding.v1; + +option go_package = "github.com/noble-assets/forwarding/x/forwarding/types"; + +message RegisterAccountData { + string recipient = 1; + string channel = 2; +} + +message RegisterAccountMemo { + message RegisterAccountDataWrapper { + RegisterAccountData forwarding = 1; + } + + RegisterAccountDataWrapper noble = 1; +} diff --git a/proto/rust-vendored/noble/forwarding/v1/query.proto b/proto/rust-vendored/noble/forwarding/v1/query.proto new file mode 100644 index 0000000000..866ce6679e --- /dev/null +++ b/proto/rust-vendored/noble/forwarding/v1/query.proto @@ -0,0 +1,66 @@ +syntax = "proto3"; + +package noble.forwarding.v1; + +import "cosmos/base/v1beta1/coin.proto"; +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; + +option go_package = "github.com/noble-assets/forwarding/x/forwarding/types"; + +service Query { + rpc Address(QueryAddress) returns (QueryAddressResponse) { + option (google.api.http).get = "/noble/forwarding/v1/address/{channel}/{recipient}"; + } + + rpc Stats(QueryStats) returns (QueryStatsResponse) { + option (google.api.http).get = "/noble/forwarding/v1/stats"; + } + + rpc StatsByChannel(QueryStatsByChannel) returns (QueryStatsByChannelResponse) { + option (google.api.http).get = "/noble/forwarding/v1/stats/{channel}"; + } +} + +// + +message QueryAddress { + string channel = 1; + string recipient = 2; +} + 
+message QueryAddressResponse { + string address = 1; + bool exists = 2; +} + +message QueryStats {} + +message QueryStatsResponse { + map stats = 1 [(gogoproto.nullable) = false]; +} + +message QueryStatsByChannel { + string channel = 1; +} + +message QueryStatsByChannelResponse { + uint64 num_of_accounts = 1; + uint64 num_of_forwards = 2; + repeated cosmos.base.v1beta1.Coin total_forwarded = 3 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "github.com/cosmos/cosmos-sdk/types.Coins" + ]; +} + +// + +message Stats { + string chain_id = 1; + uint64 num_of_accounts = 2; + uint64 num_of_forwards = 3; + repeated cosmos.base.v1beta1.Coin total_forwarded = 4 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "github.com/cosmos/cosmos-sdk/types.Coins" + ]; +} diff --git a/proto/rust-vendored/noble/forwarding/v1/tx.proto b/proto/rust-vendored/noble/forwarding/v1/tx.proto new file mode 100644 index 0000000000..0485a193b1 --- /dev/null +++ b/proto/rust-vendored/noble/forwarding/v1/tx.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package noble.forwarding.v1; + +option go_package = "github.com/noble-assets/forwarding/x/forwarding/types"; + +service Msg { + rpc RegisterAccount(noble.forwarding.v1.MsgRegisterAccount) returns (noble.forwarding.v1.MsgRegisterAccountResponse); + rpc ClearAccount(noble.forwarding.v1.MsgClearAccount) returns (noble.forwarding.v1.MsgClearAccountResponse); +} + +// + +message MsgRegisterAccount { + string signer = 1; + string recipient = 2; + string channel = 3; +} + +message MsgRegisterAccountResponse { + string address = 1; +} + +message MsgClearAccount { + string signer = 1; + string address = 2; +} + +message MsgClearAccountResponse {} diff --git a/tools/proto-compiler/src/main.rs b/tools/proto-compiler/src/main.rs index 0dfb4bfeed..8915c3d811 100644 --- a/tools/proto-compiler/src/main.rs +++ b/tools/proto-compiler/src/main.rs @@ -116,13 +116,25 @@ fn main() -> anyhow::Result<()> { 
"../../proto/penumbra/penumbra/tools/summoning/v1/summoning.proto", "../../proto/penumbra/penumbra/util/tendermint_proxy/v1/tendermint_proxy.proto", "../../proto/penumbra/penumbra/view/v1/view.proto", + "../../proto/rust-vendored/tendermint/abci/types.proto", "../../proto/rust-vendored/tendermint/types/validator.proto", "../../proto/rust-vendored/tendermint/p2p/types.proto", "../../proto/rust-vendored/cosmos/bank/v1beta1/query.proto", + "../../proto/rust-vendored/cosmos/tx/v1beta1/service.proto", + "../../proto/rust-vendored/cosmos/tx/v1beta1/tx.proto", + "../../proto/rust-vendored/cosmos/tx/config/v1/config.proto", + "../../proto/rust-vendored/cosmos/tx/signing/v1beta1/signing.proto", + "../../proto/rust-vendored/cosmos/base/abci/v1beta1/abci.proto", + "../../proto/rust-vendored/cosmos/crypto/multisig/v1beta1/multisig.proto", "../../proto/rust-vendored/ibc/applications/transfer/v1/query.proto", "../../proto/rust-vendored/ibc/core/channel/v1/query.proto", "../../proto/rust-vendored/ibc/core/client/v1/query.proto", "../../proto/rust-vendored/ibc/core/connection/v1/query.proto", + "../../proto/rust-vendored/noble/forwarding/v1/account.proto", + "../../proto/rust-vendored/noble/forwarding/v1/genesis.proto", + "../../proto/rust-vendored/noble/forwarding/v1/packet.proto", + "../../proto/rust-vendored/noble/forwarding/v1/query.proto", + "../../proto/rust-vendored/noble/forwarding/v1/tx.proto", ], &["../../proto/penumbra/", "../../proto/rust-vendored/"], )?; From e16700fd89640028c6135690ae951cf8bd2d76e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=BAc=C3=A1s=20Meier?= Date: Wed, 2 Oct 2024 14:18:48 -0700 Subject: [PATCH 30/43] feat: events for ibc transfers (#4874) ## Describe your changes This adds specific events related to fungible token transfers, recording them as they happen in the shielded pool. 
First of all, this is a much more legible and readily consumable event around actual IBC events we care about, which is reason enough to have this, imo, but also, there's currently a flaw in the whole event system in that we don't have access, through events only, to the actual acknowledgement data for a packet. In particular, we can't tell using the raw ibc events if a packet was acked successfully, finalizing a transfer, or unsuccessfully, causing the transfer to be refunded. Knowing which of the two matters a lot, and will cause queries like "how much of this asset has been locked in the shielded pool" to not return the right result by quite a bit. ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > Event addition only --- .../shielded-pool/src/component/transfer.rs | 104 ++- .../core/component/shielded-pool/src/event.rs | 63 +- ...enumbra.core.component.shielded_pool.v1.rs | 146 ++++ ...a.core.component.shielded_pool.v1.serde.rs | 646 ++++++++++++++++++ .../proto/src/gen/proto_descriptor.bin.no_lfs | Bin 642967 -> 646138 bytes .../shielded_pool/v1/shielded_pool.proto | 55 ++ 6 files changed, 1004 insertions(+), 10 deletions(-) diff --git a/crates/core/component/shielded-pool/src/component/transfer.rs b/crates/core/component/shielded-pool/src/component/transfer.rs index d75df18540..18308e82ca 100644 --- a/crates/core/component/shielded-pool/src/component/transfer.rs +++ b/crates/core/component/shielded-pool/src/component/transfer.rs @@ -2,7 +2,7 @@ use std::str::FromStr; use crate::{ component::{AssetRegistry, NoteManager}, - Ics20Withdrawal, + event, Ics20Withdrawal, }; use anyhow::{Context, Result}; use async_trait::async_trait; @@ -21,9 +21,11 @@ use ibc_types::{ transfer::acknowledgement::TokenTransferAcknowledgement, }; use penumbra_asset::{asset, asset::Metadata, 
Value}; +use penumbra_ibc::component::ChannelStateReadExt; use penumbra_keys::Address; use penumbra_num::Amount; use penumbra_proto::{ + core::component::shielded_pool::v1::FungibleTokenTransferPacketMetadata, penumbra::core::component::ibc::v1::FungibleTokenPacketData, StateReadProto, StateWriteProto, }; use penumbra_sct::CommitmentSource; @@ -116,6 +118,23 @@ pub trait Ics20TransferWriteExt: StateWrite { ), new_value_balance, ); + self.record_proto(event::outbound_fungible_token_transfer( + Value { + amount: withdrawal.amount, + asset_id: withdrawal.denom.id(), + }, + &withdrawal.return_address, + withdrawal.destination_chain_address.clone(), + FungibleTokenTransferPacketMetadata { + channel: withdrawal.source_channel.0.clone(), + sequence: self + .get_send_sequence( + &withdrawal.source_channel, + &checked_packet.source_port(), + ) + .await?, + }, + )); } else { // receiver is the source, burn utxos @@ -149,6 +168,23 @@ pub trait Ics20TransferWriteExt: StateWrite { ), new_value_balance, ); + self.record_proto(event::outbound_fungible_token_transfer( + Value { + amount: withdrawal.amount, + asset_id: withdrawal.denom.id(), + }, + &withdrawal.return_address, + withdrawal.destination_chain_address.clone(), + FungibleTokenTransferPacketMetadata { + channel: withdrawal.source_channel.0.clone(), + sequence: self + .get_send_sequence( + &withdrawal.source_channel, + &checked_packet.source_port(), + ) + .await?, + }, + )); } self.send_packet_execute(checked_packet).await; @@ -352,6 +388,15 @@ async fn recv_transfer_packet_inner( state_key::ics20_value_balance::by_asset_id(&msg.packet.chan_on_b, &denom.id()), new_value_balance, ); + state.record_proto(event::inbound_fungible_token_transfer( + value, + packet_data.sender.clone(), + &receiver_address, + FungibleTokenTransferPacketMetadata { + channel: msg.packet.chan_on_a.0.clone(), + sequence: msg.packet.sequence.0, + }, + )); } else { // create new denom: // @@ -403,13 +448,26 @@ async fn recv_transfer_packet_inner( 
state_key::ics20_value_balance::by_asset_id(&msg.packet.chan_on_b, &denom.id()), new_value_balance, ); + state.record_proto(event::inbound_fungible_token_transfer( + value, + packet_data.sender.clone(), + &receiver_address, + FungibleTokenTransferPacketMetadata { + channel: msg.packet.chan_on_a.0.clone(), + sequence: msg.packet.sequence.0, + }, + )); } Ok(()) } // see: https://github.com/cosmos/ibc/blob/8326e26e7e1188b95c32481ff00348a705b23700/spec/app/ics-020-fungible-token-transfer/README.md?plain=1#L297 -async fn refund_tokens(mut state: S, packet: &Packet) -> Result<()> { +async fn refund_tokens( + mut state: S, + packet: &Packet, + reason: event::FungibleTokenRefundReason, +) -> Result<()> { let packet_data: FungibleTokenPacketData = serde_json::from_slice(packet.data.as_slice())?; let denom: asset::Metadata = packet_data // CRITICAL: verify that this denom is validated in upstream timeout handling .denom @@ -469,6 +527,17 @@ async fn refund_tokens(mut state: S, packet: &Packet) -> Result<( state_key::ics20_value_balance::by_asset_id(&packet.chan_on_a, &denom.id()), new_value_balance, ); + state.record_proto(event::outbound_fungible_token_refund( + value, + &receiver, // note, this comes from packet_data.sender + packet_data.receiver.clone(), + reason, + // Use the destination channel, i.e. our name for it, to be consistent across events. + FungibleTokenTransferPacketMetadata { + channel: packet.chan_on_b.0.clone(), + sequence: packet.sequence.0, + }, + )); } else { let value_balance: Amount = state .get(&state_key::ics20_value_balance::by_asset_id( @@ -497,6 +566,17 @@ async fn refund_tokens(mut state: S, packet: &Packet) -> Result<( state_key::ics20_value_balance::by_asset_id(&packet.chan_on_a, &denom.id()), new_value_balance, ); + // note, order flipped relative to the event. 
+ state.record_proto(event::outbound_fungible_token_refund( + value, + &receiver, // note, this comes from packet_data.sender + packet_data.receiver.clone(), + reason, + FungibleTokenTransferPacketMetadata { + channel: packet.chan_on_b.0.clone(), + sequence: packet.sequence.0, + }, + )); } Ok(()) @@ -535,9 +615,13 @@ impl AppHandlerExecute for Ics20Transfer { async fn timeout_packet_execute(mut state: S, msg: &MsgTimeout) -> Result<()> { // timeouts may fail due to counterparty chains sending transfers of u128-1 - refund_tokens(&mut state, &msg.packet) - .await - .context("able to timeout packet")?; + refund_tokens( + &mut state, + &msg.packet, + event::FungibleTokenRefundReason::Timeout, + ) + .await + .context("able to timeout packet")?; Ok(()) } @@ -552,9 +636,13 @@ impl AppHandlerExecute for Ics20Transfer { // in the case where a counterparty chain acknowledges a packet with an error, // for example due to a middleware processing issue or other behavior, // the funds should be unescrowed back to the packet sender. 
- refund_tokens(&mut state, &msg.packet) - .await - .context("unable to refund packet acknowledgement")?; + refund_tokens( + &mut state, + &msg.packet, + event::FungibleTokenRefundReason::Error, + ) + .await + .context("unable to refund packet acknowledgement")?; } Ok(()) diff --git a/crates/core/component/shielded-pool/src/event.rs b/crates/core/component/shielded-pool/src/event.rs index 3280655f1b..f13f1398c0 100644 --- a/crates/core/component/shielded-pool/src/event.rs +++ b/crates/core/component/shielded-pool/src/event.rs @@ -1,7 +1,12 @@ +use penumbra_asset::Value; +use penumbra_keys::Address; +use penumbra_proto::core::component::shielded_pool::v1::{ + event_outbound_fungible_token_refund::Reason, EventInboundFungibleTokenTransfer, + EventOutboundFungibleTokenRefund, EventOutboundFungibleTokenTransfer, EventOutput, EventSpend, + FungibleTokenTransferPacketMetadata, +}; use penumbra_sct::Nullifier; -use penumbra_proto::core::component::shielded_pool::v1::{EventOutput, EventSpend}; - use crate::NotePayload; // These are sort of like the proto/domain type From impls, because @@ -18,3 +23,57 @@ pub fn output(note_payload: &NotePayload) -> EventOutput { note_commitment: Some(note_payload.note_commitment.into()), } } + +pub fn outbound_fungible_token_transfer( + value: Value, + sender: &Address, + receiver: String, + meta: FungibleTokenTransferPacketMetadata, +) -> EventOutboundFungibleTokenTransfer { + EventOutboundFungibleTokenTransfer { + value: Some(value.into()), + sender: Some(sender.into()), + receiver, + meta: Some(meta), + } +} + +#[derive(Clone, Copy, Debug)] +pub enum FungibleTokenRefundReason { + Timeout, + Error, +} + +pub fn outbound_fungible_token_refund( + value: Value, + sender: &Address, + receiver: String, + reason: FungibleTokenRefundReason, + meta: FungibleTokenTransferPacketMetadata, +) -> EventOutboundFungibleTokenRefund { + let reason = match reason { + FungibleTokenRefundReason::Timeout => Reason::Timeout, + FungibleTokenRefundReason::Error 
=> Reason::Error, + }; + EventOutboundFungibleTokenRefund { + value: Some(value.into()), + sender: Some(sender.into()), + receiver, + reason: reason as i32, + meta: Some(meta), + } +} + +pub fn inbound_fungible_token_transfer( + value: Value, + sender: String, + receiver: &Address, + meta: FungibleTokenTransferPacketMetadata, +) -> EventInboundFungibleTokenTransfer { + EventInboundFungibleTokenTransfer { + value: Some(value.into()), + sender, + receiver: Some(receiver.into()), + meta: Some(meta), + } +} diff --git a/crates/proto/src/gen/penumbra.core.component.shielded_pool.v1.rs b/crates/proto/src/gen/penumbra.core.component.shielded_pool.v1.rs index 31d9776a20..1362033de8 100644 --- a/crates/proto/src/gen/penumbra.core.component.shielded_pool.v1.rs +++ b/crates/proto/src/gen/penumbra.core.component.shielded_pool.v1.rs @@ -732,6 +732,152 @@ impl ::prost::Name for AssetMetadataByIdsResponse { ) } } +/// Metadata about the packet associated with the transfer. +/// +/// This allows identifying which specific packet is associated with the transfer. +/// Implicitly, both ports are going to be "transfer". +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FungibleTokenTransferPacketMetadata { + /// The identifier for the channel on *this* chain. + #[prost(string, tag = "1")] + pub channel: ::prost::alloc::string::String, + /// Sequence number for the packet. 
+ #[prost(uint64, tag = "2")] + pub sequence: u64, +} +impl ::prost::Name for FungibleTokenTransferPacketMetadata { + const NAME: &'static str = "FungibleTokenTransferPacketMetadata"; + const PACKAGE: &'static str = "penumbra.core.component.shielded_pool.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!( + "penumbra.core.component.shielded_pool.v1.{}", Self::NAME + ) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EventOutboundFungibleTokenTransfer { + /// The value being transferred out of the chain. + #[prost(message, optional, tag = "1")] + pub value: ::core::option::Option, + /// The sending address on chain. + #[prost(message, optional, tag = "2")] + pub sender: ::core::option::Option, + /// The receiving address, which we don't assume anything about. + #[prost(string, tag = "3")] + pub receiver: ::prost::alloc::string::String, + #[prost(message, optional, tag = "4")] + pub meta: ::core::option::Option, +} +impl ::prost::Name for EventOutboundFungibleTokenTransfer { + const NAME: &'static str = "EventOutboundFungibleTokenTransfer"; + const PACKAGE: &'static str = "penumbra.core.component.shielded_pool.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!( + "penumbra.core.component.shielded_pool.v1.{}", Self::NAME + ) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EventOutboundFungibleTokenRefund { + /// The value being refunded. + #[prost(message, optional, tag = "1")] + pub value: ::core::option::Option, + /// The sender being refunded. + #[prost(message, optional, tag = "2")] + pub sender: ::core::option::Option, + /// The address that attempted to receive the funds. + #[prost(string, tag = "3")] + pub receiver: ::prost::alloc::string::String, + /// Why the refund is happening. 
+ #[prost(enumeration = "event_outbound_fungible_token_refund::Reason", tag = "4")] + pub reason: i32, + /// This will be the metadata for the packet for the transfer being refunded. + /// + /// This allows linking a refund to the transfer. + #[prost(message, optional, tag = "5")] + pub meta: ::core::option::Option, +} +/// Nested message and enum types in `EventOutboundFungibleTokenRefund`. +pub mod event_outbound_fungible_token_refund { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Reason { + /// No particular reason. + Unspecified = 0, + /// The transfer timed out. + Timeout = 1, + /// The transfer was acknowledged with an error. + Error = 2, + } + impl Reason { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Reason::Unspecified => "REASON_UNSPECIFIED", + Reason::Timeout => "REASON_TIMEOUT", + Reason::Error => "REASON_ERROR", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "REASON_UNSPECIFIED" => Some(Self::Unspecified), + "REASON_TIMEOUT" => Some(Self::Timeout), + "REASON_ERROR" => Some(Self::Error), + _ => None, + } + } + } +} +impl ::prost::Name for EventOutboundFungibleTokenRefund { + const NAME: &'static str = "EventOutboundFungibleTokenRefund"; + const PACKAGE: &'static str = "penumbra.core.component.shielded_pool.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!( + "penumbra.core.component.shielded_pool.v1.{}", Self::NAME + ) + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EventInboundFungibleTokenTransfer { + /// The value being transferred in. + #[prost(message, optional, tag = "1")] + pub value: ::core::option::Option, + /// The sender on the counterparty chain. + #[prost(string, tag = "2")] + pub sender: ::prost::alloc::string::String, + /// The receiver on this chain. + #[prost(message, optional, tag = "3")] + pub receiver: ::core::option::Option, + #[prost(message, optional, tag = "4")] + pub meta: ::core::option::Option, +} +impl ::prost::Name for EventInboundFungibleTokenTransfer { + const NAME: &'static str = "EventInboundFungibleTokenTransfer"; + const PACKAGE: &'static str = "penumbra.core.component.shielded_pool.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!( + "penumbra.core.component.shielded_pool.v1.{}", Self::NAME + ) + } +} /// Generated client implementations. 
#[cfg(feature = "rpc")] pub mod query_service_client { diff --git a/crates/proto/src/gen/penumbra.core.component.shielded_pool.v1.serde.rs b/crates/proto/src/gen/penumbra.core.component.shielded_pool.v1.serde.rs index a4df2af9c1..85e3edf22e 100644 --- a/crates/proto/src/gen/penumbra.core.component.shielded_pool.v1.serde.rs +++ b/crates/proto/src/gen/penumbra.core.component.shielded_pool.v1.serde.rs @@ -494,6 +494,537 @@ impl<'de> serde::Deserialize<'de> for EventBroadcastClue { deserializer.deserialize_struct("penumbra.core.component.shielded_pool.v1.EventBroadcastClue", FIELDS, GeneratedVisitor) } } +impl serde::Serialize for EventInboundFungibleTokenTransfer { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.value.is_some() { + len += 1; + } + if !self.sender.is_empty() { + len += 1; + } + if self.receiver.is_some() { + len += 1; + } + if self.meta.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("penumbra.core.component.shielded_pool.v1.EventInboundFungibleTokenTransfer", len)?; + if let Some(v) = self.value.as_ref() { + struct_ser.serialize_field("value", v)?; + } + if !self.sender.is_empty() { + struct_ser.serialize_field("sender", &self.sender)?; + } + if let Some(v) = self.receiver.as_ref() { + struct_ser.serialize_field("receiver", v)?; + } + if let Some(v) = self.meta.as_ref() { + struct_ser.serialize_field("meta", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for EventInboundFungibleTokenTransfer { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "value", + "sender", + "receiver", + "meta", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Value, + Sender, + Receiver, + Meta, + __SkipField__, + } + impl<'de> serde::Deserialize<'de> for 
GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "value" => Ok(GeneratedField::Value), + "sender" => Ok(GeneratedField::Sender), + "receiver" => Ok(GeneratedField::Receiver), + "meta" => Ok(GeneratedField::Meta), + _ => Ok(GeneratedField::__SkipField__), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = EventInboundFungibleTokenTransfer; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct penumbra.core.component.shielded_pool.v1.EventInboundFungibleTokenTransfer") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut value__ = None; + let mut sender__ = None; + let mut receiver__ = None; + let mut meta__ = None; + while let Some(k) = map_.next_key()? 
{ + match k { + GeneratedField::Value => { + if value__.is_some() { + return Err(serde::de::Error::duplicate_field("value")); + } + value__ = map_.next_value()?; + } + GeneratedField::Sender => { + if sender__.is_some() { + return Err(serde::de::Error::duplicate_field("sender")); + } + sender__ = Some(map_.next_value()?); + } + GeneratedField::Receiver => { + if receiver__.is_some() { + return Err(serde::de::Error::duplicate_field("receiver")); + } + receiver__ = map_.next_value()?; + } + GeneratedField::Meta => { + if meta__.is_some() { + return Err(serde::de::Error::duplicate_field("meta")); + } + meta__ = map_.next_value()?; + } + GeneratedField::__SkipField__ => { + let _ = map_.next_value::()?; + } + } + } + Ok(EventInboundFungibleTokenTransfer { + value: value__, + sender: sender__.unwrap_or_default(), + receiver: receiver__, + meta: meta__, + }) + } + } + deserializer.deserialize_struct("penumbra.core.component.shielded_pool.v1.EventInboundFungibleTokenTransfer", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for EventOutboundFungibleTokenRefund { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.value.is_some() { + len += 1; + } + if self.sender.is_some() { + len += 1; + } + if !self.receiver.is_empty() { + len += 1; + } + if self.reason != 0 { + len += 1; + } + if self.meta.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("penumbra.core.component.shielded_pool.v1.EventOutboundFungibleTokenRefund", len)?; + if let Some(v) = self.value.as_ref() { + struct_ser.serialize_field("value", v)?; + } + if let Some(v) = self.sender.as_ref() { + struct_ser.serialize_field("sender", v)?; + } + if !self.receiver.is_empty() { + struct_ser.serialize_field("receiver", &self.receiver)?; + } + if self.reason != 0 { + let v = event_outbound_fungible_token_refund::Reason::try_from(self.reason) + .map_err(|_| 
serde::ser::Error::custom(format!("Invalid variant {}", self.reason)))?; + struct_ser.serialize_field("reason", &v)?; + } + if let Some(v) = self.meta.as_ref() { + struct_ser.serialize_field("meta", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for EventOutboundFungibleTokenRefund { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "value", + "sender", + "receiver", + "reason", + "meta", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Value, + Sender, + Receiver, + Reason, + Meta, + __SkipField__, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "value" => Ok(GeneratedField::Value), + "sender" => Ok(GeneratedField::Sender), + "receiver" => Ok(GeneratedField::Receiver), + "reason" => Ok(GeneratedField::Reason), + "meta" => Ok(GeneratedField::Meta), + _ => Ok(GeneratedField::__SkipField__), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = EventOutboundFungibleTokenRefund; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct penumbra.core.component.shielded_pool.v1.EventOutboundFungibleTokenRefund") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut value__ = 
None; + let mut sender__ = None; + let mut receiver__ = None; + let mut reason__ = None; + let mut meta__ = None; + while let Some(k) = map_.next_key()? { + match k { + GeneratedField::Value => { + if value__.is_some() { + return Err(serde::de::Error::duplicate_field("value")); + } + value__ = map_.next_value()?; + } + GeneratedField::Sender => { + if sender__.is_some() { + return Err(serde::de::Error::duplicate_field("sender")); + } + sender__ = map_.next_value()?; + } + GeneratedField::Receiver => { + if receiver__.is_some() { + return Err(serde::de::Error::duplicate_field("receiver")); + } + receiver__ = Some(map_.next_value()?); + } + GeneratedField::Reason => { + if reason__.is_some() { + return Err(serde::de::Error::duplicate_field("reason")); + } + reason__ = Some(map_.next_value::()? as i32); + } + GeneratedField::Meta => { + if meta__.is_some() { + return Err(serde::de::Error::duplicate_field("meta")); + } + meta__ = map_.next_value()?; + } + GeneratedField::__SkipField__ => { + let _ = map_.next_value::()?; + } + } + } + Ok(EventOutboundFungibleTokenRefund { + value: value__, + sender: sender__, + receiver: receiver__.unwrap_or_default(), + reason: reason__.unwrap_or_default(), + meta: meta__, + }) + } + } + deserializer.deserialize_struct("penumbra.core.component.shielded_pool.v1.EventOutboundFungibleTokenRefund", FIELDS, GeneratedVisitor) + } +} +impl serde::Serialize for event_outbound_fungible_token_refund::Reason { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + let variant = match self { + Self::Unspecified => "REASON_UNSPECIFIED", + Self::Timeout => "REASON_TIMEOUT", + Self::Error => "REASON_ERROR", + }; + serializer.serialize_str(variant) + } +} +impl<'de> serde::Deserialize<'de> for event_outbound_fungible_token_refund::Reason { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = 
&[ + "REASON_UNSPECIFIED", + "REASON_TIMEOUT", + "REASON_ERROR", + ]; + + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = event_outbound_fungible_token_refund::Reason; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + fn visit_i64(self, v: i64) -> std::result::Result + where + E: serde::de::Error, + { + i32::try_from(v) + .ok() + .and_then(|x| x.try_into().ok()) + .ok_or_else(|| { + serde::de::Error::invalid_value(serde::de::Unexpected::Signed(v), &self) + }) + } + + fn visit_u64(self, v: u64) -> std::result::Result + where + E: serde::de::Error, + { + i32::try_from(v) + .ok() + .and_then(|x| x.try_into().ok()) + .ok_or_else(|| { + serde::de::Error::invalid_value(serde::de::Unexpected::Unsigned(v), &self) + }) + } + + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "REASON_UNSPECIFIED" => Ok(event_outbound_fungible_token_refund::Reason::Unspecified), + "REASON_TIMEOUT" => Ok(event_outbound_fungible_token_refund::Reason::Timeout), + "REASON_ERROR" => Ok(event_outbound_fungible_token_refund::Reason::Error), + _ => Err(serde::de::Error::unknown_variant(value, FIELDS)), + } + } + } + deserializer.deserialize_any(GeneratedVisitor) + } +} +impl serde::Serialize for EventOutboundFungibleTokenTransfer { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if self.value.is_some() { + len += 1; + } + if self.sender.is_some() { + len += 1; + } + if !self.receiver.is_empty() { + len += 1; + } + if self.meta.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("penumbra.core.component.shielded_pool.v1.EventOutboundFungibleTokenTransfer", len)?; + if let Some(v) = self.value.as_ref() { + struct_ser.serialize_field("value", 
v)?; + } + if let Some(v) = self.sender.as_ref() { + struct_ser.serialize_field("sender", v)?; + } + if !self.receiver.is_empty() { + struct_ser.serialize_field("receiver", &self.receiver)?; + } + if let Some(v) = self.meta.as_ref() { + struct_ser.serialize_field("meta", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for EventOutboundFungibleTokenTransfer { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "value", + "sender", + "receiver", + "meta", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Value, + Sender, + Receiver, + Meta, + __SkipField__, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "value" => Ok(GeneratedField::Value), + "sender" => Ok(GeneratedField::Sender), + "receiver" => Ok(GeneratedField::Receiver), + "meta" => Ok(GeneratedField::Meta), + _ => Ok(GeneratedField::__SkipField__), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = EventOutboundFungibleTokenTransfer; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct penumbra.core.component.shielded_pool.v1.EventOutboundFungibleTokenTransfer") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + 
let mut value__ = None; + let mut sender__ = None; + let mut receiver__ = None; + let mut meta__ = None; + while let Some(k) = map_.next_key()? { + match k { + GeneratedField::Value => { + if value__.is_some() { + return Err(serde::de::Error::duplicate_field("value")); + } + value__ = map_.next_value()?; + } + GeneratedField::Sender => { + if sender__.is_some() { + return Err(serde::de::Error::duplicate_field("sender")); + } + sender__ = map_.next_value()?; + } + GeneratedField::Receiver => { + if receiver__.is_some() { + return Err(serde::de::Error::duplicate_field("receiver")); + } + receiver__ = Some(map_.next_value()?); + } + GeneratedField::Meta => { + if meta__.is_some() { + return Err(serde::de::Error::duplicate_field("meta")); + } + meta__ = map_.next_value()?; + } + GeneratedField::__SkipField__ => { + let _ = map_.next_value::()?; + } + } + } + Ok(EventOutboundFungibleTokenTransfer { + value: value__, + sender: sender__, + receiver: receiver__.unwrap_or_default(), + meta: meta__, + }) + } + } + deserializer.deserialize_struct("penumbra.core.component.shielded_pool.v1.EventOutboundFungibleTokenTransfer", FIELDS, GeneratedVisitor) + } +} impl serde::Serialize for EventOutput { #[allow(deprecated)] fn serialize(&self, serializer: S) -> std::result::Result @@ -1341,6 +1872,121 @@ impl<'de> serde::Deserialize<'de> for FmdParameters { deserializer.deserialize_struct("penumbra.core.component.shielded_pool.v1.FmdParameters", FIELDS, GeneratedVisitor) } } +impl serde::Serialize for FungibleTokenTransferPacketMetadata { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if !self.channel.is_empty() { + len += 1; + } + if self.sequence != 0 { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("penumbra.core.component.shielded_pool.v1.FungibleTokenTransferPacketMetadata", len)?; + if !self.channel.is_empty() { + 
struct_ser.serialize_field("channel", &self.channel)?; + } + if self.sequence != 0 { + #[allow(clippy::needless_borrow)] + struct_ser.serialize_field("sequence", ToString::to_string(&self.sequence).as_str())?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for FungibleTokenTransferPacketMetadata { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "channel", + "sequence", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Channel, + Sequence, + __SkipField__, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "channel" => Ok(GeneratedField::Channel), + "sequence" => Ok(GeneratedField::Sequence), + _ => Ok(GeneratedField::__SkipField__), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = FungibleTokenTransferPacketMetadata; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct penumbra.core.component.shielded_pool.v1.FungibleTokenTransferPacketMetadata") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut channel__ = None; + let mut sequence__ = None; + while let Some(k) = map_.next_key()? 
{ + match k { + GeneratedField::Channel => { + if channel__.is_some() { + return Err(serde::de::Error::duplicate_field("channel")); + } + channel__ = Some(map_.next_value()?); + } + GeneratedField::Sequence => { + if sequence__.is_some() { + return Err(serde::de::Error::duplicate_field("sequence")); + } + sequence__ = + Some(map_.next_value::<::pbjson::private::NumberDeserialize<_>>()?.0) + ; + } + GeneratedField::__SkipField__ => { + let _ = map_.next_value::()?; + } + } + } + Ok(FungibleTokenTransferPacketMetadata { + channel: channel__.unwrap_or_default(), + sequence: sequence__.unwrap_or_default(), + }) + } + } + deserializer.deserialize_struct("penumbra.core.component.shielded_pool.v1.FungibleTokenTransferPacketMetadata", FIELDS, GeneratedVisitor) + } +} impl serde::Serialize for GenesisContent { #[allow(deprecated)] fn serialize(&self, serializer: S) -> std::result::Result diff --git a/crates/proto/src/gen/proto_descriptor.bin.no_lfs b/crates/proto/src/gen/proto_descriptor.bin.no_lfs index 5127703ec3ff420ed8031f1a9a75fde97737de06..a1b2955a571836ef3b777301c5b908cafb93b694 100644 GIT binary patch delta 2768 zcma)8O>7ip81B3~yWRcxD?_`5{p0H*F0@c027*RFNJ>H?RF)bJYRGi=Yj^B+W}TU( zX*{g)Vm#@=WD`Olgb+|F5;ZMBVoC@hV7z)XA>rUnqbG?6pYNNQWr-*p=J4#i@ALjW z?>pN*S9-Km`tUpLy@&eZ=NfAdeXs9b`tqT6EL|$T%{Cos+SAr#T}(JL!k%zV+nW;Z zn`UK3_(z0qR!!e*_bsQg2Qw_`iT9hru88%-wJmzNgNvULD~_EKwm;tVC!MBUeJX4F z!Ef>1sogB$3A-xX_R#O~(#s(*K<|o*uuchgeGK%eat9kG(`S*(dL6&7*mkn=C z=u<;HnRr>b6>r5D|L^R|VhRaAEw^3Ilbgj7t}s2vR@trIc}!)u|5@=y%gU)7I(JHp zca=Eu{3RZ0FZ@X(2PO_585@6VqE9pW*vj(Qf#c&xPmYz#BFe-PE}wE-nK`f2OxqT9 z?$~^| zJ5-^k%z9IxKdMpTSX@!%a$e3LZ?(;{p{nvh6AuUS1?B zW;Uu|@|?&(X0_@H&y%g}Zeflzki$M5lF1q`(qKM<;J!$Fh`rHLEG%CNyXA~YCuL4yGi`x1ry1w`yiL4VQ2mV}py zc7&P5kT>Aw2*UR=y*L~}WxRb<%_eUou29dda5i~ZUZLa`wi*@X^&TU!K=duelqWD= zFHr9)R$>YHS`P0jmaLT-NJSwEUc($bK$y<+Z96g8w7n(2qMLZh1n`wBP(B&vdv2V7Vp-rt3C{(8jcdS7zd04 zi2{OpF_I`C#<3_RI;@EC^1760P9+|E@xq1p7qQeQjMJ@PK57ZCM*_{0T*B*8psy=t 
z6mUc8bEjgCMDN#3NYVGjY{N%CaGk=>saiJqiSs&zNKJww&}3d|@`lu8OI{pc#JeFy zxlP3bx=GZZDex1udBqkKfUC)CW}_i&sJ_E3H4F2IJBB{oq|}*0E1C#jP9q zGW4GL0JT_N3uCO;y|yE))EboZIG>z z^pSffb3M^g*#zsBCmpFTN9zWN)R!q-w}O$lP54p;r?3Fa+mzg@j&MP0>W9I5_o#?B$xISQzyUr E2Q#pUOaK4? delta 87 zcmeyhU48m|^@c5sSx;E5RCC!(chqN+oIdY3i}Lo>PZ$Hmwmbc23}xhe=FP<}#LB=h kW$OZ_`>O4;^_YN|8Hibcm=%cGfS4VKIkwN%V!Z diff --git a/proto/penumbra/penumbra/core/component/shielded_pool/v1/shielded_pool.proto b/proto/penumbra/penumbra/core/component/shielded_pool/v1/shielded_pool.proto index 02251a7800..f93427e8e8 100644 --- a/proto/penumbra/penumbra/core/component/shielded_pool/v1/shielded_pool.proto +++ b/proto/penumbra/penumbra/core/component/shielded_pool/v1/shielded_pool.proto @@ -275,3 +275,58 @@ message AssetMetadataByIdsResponse { // A single asset metadata streamed from the node. core.asset.v1.Metadata denom_metadata = 1; } + +// Metadata about the packet associated with the transfer. +// +// This allows identifying which specific packet is associated with the transfer. +// Implicitly, both ports are going to be "transfer". +message FungibleTokenTransferPacketMetadata { + // The identifier for the channel on *this* chain. + string channel = 1; + // Sequence number for the packet. + uint64 sequence = 2; +} + +message EventOutboundFungibleTokenTransfer { + // The value being transferred out of the chain. + core.asset.v1.Value value = 1; + // The sending address on chain. + core.keys.v1.Address sender = 2; + // The receiving address, which we don't assume anything about. + string receiver = 3; + FungibleTokenTransferPacketMetadata meta = 4; +} + +message EventOutboundFungibleTokenRefund { + enum Reason { + // No particular reason. + REASON_UNSPECIFIED = 0; + // The transfer timed out. + REASON_TIMEOUT = 1; + // The transfer was acknowledged with an error. + REASON_ERROR = 2; + } + + // The value being refunded. + core.asset.v1.Value value = 1; + // The sender being refunded. 
+ core.keys.v1.Address sender = 2; + // The address that attempted to receive the funds. + string receiver = 3; + // Why the refund is happening. + Reason reason = 4; + // This will be the metadata for the packet for the transfer being refunded. + // + // This allows linking a refund to the transfer. + FungibleTokenTransferPacketMetadata meta = 5; +} + +message EventInboundFungibleTokenTransfer { + // The value being transferred in. + core.asset.v1.Value value = 1; + // The sender on the counterparty chain. + string sender = 2; + // The receiver on this chain. + core.keys.v1.Address receiver = 3; + FungibleTokenTransferPacketMetadata meta = 4; +} From ce17c3a67749cf79dcc797a4205647949cfe793c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=BAc=C3=A1s=20Meier?= Date: Mon, 7 Oct 2024 13:44:29 -0700 Subject: [PATCH 31/43] Transfer events indexing (#4877) ## Describe your changes This implements indexing for IBC transfer events. Merge #4874 first. ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > indexing only --- crates/bin/pindexer/src/ibc/ibc.sql | 17 ++ crates/bin/pindexer/src/ibc/mod.rs | 223 +++++++++++++++++++++++++ crates/bin/pindexer/src/indexer_ext.rs | 1 + crates/bin/pindexer/src/lib.rs | 1 + 4 files changed, 242 insertions(+) create mode 100644 crates/bin/pindexer/src/ibc/ibc.sql create mode 100644 crates/bin/pindexer/src/ibc/mod.rs diff --git a/crates/bin/pindexer/src/ibc/ibc.sql b/crates/bin/pindexer/src/ibc/ibc.sql new file mode 100644 index 0000000000..66e6c40a5e --- /dev/null +++ b/crates/bin/pindexer/src/ibc/ibc.sql @@ -0,0 +1,17 @@ +CREATE TABLE IF NOT EXISTS ibc_transfer ( + id SERIAL PRIMARY KEY, + -- The height that this transfer happened at. + height BIGINT NOT NULL, + -- The AssetID of whatever is being transferred. 
+ asset BYTEA NOT NULL, + -- The amount being transf + amount NUMERIC(39, 0) NOT NULL, + -- The address on the penumbra side. + -- + -- This may be the sender or the receiver, depending on if this inflow or outflow. + penumbra_addr BYTEA NOT NULL, + -- The address on the other side. + foreign_addr TEXT NOT NULL, + -- What kind of transfer this is. + kind TEXT NOT NULL CHECK (kind IN ('inbound', 'outbound', 'refund_timeout', 'refund_error', 'refund_other')) +); diff --git a/crates/bin/pindexer/src/ibc/mod.rs b/crates/bin/pindexer/src/ibc/mod.rs new file mode 100644 index 0000000000..9bc6d795c4 --- /dev/null +++ b/crates/bin/pindexer/src/ibc/mod.rs @@ -0,0 +1,223 @@ +use anyhow::anyhow; +use cometindex::{async_trait, AppView, ContextualizedEvent, PgTransaction}; +use penumbra_asset::Value; +use penumbra_keys::Address; +use penumbra_proto::{ + core::component::shielded_pool::v1::{ + self as pb, event_outbound_fungible_token_refund::Reason as RefundReason, + }, + event::ProtoEvent as _, +}; +use sqlx::PgPool; + +/// The kind of event we might care about. +#[derive(Clone, Copy, Debug)] +enum EventKind { + InboundTransfer, + OutboundTransfer, + OutboundRefund, +} + +impl EventKind { + fn tag(&self) -> &'static str { + match self { + Self::InboundTransfer => { + "penumbra.core.component.shielded_pool.v1.EventInboundFungibleTokenTransfer" + } + Self::OutboundTransfer => { + "penumbra.core.component.shielded_pool.v1.EventOutboundFungibleTokenTransfer" + } + Self::OutboundRefund => { + "penumbra.core.component.shielded_pool.v1.EventOutboundFungibleTokenRefund" + } + } + } +} + +impl TryFrom<&str> for EventKind { + type Error = anyhow::Error; + + fn try_from(value: &str) -> Result { + for kind in [ + Self::InboundTransfer, + Self::OutboundTransfer, + Self::OutboundRefund, + ] { + if kind.tag() == value { + return Ok(kind); + } + } + Err(anyhow!("unexpected event kind: {value}")) + } +} + +/// Represents the event data that we care about. 
+#[derive(Debug, Clone)] +enum Event { + InboundTransfer { + receiver: Address, + sender: String, + value: Value, + }, + OutboundTransfer { + sender: Address, + receiver: String, + value: Value, + }, + OutboundRefund { + sender: Address, + receiver: String, + value: Value, + reason: RefundReason, + }, +} + +impl TryFrom<&ContextualizedEvent> for Event { + type Error = anyhow::Error; + + fn try_from(event: &ContextualizedEvent) -> Result { + match EventKind::try_from(event.event.kind.as_str())? { + EventKind::InboundTransfer => { + let pe = pb::EventInboundFungibleTokenTransfer::from_event(&event.event)?; + Ok(Self::InboundTransfer { + receiver: pe.receiver.ok_or(anyhow!("missing receiver"))?.try_into()?, + sender: pe.sender, + value: pe.value.ok_or(anyhow!("missing value"))?.try_into()?, + }) + } + EventKind::OutboundTransfer => { + let pe = pb::EventOutboundFungibleTokenTransfer::from_event(&event.event)?; + Ok(Self::OutboundTransfer { + sender: pe.sender.ok_or(anyhow!("missing sender"))?.try_into()?, + receiver: pe.receiver, + value: pe.value.ok_or(anyhow!("missing value"))?.try_into()?, + }) + } + EventKind::OutboundRefund => { + let pe = pb::EventOutboundFungibleTokenRefund::from_event(&event.event)?; + let reason = pe.reason(); + Ok(Self::OutboundRefund { + sender: pe.sender.ok_or(anyhow!("missing sender"))?.try_into()?, + receiver: pe.receiver, + value: pe.value.ok_or(anyhow!("missing value"))?.try_into()?, + reason, + }) + } + } + } +} + +/// The database's view of a transfer. 
+#[derive(Debug)] +struct DatabaseTransfer { + penumbra_addr: Address, + foreign_addr: String, + negate: bool, + value: Value, + kind: &'static str, +} + +impl Event { + fn db_transfer(self) -> DatabaseTransfer { + match self { + Event::InboundTransfer { + receiver, + sender, + value, + } => DatabaseTransfer { + penumbra_addr: receiver, + foreign_addr: sender, + negate: false, + value, + kind: "inbound", + }, + Event::OutboundTransfer { + sender, + receiver, + value, + } => DatabaseTransfer { + penumbra_addr: sender, + foreign_addr: receiver, + negate: true, + value, + kind: "outbound", + }, + Event::OutboundRefund { + sender, + receiver, + value, + reason, + } => DatabaseTransfer { + penumbra_addr: sender, + foreign_addr: receiver, + negate: false, + value, + kind: match reason { + RefundReason::Unspecified => "refund_other", + RefundReason::Timeout => "refund_timeout", + RefundReason::Error => "refund_error", + }, + }, + } + } +} + +async fn init_db(dbtx: &mut PgTransaction<'_>) -> anyhow::Result<()> { + for statement in include_str!("ibc.sql").split(";") { + sqlx::query(statement).execute(dbtx.as_mut()).await?; + } + Ok(()) +} + +async fn create_transfer( + dbtx: &mut PgTransaction<'_>, + height: u64, + transfer: DatabaseTransfer, +) -> anyhow::Result<()> { + sqlx::query("INSERT INTO ibc_transfer VALUES (DEFAULT, $7, $1, $6::NUMERIC(39, 0) * $2::NUMERIC(39, 0), $3, $4, $5)") + .bind(transfer.value.asset_id.to_bytes()) + .bind(transfer.value.amount.to_string()) + .bind(transfer.penumbra_addr.to_vec()) + .bind(transfer.foreign_addr) + .bind(transfer.kind) + .bind(if transfer.negate { -1i32 } else { 1i32 }) + .bind(i64::try_from(height)?) 
+ .execute(dbtx.as_mut()) + .await?; + Ok(()) +} + +#[derive(Debug)] +pub struct Component {} + +impl Component { + pub fn new() -> Self { + Self {} + } +} + +#[async_trait] +impl AppView for Component { + async fn init_chain( + &self, + dbtx: &mut PgTransaction, + _app_state: &serde_json::Value, + ) -> anyhow::Result<()> { + init_db(dbtx).await + } + + fn is_relevant(&self, type_str: &str) -> bool { + EventKind::try_from(type_str).is_ok() + } + + #[tracing::instrument(skip_all, fields(height = event.block_height, name = event.event.kind.as_str()))] + async fn index_event( + &self, + dbtx: &mut PgTransaction, + event: &ContextualizedEvent, + _src_db: &PgPool, + ) -> anyhow::Result<()> { + let transfer = Event::try_from(event)?.db_transfer(); + create_transfer(dbtx, event.block_height, transfer).await + } +} diff --git a/crates/bin/pindexer/src/indexer_ext.rs b/crates/bin/pindexer/src/indexer_ext.rs index 8ee1939318..6a7c5e8208 100644 --- a/crates/bin/pindexer/src/indexer_ext.rs +++ b/crates/bin/pindexer/src/indexer_ext.rs @@ -12,5 +12,6 @@ impl IndexerExt for cometindex::Indexer { .with_index(crate::governance::GovernanceProposals {}) .with_index(crate::dex::Component::new()) .with_index(crate::supply::Component::new()) + .with_index(crate::ibc::Component::new()) } } diff --git a/crates/bin/pindexer/src/lib.rs b/crates/bin/pindexer/src/lib.rs index 2e17584475..42e319d134 100644 --- a/crates/bin/pindexer/src/lib.rs +++ b/crates/bin/pindexer/src/lib.rs @@ -4,6 +4,7 @@ mod indexer_ext; pub use indexer_ext::IndexerExt; pub mod block; pub mod dex; +pub mod ibc; pub mod shielded_pool; mod sql; pub mod stake; From 4f0d1f7ef9c7922d7ae184f18b86f7852c55cff9 Mon Sep 17 00:00:00 2001 From: Chris Czub Date: Mon, 7 Oct 2024 16:54:14 -0400 Subject: [PATCH 32/43] Fix IBC asset regex in denom metadata (#4872) ## Describe your changes This fixes a bug in the `ibc_transfer_path` method that did not properly handle all IBC denoms, specifically those containing slashes, for example 
`transfer/channel-4/factory/osmo1q77cw0mmlluxu0wr29fcdd0tdnh78gzhkvhe4n6ulal9qvrtu43qtd0nh8/shitmos`. ## Issue ticket number and link Discovered when investigating #4834 however this doesn't close out the issue. ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > affected method only called in display formatting right now --- crates/core/asset/src/asset/denom_metadata.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/core/asset/src/asset/denom_metadata.rs b/crates/core/asset/src/asset/denom_metadata.rs index 4e059468dc..9ec45b1b1d 100644 --- a/crates/core/asset/src/asset/denom_metadata.rs +++ b/crates/core/asset/src/asset/denom_metadata.rs @@ -371,7 +371,8 @@ impl Metadata { /// if this is an IBC transferred asset, `None` otherwise. pub fn ibc_transfer_path(&self) -> anyhow::Result> { let base_denom = self.base_denom().denom; - let re = Regex::new(r"^(?transfer/channel-[0-9]+)/(?\w+)$") + // The base denom portion of an IBC asset path may contain slashes: https://github.com/cosmos/ibc/issues/737 + let re = Regex::new(r"^(?transfer/channel-[0-9]+)/(?[\w\/]+)$") .context("error instantiating denom matching regex")?; let Some(caps) = re.captures(&base_denom) else { From 7694c38f9f869bbfaac9bacbbee6bae6f724e46f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=BAc=C3=A1s=20Meier?= Date: Mon, 7 Oct 2024 14:00:02 -0700 Subject: [PATCH 33/43] proto: add extension trait for easier parsing of domain types (#4886) ## Describe your changes This allows creating domain types for each of the events, e.g. `EventBatchSwap`, and then doing `EventBatchSwap::try_from_event(&event)`, which makes the pindexer app views much more ergonomic, especially because we can avoid having to duplicate event parsing logic across views. 
In subsequent PRs, we should create domain types for each component. We can probably do this as needed, when we write app views touching particular components. ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > Just an internal refactor to events code, so doubly non breaking. --- crates/proto/src/event.rs | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/crates/proto/src/event.rs b/crates/proto/src/event.rs index 0c24a6ce48..e7e69fefa4 100644 --- a/crates/proto/src/event.rs +++ b/crates/proto/src/event.rs @@ -1,4 +1,4 @@ -use crate::{Message, Name}; +use crate::{DomainType, Message, Name}; use anyhow::{self, Context}; use serde::{de::DeserializeOwned, Serialize}; use std::collections::HashMap; @@ -121,3 +121,25 @@ mod tests { assert_eq!(proto_output, proto_output2); } } + +/// An extension trait allowing for easy conversion from events into domain types. +/// +/// This makes the task of writing code that processes events much more easy, +/// since you can just attempt to parse the event directly into the specific domain +/// type. +pub trait EventDomainType: DomainType +where + ::Proto: ProtoEvent, + anyhow::Error: From<::Proto>>::Error>, +{ + fn try_from_event(event: &abci::Event) -> anyhow::Result { + Ok(::Proto::from_event(event)?.try_into()?) 
+ } +} + +impl EventDomainType for T +where + ::Proto: ProtoEvent, + anyhow::Error: From<::Proto>>::Error>, +{ +} From ea7e231c2d9afeff1d2751e6a1b7c22f605652d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=BAc=C3=A1s=20Meier?= Date: Tue, 8 Oct 2024 11:49:12 -0700 Subject: [PATCH 34/43] Implement an event for candlestick data (#4889) ## Describe your changes This adds a new event `EventCandlestickData`, containing a directed trading pair and a candlestick, allowing us to emit events for when trading activity producing a stick happens on a given pair, at a given height. ## Issue ticket number and link Closes #4868 ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > event addition only --- .../src/gen/proto_descriptor.bin.no_lfs | Bin 102002 -> 102050 bytes .../component/dex/src/component/chandelier.rs | 8 ++ crates/core/component/dex/src/event.rs | 39 +++++- .../src/gen/penumbra.core.component.dex.v1.rs | 21 ++++ .../penumbra.core.component.dex.v1.serde.rs | 112 ++++++++++++++++++ .../proto/src/gen/proto_descriptor.bin.no_lfs | Bin 646138 -> 646769 bytes .../penumbra/core/component/dex/v1/dex.proto | 11 ++ 7 files changed, 189 insertions(+), 2 deletions(-) diff --git a/crates/cnidarium/src/gen/proto_descriptor.bin.no_lfs b/crates/cnidarium/src/gen/proto_descriptor.bin.no_lfs index f41da7426e0409c58117d4e9d6a186a1632aff36..383649ad97d9ab3daa8efd3942653231e8e3d5a5 100644 GIT binary patch delta 10206 zcmY*fdvsOhmCrf%eBXD@xw*;By-9%ZN_d3#OKnF)>NtS4E=M|3r?ZxJIU>eN>nN2t z)tTwKBLgBq8p8pCghq5MAdB!=7sTiiwJ4fOkQt&u$qa}FR3r)tni87%?eB5U=|8OO zefIu+d;j+So|p3*__M#`wts1tJz%u8B&Znu82)``^a}htsha-YUHt_9y?)HUTTAl$ z#$4(8EA}URkzv&Z_4!LvZm7*Y%Pl|YC!9hnL2aLB?6a>^T+rMCC1W zZ=G{Z<^0-57S>cQSX6m;&B6z3=Fh9GxqEWuKP;-Lyz`!#d9@EcShHyI*oAjZp1*M6 
zxXFuZA6#(%&;N-$?yOvJ|DxKOd3R6tD{sBGM#DU?uy(-%_v2Ik*UwF|a`bKP?77!e z{`0(F8k2eRYag0--+hn7$MgM^&nP%YMi+{l#AMohMJh{JNx2adBo3-DqD$i7*+vu@ zXX+9*bCzF3c{>FezLeRkSVDbw3IMj0i9&z4&*?J}vn;r8Q`N|2%<=tVUns2&HA@xw zYZKZIe)ajCo7P!Bsb`KG8d@-{XURegr|=**FsCduu;BKoos~sKENv*TK*4&VU_Fz?1>)v>%hnBcZgT`-gT>7(S)w5*j?FAH z(1?K8%mxoJB3RtahF)Pru(+9xou)~&GJFfOcZDQUI9r%V`D0|#7LmA>Ihl&!?UvCa z@K;J_i!X?6D@&L746GpVe&4|X9xEbV~Fe?jwwY|J*XWZ1LPh|R(FOx?i zZTT(R2eaGTqU00cuPx#)fl=F7x>To^ARjisaFIarZEVnZ^A*Xru~Fmv9hMvb&3Pyo zwtLs@ha!9j_=h5V2l$6rdbmb#fPaXM7-dA@A7Z1wZ$#i9VpG58f2!~S)e(%}^Z9KZ z5k4bjgwKHA!P2JZ8Sp#U&|w;o0l$L{pK3(lcd+qO{Oe@+XoJq+{ui#ly)%O6fbWdp zIp8}ZdN|-a*?>VBj|0AwRZK7<;5*sq@qVKUAL6mV-}}@Z$0B?I{9_Tm0RAzSHq9o0 ze~b;U(trZ^$JmG~jYxoO(iQ%j65p{i`~0ME=c98cvv(6p@_mt0Q9SPSYYa0RGWlNQwj=G8z|s$4zv-@y~`|>4Uwre zFSAlT{m2hMm)Ve^db0T$r=Qto-SZI`L_>p*u=-ijtYSW}`dNjUnLd#ES<$2^{!BS< zlNo0*4?YU64(j*u>lWh>ruj+&W-(8e84)mxdAV77lYm*wt48{>C7akNmvXx?=-4+a zvy_9y4ohY%cgW?zxL4nwy&Mea1fd1^avo9#1>l$SvN4eb23XF=Pw;=OfncyQnDyEt zvscD}EC8*H0}*vA<3L2+N&@;94TP3zSoQkDa~k3}HcHUI!xC6>syFb`C?Fd+4TQ7L zqz1*>;FZ^3%dL$gB@oftcto@WtmS2T8A(_+?ZRvMn6bK;g&AiZx2I^H3sFpnrV52X zTgSs<(&=^`uP_s!5J>BI(ZtFATv^Oi#%bcg-`#xE^@Vl{Kur-)3P4RfY3i2(P!k_+ z_MQ}gn)vsp`?t{%8eB97=idCsX@v~SY%@>k0|TQfLDWYpM*B=NFB|9&Aur6$jls}2 zhE3eaaV(SvT#p>$A{?YRT{}Dz{QQj>H$KCYDeXYzi;HlOVxez4sSJOP7qsYuK4tJ6 z$3dU^I7R!xrr`58uA8~Z@W=Km((jY3*AR?u0L>i@3l;JygLAy?=2+%vY zoAvKRN=1}XK3H?$u9m4fnh02vI~iwFXzcb^5l} z2!R%x)(C;pY32Ha1rH#!a(%*rh&8>H>k}44>P9Ul9uaG7rXW92`9PHoNL& zQ}rTQ^|*+PrT8}O??tp{?&tR0idH-L&Bjx~jX06g3^P%w{@71pu;#FhCYaJ=`iJmh>$u= zc_D(n4pUx;C~pUqm!?!PG2cn-1smJ>%+6>Cnh>3aJD8E3;SM5(=p^p8t00k1SMYKB z&fB^o1gddY6eNw*#q}|{Sk}0UW7niL1R{d$;#KO31`$Da@rl#?8)c9f?RaqZ&`mRs zNAOhK<2+fd0jRjg`54tg#j?1^sfB3GET-a~RK?x*E}wBSLZRZGG{uD-P)?fSLd3u) zO>v8nPq!>?F%`Gl6c-Y_Agba*1RLF^xakc4lowpq#ZANVQ|^{a^3zn?Gjd;^g})Kw z850;p1a`*o4-t-M4F3=j*csZHKcH)Nnz+4~AO7FRtlKX}5VQzfGzEYS8JRAD2#AY( zsJhibL;)`H%4w2Lh$z5CK4GflFHN(#SMm4s5k8|g8iNjD`8 z!S{|%&#e+pkvu7p8C77FD9Ol!Z3(dWYGJPts{0|Md7x0sD@0Ufwa|xTh!}dc7&1xL 
z79to}EvBlu1`!Ob5qz}y3I^7Ql2L`SM@wknb%M^0v+y^DSr-jM#a}1%=@DNs%sQb@ zj}S4;I)NUgDOf^Mlb~Zuu+1))XQJM&|@ zZnAPM!Y6(?>8ZZkg)>4HK*~rR3U#`b3dFKq=)JA7|`c{w8LBaJ!VVjRglthC>FI_K2LDBO6qR4@6+Lb^Fpk9o~hH6zb|P6)5N1LSOw!0fRQ-sNR&aQmFPNMUEa<+C*_W90c+o<1T~&MdU)u z%OB$&NbVis43zy&m{eRS)PgA$@OXz7OuFJ{kp^!UcBgPqvK%7?$b?3b2@zDci==9L zh=6MsgT}}LL&R|H0{6XQ)ElBpXcIfZ8m$AQD8X~4&@TieXTjUMA#bDr;$h(o^V5EQ z;3tk{oeqX}-;95M(f!xClfwB#FqI4z+EO@o3ujiOfb%KgT(6R$*TBz&eKsr_(e;@S zLo}cARq%7+jMeO@S3xM$l9F}QtKjDX%`fB6K?8_0!YK}2XhnfS{Te_nRJ1b!zXnJb zC?J0k6-~VgLSc#qg|v`mrV=3P9$|kgOF%5rE&&B9kdv&^Hqj%J>Zpg$2%tw)U9C$% zQ@uybxE29QXOy5v%=|ViflVd&GAe;BOYl`x0w{oZPL+U)X1nDXeqK1=h@7F`megMm z&KylW6mYsowUB*8?~%R2{&Pq@^-e2nZ9`UeKWB(9)PzsN`h2I&*}>J0^`zDz^tF){5S zqn5OPWSv|>&ij}K3Lr>@nm@?9U)YbkG=GSHLWIIhAKIMzg?^gASH#mV^wR`H)UIDl zoTO`qpBex*`El1wpIm~9_aAOJe-g5IkGf{vv7rFuVpq-|Dqez$_n)q_)HStB$l5*T zVwT9PU_!N9=7zO%GQ7^Ue~V#d4h}8jbuJ#$bBY#+7P}Q;6b@uqK%pM-vZMejD_mSh zj0X&!aKm-Tkvkt0W*ve89#6P(9YWlIfV>h<;+lFn^Kzt7KNm?0X@Vbj6!i$Z-nAP; z>S=F+!cb3FlX_Rbr)TB%T<;E6KU6>j_4V%LDVll*)YrRH8$;?HVm|j|n6&Lk>Kh{J zp+J(WBI+HYK5(5iA@%r)70j)5otst2*e>}~uKl}^Y1-wUas_2XBrqa+eZ*@H3Rpm+ z&c)IKB&>J!lRYV5(B$f)u9TI+(?r4m9!+jxDjWpz1~;5}OwJN0)Qd7fP%SpNgDU7X zomMD>-R#<1LZ)f!hQct-z;v^#&#|yZ0L^Z>+HWC(>1G%Ety-Y)dlbN`w}ebHV*2Tb zXeNo?NQ#_lSZDzG3^5%l5aF{C^^BMa` zyI3A5vL1Nf>L!cS1dwktB;IOdjHW*)oGjSb=Gw1-4LXcu=^(w$O{u43NEij8x{N`D z>o&J+kUtWyL2z|m4Sw~-=!vgJ4z!^_G!9g)SIOZT?eJRghcB+3@|v5|3r|;qRoQdMO>%54v_|{+B&dteS%^UfZeXpr@W|H}p#x z)rCUsS5krcw!8WvS}G87yQ|;ur2;i>r|Ex-#)QG)p#RGs&psRnqI=b0H>H}H{3!Rs z?!bw%Mp6Nw!|s%+{*N^fk~^2im??=J7TSbeAm$%TUdONDfS1udhnz%LN*(;BG9 z^fBi96g0I>Dbaq^wWld#dF+CyrW8IR!J}?c9lIe~RV0s^XsnUlQKD%v-91Vv`>wz_ zzhq`t1VLnXxk=SEFd-W^t4`k#fzajR>hYLDK4-=}%p}&@3)NWM!1)>U`}tOWZqFdpbKP0leDN7b}PWUhS2t zUw|P3c(o_@5+G3HuJPc`SIs(s9)V0_wB&z!`H956EuL;$sm5$;@i1+u zU{V3k7B8#XPbx4{i-)x|w7^}y#TzmKhf!C#(9dx#-q7*>|JZsofb8CTlV#m%<#xx1 zqpRNT_;AGd?)Y%T`R@2|^mMR0KAc6v?WW;=U#N$JqT6%ERje+o{Lwv?we(BxPab&a z!TB|_9@slHVWoKvpVeHYl<1n+?dfZ^R3M6OuT=fAA{B_D+rw3n+L9EIPkGMxkOVry 
zKoMcmeeqNrla4W`yi!`yg_DyQ5@hVS5EK}vJ?DxLls0E5B2c&Ut!|f{4M+dFAR%4G}|~^G1$B_&}hRJMY=Q^;OG3hV^;RFZJu` zXD!Gy#)ZbSNvk1u!6YTsNa}*=0jYrJ1=9mkfsrnl9*_$3zy;F-N`aDJpdOg32Lr}; zo-?ZAy2>BjU2}i!f_oOo#A_<=S#V$D zAB&2b=uuzKM)IU4deqOVQ-@TLr;p#Uf1#q1|2@lho@o48v9%z()b~pK8!#15+Hzd0)%e?b!SCnTo;^C%a7R|Yo``=sol|7uw}^> zprcbW)f=T!pcxE2nBGKDM9DBCUnU@lTdNLBG!CGvkqjmyni->0;ZqQdeC__u{dlkE zA1dd(bAI=n-#PcQ*|LeBeU7{G;H&P4iOv^6)x-w;`{Kl>@$ZaU`g>1pBmTX0($AbP z+TWOTy)XOs$D~NJ+M*5Zmu7x%WzCD+kqH^|ik-A~+4IZg`k3^*Vz(&mJ;~f_tDcOt zZ-4n#CzqWuBfDbR^5u_D&;F!t(nAkrS1x}%yK-4}>BILw^vDBsGP`W$lDg&Dm5={> z-3lnP|C)X9k(G}uuggAKw|vFYWe-KoWw-Renl8hva(7m#z-7hCGRlK zKFOnJ*w2E*+Uc@{V)lyCd=+!ov6%ks6#;A&6UB0zy4#QL9L*MWN6E*)Uw6b`45N0lRJlnnMm}tWnV8HKTW3RCy@O#*}Sylvo51Tqu-lD=s8}tUxzjo`Q-Uyxp zzBhvBfbWgy;ehXDBSslK4)|VHHO-2E?`0FG%DfIAVqb96zUIY!5xxL^UxY7!-^Wt6 z*#z+W*tlu~D1hI`YOc2;0kRp_$u5QOxoQ3>bN>{~?l_S9l!;0+(_Cu0&zM(bS`9Kr zfx=EbD9|aNv5})f4;Ty-dQcxj5qVG>4itJo{v-27nK5#%OVbFN@^DmY$3Idp6)U!p z%OG=YEBWXhh;~-{sKp>l*fRRS8f2qwoA{t*kWHE*cen(FCgR7Ldx6EOo^!MJEm^uE z+kURI+Q}VfVuVR4MkJhI-WU@OWMD!uB2pl}6Rc`<=mCS1QA#nFNPxm-1O+@!Qbu}7 zi&2KBm^Ze&KNC;XA5(u#)l%xox|g7VeV+gbA_1-C#N*F?P9Kg z%Q>2imM*lw*M+%LnW2bs_u=t%Veav?cb<8pLptN|fTA)|z~ekE_-L{mHcU7SGk1!a zF>!DQ(RNuJSq`(rX!{jl!)(lWE21>RtadWOMU%y_KVD?+6}z;@sDCapX^J0H`{O0% zRqIel{ys*7ra01Iyh|);NF)20iLkD+gk8lXu&%NyJ2NGauCkID zGvz!rZxd;+o(BWLjlr95@LTF}fHHg~08`Hs6;=dHJ+HJ&Zvrs&yn4J`pxDGlxr)2L z47@kzr&n>X_>rSBRy*YC;2+=YU9cJqm;|8(_-Yd@?KTVWZG-t!Qb6==dHzV5=E#L~{skACg6g+_G`pB#o!!Kf=ET6LOc2f4iqS^1iC2u2W5^4W z^M&B<1J_J{f#Y1L4ykc+C=}tK#5u-cbC5qU_x8;^ku(lezd{iXN-UPHmrV1Qc+qxq zN~a87;y9&KBPVG?*cw!I-7;^h%^5o6yw&zFM0jkqJq!^!Z{^iBin}Du&25Ui;Il41 zcUy!)J+{p<2RmhGn1cwEZI-ztn0p1x{nPEQSkjV;G>Dosh)C%bB8^fiN%Nh&=#WXN z1kgLVpOOE9lu9V2wxH|nAJ1#E!9a%*+PFC&Lxe{gPw6!eB7$k-*Nl^cbcQIQoH~LB z-uZH2M}$DDO-F=4>2z>&#DWJ9I=DGvLByio!Oam1BDRGNjw9AM)UN~uf1L+YHjfV8 z>Si~*ZmV9Rs$M7}V$pTh-|XKi~XC@aemY7eUq?kFSVte-t0(u%D;& zArfB^-+n&lCeWQ_Pu;o7ZRWAQ;gw>v`dfYNPS19m{^wv!4XP`Yhvlpw8xG^y?>p-FYn_6H<* zLDc;L5%e6i{ZX3c@A0BR(;ua9OU7 
zh>&`0c_D(n9$Q|BC~psym!?!HG2ct<1^;`P&+Cnbpb61yxq}(mS?(ZWh+g7urw$V7 z^aq*ub}#IY5U9reQIIrJKR4&(QdQ%Aj%}0H5Qqq}pI7TU8bk!y&!^9px2qsA+Oc5l z`*+Se7Qs_-kMTsU0ifa@C2sS>q#Z9I8m%Qk*DQ*guUvj@vk)NX44ylcK zKK@3GLpCso2yDpm4-t+-mVbx{Y>2kzkLjMBB5u#O-}^zov*>&TL5sk7TL9RQk?j(Q zfH=>`>T4ZD6yQA1&Q^3nL;=q8X|oi6DVoj0n!opZ_}t-W3_5iVTmE2&F@`OF5HZHE z$A0&7|)cirjXqPO1Wodqu7dy#lQ;RUC7D#a0Buo!MMA(}IHsGgY*n!h&-YSY-Z-2e-DJRn^{L%6t?NVvFA#4}! zuJ#}O;Z7&FT}a}Wlb-6kLwGf+07^#cQ0U{WQXrNc!W<@*0uk&G=fSk(15B2g~80x9RQ#^DGPe5BTy>cPLaMQQUG+Pz{*Qk@-&6HTexFY?H~aVqHY6- zV0O1iWb9X9?H1+woQJPK+AZ)(_8hf(TCIJ1f|rgxpW72bP(XWxd1y87`<_#6LfO<`o=}RC)-9iv{3Y_)e!|n_w^1@W-CLlS{>92x@;&+r*QS`O}pqg5-rm8^n%qHMMW3+ zPLbA6O0Wi0r*83s`3yT4MPobL#8_a_Amx`n5EQ^{JP+m{qMdR^%jrKxZb$nO@q5C#;H z3oS427J4AL-wAJ|>UYAV;zFSpOr?Ow?`XlKJAMXf@I%7=z3@=793utDghrDI5mX-% z3ElJ%0e47@nxqO05yKr4xbKys-Vl94oBn&SM(Y46%J7~k%o7308SwUA$Qvnu_`dM2 zktx}J`{$nH{5iOL@Gkt@GWa*SKM8M8Fr5q*+HyF5A-wsK0?uCw?^c}*JqHd6_f%Lk zqH9QqF@{g|EO=aalMOriSr7`nq+~q(EO=a?`BnTmXaI3Sc%`8Wtte3Fp99E+igrTa z=K$pb1>}=a(e$$*6t-wkC<|3)DgmNCE!=Na35Z47C7?hBa*9>jCQgflKI-8!0yr(I zZ!{&KseW3_y%_;2XO!TynD=d10+&i~CMtoeO7K-w0w{oZR+oT^=K7Us{c>bZ^ut!hg95Bv z7Pyyb9`V+g&UsfN>h)_R6qb7Z8hIt6UcE+M73LQP!X&Ps&_Bs21qQh)%+(tTQ2l>2 zgc%dl9x`f4`$xvh#nikn&_DqMrO@*SdDr{y(>~1~;-3(qu+xV&=X&2fP2elysrSv( z1Vq%X-k(0h)DFKj0PNbQL86Ms&BdsAPx#^diK*f}>DzV3g#wVPd^LZlcrhwogYT{O zZS7*Jc58gh5|tH9sCG~JVePy$|Ap_a3u{Nq_%D3Crsp&*9xZlj!zet+uz*6p;$=ty zR@VBsj#v*EJmZJ!kf(M&DC{}}1w5Yd)jEW@0Rg!QZ{mh}HS=<$(Z3fd3uS^|cQo|~ zd%f>I8&Xeu6BL$ux|*!_&4YSIZO`ld(fXGPh@gJGf9*^|y$9;o`?H=6srQKaTo5Mh zdW!lD5%o|Y$&C^99#Nn7z2=a5{5}fiT72&=9Wu5{zR7oA44I}~Zj&!4BO-wj(a%S` z=AeKDH2PeuEI`8ZzIn4J1q@#B%~4m$S`jxy!T=sG_{GU^5XhVTaON>JOQ6tC$^=2R z*zAw0qUUs4p%C^K-`x>1O8a!Oi}-e@7){vP}gtwy}wnz zUuWTQO^`>c%7%;%Vw0UNLS?tZXAhmi~&q+j)u`Yjm} zMuDg=V-Vr`s$Vfmj>l^dT)hLqizg;dKM*<4h62$#P_+(_!%fDaD>!@N=9yi7GOeRg z6)zOwpv3Ji&NJoMf&HQ{+W-0FG$(V&_p9V?I;xk`QT+qo{X_e+r)D~JANY7~r=EkJ zdhSC@zmid1DD-}%6sYfqzIlmO3WWTjZyxcL0yX}SrvE(#69z|unP+~m;7B15-K&oH 
zN!`rkN4X#IM^0BYQVIYa@n_DGKQKT@?r88oXZ}8Sv=E54#G{2kGz*UUBWpt-+7^%c zQ>H4;fQ)kY(duH7rh@nRW}&CgRJOlVU}UAx9=Rg=;ib>V>O(z9E)@JXe)pP zeuC(yHPDPHF=jsnO>I+7w148ev$e50c0trr3LlZ+Cw@X7yCFK&Bu|)bt&!a)MAH*= z_b8|A2Z9^Uu9`OxK@iykenNK*OvuL1=+ieuAPo4pdNgPRggzLYKfC(7gAoF;JQyL+ zD1#9KF+4~JeGdArBF#_wMW=%QI`?tvq>mSxC(0061!dXX{_@wW{Kd_2lO2=*ZjPIa z6+{3x$IJCkzz_l499Me@5U6om;_k_~K3PD9bxRyq&8Gx<1Tu}$*4}WXF}9>FZrWC< zG27bWm^M@}rGRH!JfquBDKJu79BXN4fxCQLe9SZ)Mt$ut9xs0#OXbaaE+YBn9N- zac^o!0v%zXh%o8Cc)SpkjxopM<+P*=FQ+gR$mA&@C@@aMz3W0y+MJSPzxt+;>r(jVi$4y|00UnHIW(sOZt zlza>=S4r(~A%9!K`RU>dw&kEi%Uy_@JqjWQyAZF`XKIKT>Oy?{1cVO+YPoOXZXk8b zL5B4=aak^(p`W!N(-;@?V~d@2xr;U_rAAU0Z4W2~JTKZFPzsE6(e{8+pa(A69?%Mu z{37+hy=E|AT#0)Vs&2{N@j%_fE0;dFR3%=QeQ@bR`58%P&TaUtnI!;f#4DCrrBDFn z`X_m%KoD2r_(`6AI3xu|te5#$lFnnd)yo1X>ZW>WrjRlORK3jT`=wF_!UB0o@S(?dQ zFfCz pb::EventSwap { pb::EventSwap { @@ -135,3 +137,36 @@ pub fn vcb_debit( new_balance: Some(new_balance.into()), } } + +#[derive(Clone, Debug)] +pub struct EventCandlestickData { + pub pair: DirectedTradingPair, + pub stick: CandlestickData, +} + +impl TryFrom for EventCandlestickData { + type Error = anyhow::Error; + + fn try_from(value: pb::EventCandlestickData) -> Result { + fn inner(value: pb::EventCandlestickData) -> anyhow::Result { + Ok(EventCandlestickData { + pair: value.pair.ok_or(anyhow!("missing `pair`"))?.try_into()?, + stick: value.stick.ok_or(anyhow!("missing `stick`"))?.try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventCandlestickData::NAME)) + } +} + +impl From for pb::EventCandlestickData { + fn from(value: EventCandlestickData) -> Self { + Self { + pair: Some(value.pair.into()), + stick: Some(value.stick.into()), + } + } +} + +impl DomainType for EventCandlestickData { + type Proto = pb::EventCandlestickData; +} diff --git a/crates/proto/src/gen/penumbra.core.component.dex.v1.rs b/crates/proto/src/gen/penumbra.core.component.dex.v1.rs index 47bd11a9a1..3878535c9e 100644 --- 
a/crates/proto/src/gen/penumbra.core.component.dex.v1.rs +++ b/crates/proto/src/gen/penumbra.core.component.dex.v1.rs @@ -1555,6 +1555,27 @@ impl ::prost::Name for EventValueCircuitBreakerDebit { ::prost::alloc::format!("penumbra.core.component.dex.v1.{}", Self::NAME) } } +/// Emitted whenever there's non-empty candlestick data for a particular pair. +/// +/// Beware: if there's no activity on a given pair, there's no guarantee +/// that a candlestick will be emitted. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EventCandlestickData { + /// The pair the candlestick is for. + #[prost(message, optional, tag = "1")] + pub pair: ::core::option::Option, + /// The candlestick for this particular pair. + #[prost(message, optional, tag = "2")] + pub stick: ::core::option::Option, +} +impl ::prost::Name for EventCandlestickData { + const NAME: &'static str = "EventCandlestickData"; + const PACKAGE: &'static str = "penumbra.core.component.dex.v1"; + fn full_name() -> ::prost::alloc::string::String { + ::prost::alloc::format!("penumbra.core.component.dex.v1.{}", Self::NAME) + } +} #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct DexParameters { diff --git a/crates/proto/src/gen/penumbra.core.component.dex.v1.serde.rs b/crates/proto/src/gen/penumbra.core.component.dex.v1.serde.rs index ef2766ee27..cb4fbff28d 100644 --- a/crates/proto/src/gen/penumbra.core.component.dex.v1.serde.rs +++ b/crates/proto/src/gen/penumbra.core.component.dex.v1.serde.rs @@ -2225,6 +2225,118 @@ impl<'de> serde::Deserialize<'de> for EventBatchSwap { deserializer.deserialize_struct("penumbra.core.component.dex.v1.EventBatchSwap", FIELDS, GeneratedVisitor) } } +impl serde::Serialize for EventCandlestickData { + #[allow(deprecated)] + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut len = 0; + if 
self.pair.is_some() { + len += 1; + } + if self.stick.is_some() { + len += 1; + } + let mut struct_ser = serializer.serialize_struct("penumbra.core.component.dex.v1.EventCandlestickData", len)?; + if let Some(v) = self.pair.as_ref() { + struct_ser.serialize_field("pair", v)?; + } + if let Some(v) = self.stick.as_ref() { + struct_ser.serialize_field("stick", v)?; + } + struct_ser.end() + } +} +impl<'de> serde::Deserialize<'de> for EventCandlestickData { + #[allow(deprecated)] + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + const FIELDS: &[&str] = &[ + "pair", + "stick", + ]; + + #[allow(clippy::enum_variant_names)] + enum GeneratedField { + Pair, + Stick, + __SkipField__, + } + impl<'de> serde::Deserialize<'de> for GeneratedField { + fn deserialize(deserializer: D) -> std::result::Result + where + D: serde::Deserializer<'de>, + { + struct GeneratedVisitor; + + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = GeneratedField; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "expected one of: {:?}", &FIELDS) + } + + #[allow(unused_variables)] + fn visit_str(self, value: &str) -> std::result::Result + where + E: serde::de::Error, + { + match value { + "pair" => Ok(GeneratedField::Pair), + "stick" => Ok(GeneratedField::Stick), + _ => Ok(GeneratedField::__SkipField__), + } + } + } + deserializer.deserialize_identifier(GeneratedVisitor) + } + } + struct GeneratedVisitor; + impl<'de> serde::de::Visitor<'de> for GeneratedVisitor { + type Value = EventCandlestickData; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str("struct penumbra.core.component.dex.v1.EventCandlestickData") + } + + fn visit_map(self, mut map_: V) -> std::result::Result + where + V: serde::de::MapAccess<'de>, + { + let mut pair__ = None; + let mut stick__ = None; + while let Some(k) = map_.next_key()? 
{ + match k { + GeneratedField::Pair => { + if pair__.is_some() { + return Err(serde::de::Error::duplicate_field("pair")); + } + pair__ = map_.next_value()?; + } + GeneratedField::Stick => { + if stick__.is_some() { + return Err(serde::de::Error::duplicate_field("stick")); + } + stick__ = map_.next_value()?; + } + GeneratedField::__SkipField__ => { + let _ = map_.next_value::()?; + } + } + } + Ok(EventCandlestickData { + pair: pair__, + stick: stick__, + }) + } + } + deserializer.deserialize_struct("penumbra.core.component.dex.v1.EventCandlestickData", FIELDS, GeneratedVisitor) + } +} impl serde::Serialize for EventPositionClose { #[allow(deprecated)] fn serialize(&self, serializer: S) -> std::result::Result diff --git a/crates/proto/src/gen/proto_descriptor.bin.no_lfs b/crates/proto/src/gen/proto_descriptor.bin.no_lfs index a1b2955a571836ef3b777301c5b908cafb93b694..1b37ab32d8b5c5dc7cafc4dfe8c1efe6e9a36c26 100644 GIT binary patch delta 12150 zcmZWvdwf*YozFdU&pCJQblb1+U~A)T_cQ|N`XSu+HQ9Q zp%jqDFu)+#RH+hV36BOubnBxinhI!_XjIq*X#pV+2m(q8((ms)=1xERNAj89@BV(@ z-}5~0{mah&?f>1M`&ZVve(KH_WZAZUU-oepBuo1K9RHv1yA=P2_M_k1`#pvKlluRM zv!LF$s(qr6{MP2-i}w^S$A3CEB4AvfEJx*e`p!9gi+y))Ja*p$@yhB)=EUa?8!-F+Vbf;M z9yn}n^_;4i5B@u8+!w2wIk!4qIenNPyX&F2ftfYCx@y);JZgOGg`rM@o=u%Hb!_a1 zl@D8y%4yY)RL+?3c&0ty&+$1q=PK{gq#!bqZ=WKR1-zh>B|}7ZEwQ9WWVb1nlu>uY z0zSUn&!xECtf*MX*>WDz&)qD5E#xxWFZBgIro=2t&RAd8dl7eiKhKw%%fRM2x&Eq< zk%L}Mon~M;FZ{l4C*{2A;iT4@kZ*i0XH0SyTISi^Ggka3;g}OS076Pe?RYggC&8#pO3iS|3c#fsyR7m$EmxU z1AI=*0G|WDndjS{=fH2~J$f2I4*X_bI>M5`Z{~xB`xBM>XoHsI%$FzK(-Oc7z_$eO z0`M(CdIaEGc;~JLPXNA!mkhQf;9Gd#L4HcR5AtBr-}%hF2LpTw{DT3$1pYyuZ<|d5 z{~#|dGk_BK2YIhimLxzv^g90?h3~ScIKtVVlg=w|CysF0$&55cP4_u>OH8YQh7~aE z&;tXV@;UF)HLU@GqnR4i$6y2+)P_ehH9-G~yIqZq1YwrFQ%i^-{LVHexx> znQbKxy#vyYY7ezI&Lg&r9~-)QlSC2$Mh7o8O-T&c?BJy_zZWe6U_jt%a{cf6B(7!x(V};i=M<)q zsWq?iA~XC*4?tIW_a0`j`BC>8XLjE6;TTAp1|M!+;}JWH`M|oyOYF$>fpm@M4jt}~ zSK~Gkb?1xZ=gAwBHLr_``63My0nB_6DK_bg0A{}EWT)N;VCIXm-u@JoO)QiP1xtxg 
z$>`W^sXwlFi<4(x9}rzAkkDpF1+Ny%#mRwhd^}|_l3`*67j_nlbQZyYrNyGSe;@%{ zi^ZV9{$mCR0?U%+Z$3U{StgJJpk2F9x7@7{Vdu__ZO1dmo_dPK9qDp71^l#s({L0%>L4={Dj zj=HM_8*WmbjkMfwWrW4jJZ8RdvmYx@$%9AMdz*QZ~y0 z(Aoef2S95@#FjD#Kx;**U50W1v{u|O#=o0()5t|#vi+TZ9+}NCut>7A?EeAn?XWjz3pa@h!os+L!|M*7h40y26D9lzFBs3aDzMlO+ z!kI?Vv(!IEyNO(iX;*UeyC?466(G>OvnxQLaCQl^*@6ZTb_uiD0*UE;moS?xkXRsg z32e4XQNLUi{56pr_*}Q-)9>-oui2{Ss;Xy_h***D(NdpF%ja&vrk3nt$)7!UJh`Rm z-g`jnj>l8@w>$8UV%RP6^>z|Z;oojCW~^!#knnG}7yj)+u`Z~6z-6uBh42Td`i z96vD}_5@aFlGYQnam+%b?n%TVvS!Vq8;z;sT|~wg%*Y(qsn}NT4*? z*2qOzdudSJnM;Feuk8;|&;qIZ10>S3*Y-zVRD2+^j+_3-gX9Oo>*9Y!<;|l8X-*dH zy?l4GbqPG!X}0MD36*ABUXXC9*_Ib1(${Ru3limRrt;E|%A?G;Q1+6~?Gxi$0uwYK zT5RqhMshZHAYr0~a<^GKiEs`j|Fm!G-G>4Ms_~(~NwRcEm>qMTs_`L#1(fCxkZ|&l zDAT7kkZ|&l7&6koRXGW3hm&Xa-#-3u08hm|EF%3302TMJ=&xHSPZjqtwGhpjc~sn^ zy11`@AjTaHP^h>^ZE+z7l%uw|AYu5ZEp8s-Ii`x6N5ws6iwg=`Aa!v;A{)nSar2|% z3z2oz6gMA|UkI<0N`5}owpA_B<@gOBTdiXt;aIE9KS(IH+WdorW39AEe@Z9re9G;m z#?n6?bndwnK+q&`$rb=IBxJh;Bp@z{9{PX>5(T&LL81Vc#NZJsfB7_;FYEmM z`hXaBIj}+d&&xJ{kb{lOHh&;t^FfuUD;f^@dP!Ayz9pb@u8OTGBSM9KT^^bzp{yzgn7&B%Z>|YH2o-AYo>;M32%CEFfyFq-{&`^-sl3YXcM- z$ZMt9v_K9hYo%FwK`K47X(>Qu>)_0R?~N`{yB2A7Euf%QCruB5gxhrz3-HnqR^Vt< z)XS{b8kc;w#3`(oUN`@JTBV|NR%noHYh(XIw>yaj=~I3M@u|L>rQ1stK+y;t486Nm z4EVBHn$4tQz=O@Qs7Igz*Eh>pSsD`pTcq1NEkIQR!(b-pGyu>o6b0Sd5h#^zi;VUR z7y#WOG4s;lJW6hEldOlT9Vh^T)NKF~ncXHMh4v}1w#g#B&%;w7ZIie!dzM-~s^-2O z$+`bmm)H?NkV8A9xpIptcXr5VFM~*(v_tmoXPiOKUzX+!7gfyy(mF%S!OH;>HOtE~ zs&5J)45XK(-YzHL49K054KQ|ST?c8Q=qhZdG`Fdc1=LPippSzfm2SWO#wn=2BAI=N zK#8dPfa1L{@Tr>}pITDveR_Q^pnP1qlXNh2F?dq4GwGsHx=u>j-Q-hU5T26m0FxbkK?sJPQVLyt zL3m1{`IY|(Z~)ON-Mq94%_v~#7Xze1MQfFKF`!hy0DU?rn!X?e!xjw;C85epC4kp$ zl6|F0Kv|?!0t{3jp|VQLM4OE0tsWl3fi_uoqbUIm^)@-~COD{+QGzx({;PBe7?t2` zPy(h(a4skT7(i^-C7_})uTxZ}LuaXI46T==fYDw!yp_>S)ai75f0|<(t#}DGZ zCfP4M8b6eOLIlGOA6lHRNpm%Ur|{>RG*=TKQM+q$$WT)|ymbKBuwQs~_#{G9yeGW$ z_z9`v&GYQM!@vOKd{2!ZDqe_+_oU}8^la@ys&+s3FiKQZ5TV*F^3t_)qhg82ehITG 
z2A8JsB_8h86FMy}O?FGuUbvtk0fxTMD30?&k@^~M*l?427pbrDMx@fIcPaCUr_(_*SEar-NIe(`a(R$?mr|ef+?DCn<83Q4 zx5{(x&@N-S6wi3hDI!V&r$ny_@|pkx65!~4v64U%)_CUjo){2V>zS>t zqBSFI5(xolto5>U(ndf(>!n8?SEB?BeYs2!REuZ5t|fG_PBRqTuJhQ2bf#(P2E%5W zBhz)B*~dZ}4%B&_^m+>tnXdD&-s%YouciRjZ$moMoHG6EAkkbU`Z;1Gbi;xJ$mc23 zX$Cx87o?t3>VM<8FQ!w^RqEG!?$opnb$z|(PF8Qj3vn^C(PMdPLs1w;-Zy%YTs;8P z-3*a;SsK>Nb{Zu z(doX|zY!?Vf&$ViP_^D5g|SBAO^+4!8X9|G)|}M5Hc^y3e7bb_n_f;-`=;ujNkUzb zn;3RCMcBo7S)Vr!Ju}!TeAn|z{H?TcFQSe6UXQgjKHN6kiSPAr4Nn~iK6T(eTMR{` z)?nz>OEFOGeV)0NRtyBN&oh_%ih+vnqsib-g9(8Tlh@9Ed&-BIKy=Fb(96-SO?s4x z54|o!RK*knKp%R;NBG}1KnU(YGI{Qw5(hGYXn{PC2}EP!fY+s88i*Fj1Kz+vDrZ1O z@js%u#sp2}|Hw1bK0T%){w)KR6+>$zvgn1(M;_)P>PS){=l_G@ni)MHKy^ zq4y730>%%Q~4{`sJtjUwF8$nO^|UiYdw!jnx+xc=xUdo9Ms<@QScGW`P9o zig1yBH4GBKE5d4J0RlDd$}syPtk){gkX{+a!E=d3kANl{4UONs`c&wlhOlW{#m3lc z2xHh#!4w0U4dFuFeu@E04PnfzX$hR{8^YZOV{7VZ6?#wC5biO^{~uVROh<40v%YT%_N#C%!*t&k&Z_a9lEY4HN__6LIaT+^=OQ7g!M<}auPy9#^?!_( z>qPC)o+^$wKe@Nvwj7vfx%RMGu|UFDd$^O{VS|LJ_Hge$a32WNa_7VBm%eT}(2zbK z_KW-)ddUTvY+Ov8i8!^1i#905Mo<@R4=4sSFWMeZ3|P8odq6SJ0~c)%Xa-7tk$PaN zF$RpU!)~9FiLu+J$7fboJy4|rkH;RUnvuGi?To3wW1U$7pho=KW>zs2K#_h`uNZLR z>o8u`(_2Jhz+y)@_0t^Z#}yr!P}EHwneosZ&=D@wr%j~^sE#o1L=M{2fH5z1W3KbR z-WfH*EahmapQ{fJt z1IjCBPM;Bgg(0CNyJbQ=H{TB72@>^qF8* z)^A0s;A5vKHSUi7-x3U_7nRXwmnlU4G ze>@h~?!p*-`8WZYKEb{2{s@fc3DhhnIV&ycr9M|Ar^rACPU2zu3KH%^pB8VZpm@;H zulzv*@Fee~zuEzb!kwh=J;o?}JZ$Bxd&yw^Ib=p!C?jgl{{KnQc4(Du<$urgreT6w zrdxTmvvGr#j8^(ggkFnI#1zQ0I=H^sp9rgM95?&d!H0;=6U-)2}ly< z-Su_^Byi4iY)5WWu3)5GNR2(gDn?IK>(K?yJY$h;T;LeV+5uX7E|3GiqFu;DBEL>G zo?y?wb(%1~4m4@H_?n0H+ZQ|~QI08Wx>AID*nLwcGFn;iJ&QF->&g`#*7u5d z3dJj2-77Np=BW6FXU!8jqc=nH8yqbBQNX;JGWvu_4Liw(O$C7jJRxv;Ajd&M^a=ZF z2qZwCpcjZ#rb#%Ci>&1abP^<=6kcZqI*C9RiqwArbTkNfif=`?UNqyGJzI)>&Mi<02fg+tUYeZOo0E?$ktPxl) zr~@X^UbR-Rt|i|~9d2b^Qi;>7d@8tr0NZAmE{1DGSbqbGr+}yx>bpJWPR1J|k@Xu> zw#krOA-tacm(-b)scvgh1!vgVQTPp8YlN8_KtgnlFmnS)fUcol_)n#Xhjpn@ZR{tb zC#$N}S#LnnGn;#mP^_ce&(kJRwson`+t|%Q=87R0sn^IC{n$BF$oe;)eCcb0uqY#qMIIXAYr0j 
zV7!ruDN(U8_~c{?BsU7L56-WyJB3VaN{u|nZuky<19Owj6i7&JvY7%2%uROGRH$!0 zerw#WfF!>8=<5##W(8qx6RcZFkJPratlJPXOM?%5JkZk#NN8;nQQaXRfxm5$)6Vc0 T!k>#jjz57vxzlO)E}!}I_gYHD#sd0kYKlo6QjOF=7>vOPf%krE5BJ>u0bkZS zYyI|GzqR&o_PH1TyZUl-HTRG2a{aLIH(vR3RehDt2SMq$di=k0+@tt^auxl3sA@g_ zuc`jA^Fh;Zt8ei9&Z9A3CV5rP)}|{{KfAhOr*Qm)A9Hh^q&w((#r~F<@4C4xC+R-S z*_6_UV@-RXu5r?3lP8z0TCsBFgOkdBxU~Ad`^r|Ye6Vcwin8V3`_6s$-?P*&Td{iC z(v@YaAN>B(RZy1wd)dABufBif(y|{cUAb!citm;CexA<>*j30GQIMD@v`-Pn!@OvS z6=Nh0FZFLT&K}7lCfmo7#HvYtKE>?jBt;!(n|Vw>c5?t$$7QZR#uxOMlBy53Hb)I2ZW|9Q;V!EZy;yN$nazPy3EUdGUY;Rc?_#c&!A@+R(B8BrSF7y0q zm9#@7{+heV(qLfUxC;E0XR^f;#P(}mSmcj4rf}UG%-uh+VsEw?(YQC;4Dz!uD+zY) zA5#8owke}WWV+2)$s>{0rc?Vz@Vi^1z(2_g#~1_`_$PTqr4@mHl8^h06@h<} zPrJ$ghsFm~TX5I&J$JQ5_?(mxJ_mjqFDx|(9QbW~)MzULzm1QXW<}t)@d;D?xhj0L zL3{A{3pIDQNALph?Gd~He0xNX0DL`h z_!9UX5xxX|2QReECV}6<$CMjD3H%OTaf1~JkWaqeKc?_qmK5)B_Fgdk@Js1;xEx|; z8l$Frm%F8=)gWUODD2dO0-f?MA2vMW0fVk=59(tmA`fcAu51s;f9LLSGe(**nnutR zXGW!V{5$nhu3{UxbaQ4~$wTizw6oenExLKameB)NHy>f!!~-qeyn4L9j}a7_NSxv9 z5|5Q`XJubmwtQ7tQ(sHDlRm@cP?J)ONI1*gktQ6-z=UFGq(FRUdFhCZ2Ml_nlwyoX zfWl@31w48wBfX@>D8qByt+XUS0T*b-WCqbD6=O3lNacKXSTq5lh{B2i^n7+$VCq6t zn;4@h1%<5*6!5r!+N3jspvf*~yU;9wB68tyxfqo*MoZ#H*|}5WLlNaJ;PFv*?r6ro z>`bMTRvO!OF(%>CN0ZUgg%dm-XPCQWss>g2YIoXe&h$BK|XSnnQVU2y~dec_k08f(bC`}tZO`BS1})0 z*LbO&nLd!N@%+hC{n={XCX#Ng2)crsgWvsD%&ip}m;_*IMWV#emjFzy7-E;+1Yl}K z`B;CUViOx>onSu?+}|xo)(Noq7e{5RcF48CH-Fc@a4i@x2|^3-wIV|u6o6kVN~$9Z z46s&AnCSn|0Ks5=u=U;HKBPf6Q>(g^N*4Z5*&zu-Il?kFb zTXEWGc8HQ;{z&q|obMqHMaN6Cw)GFD6b?{1wt{9?<-~)+S~R zL}SpYbHMTkJB%@4`Gbft1}uMt2gmH6$HcM%b|uIc%|oN|_|lV)*&5(BN1X8D8&pLNnKeh?AUI%)ETh`iTHlQ%@< zy-p(U3)I*}bj7kkvMs@x_VWuiNb_z2HI2Mcns*Z=V-zU#L0Bn}#YTyDlS-UPNCAUQ z!KhPJ6`Q21kAX@HvrTfyQ2$?y8RX5u(o-|io28qtj!I-k71%6`lImnz1S}CqwpHrx zhm7WdLNBinQI$ZNQ!+#h9mtXT76~F42;?+9*C2v{tx}9rm4^rhw#uT)T-BpRG;k>C z@VEefW0)`+hKe6bb9}^83=>Loe1wQ$LWv%wDOf~OgQR0iaPCcUOGAW06S+Z}LksMH z(jd*&3sL!*LrW1VyF&&=fBD*tMe5Wd&8Y5k19P}QI?m}zCPzmKfbN%A 
zdFhorNnsw4Y?P`UBmhFxZ2%F>9*~KYeG04tvRI$<@DxY~ByP#JQ>!P{+IJ{;>g~tV zhaw0H=#VrQZAlf*A(^Z&h}21kIGS(UjiX2->Pa86kMAnvk#Fdi57{j zx|*Y)X!&oJY0yo(5-0&wb3`_b4@+i~hXIKeif*+IM?um1`e9jQD?_(hhp88I*-)4k z$@J_^GISh?7U>4M!D@-3q8IrVnbcQGum)6%tQ_ZeQfsG@;+V{Nx#{A&9ZvcfuA9^f zk)nm`Wyy|9eK!mlATLWGLN`4`z@3o8t5ty^Vz?6$?|TKPH$;!nCjAwx(KRS!86jm&ak7e1) z1`ubZTaa;~6$J|YIe=WKXlErp2PhXPAooT^)7OGf*rGw9EL54P1c>^)WS^=M5R0@+ zK!FOR6|1yOoRPv2-uVw56UY>OYb0B11hCa2lXms6L_#$t#jQU}aF^y-f3nyT)YNy&6%kZzG|w)a%>Gs}c3;Hu9P@Ul<6J zxPn4|l2Hl_a!s07Zzw?Z|IrX;OiX*os3q+mDK{Nc^FB)h1rU@%&mZJn>#;{Ynm@!p zAwprN4{gr1p1GR9Q^Zs2nX3tis9mi$X|kyuzBK@B%A+7r#lzAuD&86|Gk;>Lcn^Da z-C_Xc_;RhuieD zrp2YjZbK#t7cwlM(6@LgQh=2W9$rVR2MjiPnRUojJ0BEw9fATL8$GoSA#OlG-h?}G zL%o`LY0~KLMan{%;Oma29$|0s*e^5G)7}JyrJi0*ws_`3J*BqiE#3(Ir2-N4%z?fCV)AT&yfW!sDK~vnK@%p76|3SIJrtH$=h!9#43= zd6_{VKj~#=9#^vj3Vo$a5LAmNz2T*FolYwh!rtYveHo@{>xRNI&B638&zxgnjR1Ce zL-c+N5lrv$u;1zhii7E0Ue&$~)0~)oDk7RIqMs&3S~n~-fZRo-Nam-DD zjpsae6l~C8Bt-}5=e#_9mkbG`K-4c|5aIfqS2Elmi`yW$x-SJg&yKt4rO1Ic6o}S= zs`U~%%rp+iJeI1MTz2n@mEoPg7sWwA@0h8_yu752P1QeJgu4>=GMsLTu^)RmA2j`2 z??fkc!t+Y~19WgNri1&RJ@&VzU!I%lEd8^G%XjKP=&1u=x5ZF0Y7K?ny_5pge%&+o z(n^5@UiZwEzEYs#uhU|1hrxuw8^P2IUtIV`HW0mKz2W8Q)+Rq<;tg-uBvmn`0MHxW z)M@?~4G@w$75w4Cm(r)QfoOv~l?_C5;*>Y6Dg#8DsA68aDh(J3dna+0-1c5e|)Yv@g+n6mE*Zv3cjc2@*JWOsQ9 z-8C>F8!x5L;t+w*<>5u7ULzp%?x6ppwR5^71Y)^6LZDH)BLrf&n-Dr2^j%3(^m;kx zg8%A!v#{60ea)I8L{>ssZfkn_<2rB2wz$a-N&s(*o7XIe0Nxfa)*pl+0(e_o?JPi` z#%+kR-nc$zK!$Zg950@aNc0F~8slJ7{nhocWe4M?ZIv2x?_eC$h6<(>@H`k#>Go3! zjC3%Ll{I65H~WL}krQz=^|T9pojVvGHNk(Knb82!()=UG`HGWn$sUeg{93YyBgR{@ zha=8gvWKIKgqG~#92%~LhTENMhJ&If?v5|rnp<|;y=ANE!|#_@+`sZWOBbwYzCGp? 
ziZmYUxk@R~OJh&myksi{qUebi>+dW|fhc<7cyXk*Bn9L%ad$$71Uk?_5nnN?SD)5h%S#p3MfOGsxL^F&&L`xoE>a8=p4a zKSvFQ<-RYTv(-*L(oE?xDVmCL`49}=xz7T)bUodtbyZ@B*eT5brn!=-Rh!uipX zOSa{pM9W=@n;i=x2D=m=qEFZmG1R5_*h+*C1ZufY;wzuDbNF#Z4YP#N`9GoV6hnt7+2$NW$E0q z+wNKVz17R_U9J*eT6XX9`@+e2&WuHPteGVMYQ(FSS*1__#rgxiQXq({aeSbs&xoYJ zh_!zBOrG<=qFO%-in^)RH&aL%0;<+e>9u21 zdSCc{ksr4Ykjr`>EBh@OTMV=z{6V2Jr)q;Q;#sz|^KS5QPP#c`3(cmmqtKbTY?Ch% zS+?r5>gVeh2MQMry~Q^#l1kQ!s1E8l>gVYnASeq2vc<=BA9c7=Amc4R{c(}g{Np0_ zjZtoQUCa-6?svzBTWeya;gOq|2v>eJHYYstidz?!&5wPidH6KebyM?G_p*XSV$Va2 zeyNv}^D7b7v^&M&<42rC^WXl1&AfjceMXy;6z}nzi(KCX&%x;Laohx7j~<^x1a^mS z{)0`q0e_=5_+g4ry9-2Ec5|FjX&-}#D7$%ai9bU*;^FCV=7;R2`E%4Q>S_Ct9U>;# zY5Q9Ph-At~>P-tGa87eG`KkH#F%Oml+#9Mu=czS^hZSep zlp7%+Qv6gyM+y<9mw8-|0TG~=c{1hG4-w`Oor65AJIm(6la`0U$digR$m9A;GoHe8 zkbY0MKzX7_*Le8JSvKc}d8$hINrfGgV6XAGK0@OuJg;$FxPMl8LV74%*30I@kt*|$ z$T(7E9uk=$sWJ}<47o@-qB3j3w|ZF(9H|~_B1fvn8WGo@(eV_HYXojpzNs8xTNmDW zj(ru5bhn7#Uf59S1*J~J^)Dvz6pnQQD-HF+JUZsohi{%^-});4hGTu?NH>Y~BCdZb zh^KI@7wYF-%&kd^bt2~%rdTzwTqnHIejhby4OMHSU}H+(cfupDva0aVdG?2!p~hg) z>mQIngx5wfME?d1BJelThsgg_76mMZHg0}+^wKYgH&;a?2@T>Rts OC-6@;KhWoW+VlUYaHoj? diff --git a/proto/penumbra/penumbra/core/component/dex/v1/dex.proto b/proto/penumbra/penumbra/core/component/dex/v1/dex.proto index 0ec1b5a9b8..862af7abaa 100644 --- a/proto/penumbra/penumbra/core/component/dex/v1/dex.proto +++ b/proto/penumbra/penumbra/core/component/dex/v1/dex.proto @@ -737,6 +737,17 @@ message EventValueCircuitBreakerDebit { num.v1.Amount new_balance = 3; } +// Emitted whenever there's non-empty candlestick data for a particular pair. +// +// Beware: if there's no activity on a given pair, there's no guarantee +// that a candlestick will be emitted. +message EventCandlestickData { + // The pair the candlestick is for. + DirectedTradingPair pair = 1; + // The candlestick for this particular pair. + CandlestickData stick = 2; +} + message DexParameters { // Whether or not the DEX is enabled. 
bool is_enabled = 1; From 3cc323416071d1b35aaaff6bf2ac872c43fd74c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=BAc=C3=A1s=20Meier?= Date: Tue, 8 Oct 2024 11:50:45 -0700 Subject: [PATCH 35/43] pindexer: add a nicer error when a checkpoint genesis is used (#4891) This adds an informative error explaining that we need the *original* genesis file, before any upgrades, and why. Closes #4880 - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > indexing only --- crates/bin/pindexer/src/lib.rs | 1 + crates/bin/pindexer/src/parsing.rs | 27 +++++++++++++++++++ .../bin/pindexer/src/stake/validator_set.rs | 20 +++++--------- crates/bin/pindexer/src/supply.rs | 16 +++++------ 4 files changed, 41 insertions(+), 23 deletions(-) create mode 100644 crates/bin/pindexer/src/parsing.rs diff --git a/crates/bin/pindexer/src/lib.rs b/crates/bin/pindexer/src/lib.rs index 42e319d134..353aaaf0b2 100644 --- a/crates/bin/pindexer/src/lib.rs +++ b/crates/bin/pindexer/src/lib.rs @@ -5,6 +5,7 @@ pub use indexer_ext::IndexerExt; pub mod block; pub mod dex; pub mod ibc; +mod parsing; pub mod shielded_pool; mod sql; pub mod stake; diff --git a/crates/bin/pindexer/src/parsing.rs b/crates/bin/pindexer/src/parsing.rs new file mode 100644 index 0000000000..51bd104059 --- /dev/null +++ b/crates/bin/pindexer/src/parsing.rs @@ -0,0 +1,27 @@ +use anyhow::{anyhow, Context as _}; +use penumbra_app::genesis::{AppState, Content}; +use serde_json::Value; + +const GENESIS_NO_CONTENT_ERROR: &'static str = r#" +Error: using an upgrade genesis file instead of an initial genesis file. + +This genesis file only contains a checkpoint hash of the state, +rather than information about how the initial state of the chain was initialized, +at the very first genesis. + +Make sure that you're using the very first genesis file, before any upgrades. 
+"#; + +/// Attempt to parse content from a value. +/// +/// This is useful to get the initial chain state for app views. +/// +/// This has a nice error message, so you should use this. +pub fn parse_content(data: Value) -> anyhow::Result { + let app_state: AppState = serde_json::from_value(data) + .context("error decoding app_state json: make sure that this is a penumbra genesis file")?; + let content = app_state + .content() + .ok_or(anyhow!(GENESIS_NO_CONTENT_ERROR))?; + Ok(content.clone()) +} diff --git a/crates/bin/pindexer/src/stake/validator_set.rs b/crates/bin/pindexer/src/stake/validator_set.rs index fd0abf344c..bc4dc59a18 100644 --- a/crates/bin/pindexer/src/stake/validator_set.rs +++ b/crates/bin/pindexer/src/stake/validator_set.rs @@ -1,9 +1,9 @@ use std::collections::BTreeMap; -use anyhow::{anyhow, Context, Result}; +use anyhow::{anyhow, Result}; use cometindex::{async_trait, sqlx, AppView, ContextualizedEvent, PgPool, PgTransaction}; -use penumbra_app::genesis::AppState; +use penumbra_app::genesis::Content; use penumbra_asset::asset; use penumbra_num::Amount; use penumbra_proto::{core::component::stake::v1 as pb, event::ProtoEvent}; @@ -12,6 +12,8 @@ use penumbra_stake::{ IdentityKey, }; +use crate::parsing::parse_content; + #[derive(Debug)] pub struct ValidatorSet {} @@ -45,10 +47,7 @@ impl AppView for ValidatorSet { .execute(dbtx.as_mut()) .await?; - let app_state: penumbra_app::genesis::AppState = - serde_json::from_value(app_state.clone()).context("error decoding app_state json")?; - - add_genesis_validators(dbtx, &app_state).await?; + add_genesis_validators(dbtx, &parse_content(app_state.clone())?).await?; Ok(()) } @@ -147,14 +146,7 @@ impl AppView for ValidatorSet { } } -async fn add_genesis_validators<'a>( - dbtx: &mut PgTransaction<'a>, - app_state: &AppState, -) -> Result<()> { - let content = app_state - .content() - .ok_or_else(|| anyhow::anyhow!("cannot initialize indexer from checkpoint genesis"))?; - +async fn 
add_genesis_validators<'a>(dbtx: &mut PgTransaction<'a>, content: &Content) -> Result<()> { // Given a genesis validator, we need to figure out its delegations at // genesis by getting its delegation token then summing up all the allocations. // Build up a table of the total allocations first. diff --git a/crates/bin/pindexer/src/supply.rs b/crates/bin/pindexer/src/supply.rs index ff51f7adb7..35c7545890 100644 --- a/crates/bin/pindexer/src/supply.rs +++ b/crates/bin/pindexer/src/supply.rs @@ -1,8 +1,8 @@ use std::collections::{BTreeMap, HashSet}; -use anyhow::{anyhow, Context, Result}; +use anyhow::{anyhow, Result}; use cometindex::{async_trait, sqlx, AppView, ContextualizedEvent, PgTransaction}; -use penumbra_app::genesis::{AppState, Content}; +use penumbra_app::genesis::Content; use penumbra_asset::{asset, STAKING_TOKEN_ASSET_ID}; use penumbra_num::Amount; use penumbra_proto::{ @@ -16,6 +16,8 @@ use penumbra_stake::{rate::RateData, validator::Validator, IdentityKey}; use sqlx::{PgPool, Postgres, Transaction}; use std::iter; +use crate::parsing::parse_content; + mod unstaked_supply { //! This module handles updates around the unstaked supply. use anyhow::Result; @@ -820,7 +822,7 @@ impl<'a> TryFrom<&'a ContextualizedEvent> for Event { /// Add the initial native token supply. 
async fn add_genesis_native_token_allocation_supply<'a>( dbtx: &mut PgTransaction<'a>, - app_state: &AppState, + content: &Content, ) -> Result<()> { fn content_mints(content: &Content) -> BTreeMap { let community_pool_mint = iter::once(( @@ -843,9 +845,6 @@ async fn add_genesis_native_token_allocation_supply<'a>( out } - let content = app_state - .content() - .ok_or_else(|| anyhow::anyhow!("cannot initialized indexer from checkpoint genesis"))?; let mints = content_mints(content); let unstaked_mint = u64::try_from( @@ -911,9 +910,8 @@ impl AppView for Component { // decode the initial supply from the genesis // initial app state is not recomputed from events, because events are not emitted in init_chain. // instead, the indexer directly parses the genesis. - let app_state: penumbra_app::genesis::AppState = - serde_json::from_value(app_state.clone()).context("error decoding app_state json")?; - add_genesis_native_token_allocation_supply(dbtx, &app_state).await?; + add_genesis_native_token_allocation_supply(dbtx, &parse_content(app_state.clone())?) 
+ .await?; Ok(()) } From 94b0058158d222f82781d9cbfd98d013d1c16037 Mon Sep 17 00:00:00 2001 From: Conor Schaefer Date: Tue, 8 Oct 2024 12:09:18 -0700 Subject: [PATCH 36/43] chore: release version 0.80.6 --- Cargo.lock | 96 +++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 49 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e20260455c..d077d5243a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1233,7 +1233,7 @@ dependencies = [ [[package]] name = "cnidarium" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "async-trait", @@ -1269,7 +1269,7 @@ dependencies = [ [[package]] name = "cnidarium-component" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "async-trait", @@ -1307,7 +1307,7 @@ dependencies = [ [[package]] name = "cometindex" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "async-trait", @@ -1668,7 +1668,7 @@ dependencies = [ [[package]] name = "decaf377-fmd" -version = "0.80.5" +version = "0.80.6" dependencies = [ "ark-ff", "ark-serialize", @@ -1683,7 +1683,7 @@ dependencies = [ [[package]] name = "decaf377-frost" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ -1698,7 +1698,7 @@ dependencies = [ [[package]] name = "decaf377-ka" -version = "0.80.5" +version = "0.80.6" dependencies = [ "ark-ff", "decaf377", @@ -4213,7 +4213,7 @@ dependencies = [ [[package]] name = "pcli" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ -4295,7 +4295,7 @@ dependencies = [ [[package]] name = "pclientd" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "assert_cmd", @@ -4347,7 +4347,7 @@ dependencies = [ [[package]] name = "pd" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ -4500,7 +4500,7 @@ dependencies = [ [[package]] name = "penumbra-app" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ -4588,7 +4588,7 @@ dependencies = 
[ [[package]] name = "penumbra-asset" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ -4628,7 +4628,7 @@ dependencies = [ [[package]] name = "penumbra-auction" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ -4683,7 +4683,7 @@ dependencies = [ [[package]] name = "penumbra-auto-https" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "axum-server", @@ -4695,7 +4695,7 @@ dependencies = [ [[package]] name = "penumbra-bench" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-bls12-377", @@ -4739,7 +4739,7 @@ dependencies = [ [[package]] name = "penumbra-community-pool" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ -4771,7 +4771,7 @@ dependencies = [ [[package]] name = "penumbra-compact-block" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ -4806,7 +4806,7 @@ dependencies = [ [[package]] name = "penumbra-custody" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "argon2", @@ -4842,7 +4842,7 @@ dependencies = [ [[package]] name = "penumbra-dex" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ -4904,7 +4904,7 @@ dependencies = [ [[package]] name = "penumbra-distributions" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "async-trait", @@ -4922,7 +4922,7 @@ dependencies = [ [[package]] name = "penumbra-eddy" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ -4940,7 +4940,7 @@ dependencies = [ [[package]] name = "penumbra-fee" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ -4967,7 +4967,7 @@ dependencies = [ [[package]] name = "penumbra-funding" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "async-trait", @@ -4990,7 +4990,7 @@ dependencies = [ [[package]] name = "penumbra-governance" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ 
-5044,7 +5044,7 @@ dependencies = [ [[package]] name = "penumbra-ibc" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ -5081,7 +5081,7 @@ dependencies = [ [[package]] name = "penumbra-keys" -version = "0.80.5" +version = "0.80.6" dependencies = [ "aes", "anyhow", @@ -5128,7 +5128,7 @@ dependencies = [ [[package]] name = "penumbra-measure" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "bytesize", @@ -5146,7 +5146,7 @@ dependencies = [ [[package]] name = "penumbra-mock-client" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "cnidarium", @@ -5163,7 +5163,7 @@ dependencies = [ [[package]] name = "penumbra-mock-consensus" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "bytes", @@ -5183,7 +5183,7 @@ dependencies = [ [[package]] name = "penumbra-mock-tendermint-proxy" -version = "0.80.5" +version = "0.80.6" dependencies = [ "hex", "pbjson-types", @@ -5198,7 +5198,7 @@ dependencies = [ [[package]] name = "penumbra-num" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ -5235,7 +5235,7 @@ dependencies = [ [[package]] name = "penumbra-proof-params" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ec", @@ -5263,7 +5263,7 @@ dependencies = [ [[package]] name = "penumbra-proof-setup" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ec", @@ -5290,7 +5290,7 @@ dependencies = [ [[package]] name = "penumbra-proto" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "async-trait", @@ -5324,7 +5324,7 @@ dependencies = [ [[package]] name = "penumbra-sct" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ -5360,7 +5360,7 @@ dependencies = [ [[package]] name = "penumbra-shielded-pool" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ -5414,7 +5414,7 @@ dependencies = [ [[package]] name = "penumbra-stake" -version = "0.80.5" +version = "0.80.6" 
dependencies = [ "anyhow", "ark-ff", @@ -5467,7 +5467,7 @@ dependencies = [ [[package]] name = "penumbra-tct" -version = "0.80.5" +version = "0.80.6" dependencies = [ "ark-ed-on-bls12-377", "ark-ff", @@ -5499,7 +5499,7 @@ dependencies = [ [[package]] name = "penumbra-tct-property-test" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "futures", @@ -5511,7 +5511,7 @@ dependencies = [ [[package]] name = "penumbra-tct-visualize" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "axum", @@ -5541,7 +5541,7 @@ dependencies = [ [[package]] name = "penumbra-tendermint-proxy" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "chrono", @@ -5573,7 +5573,7 @@ dependencies = [ [[package]] name = "penumbra-test-subscriber" -version = "0.80.5" +version = "0.80.6" dependencies = [ "tracing", "tracing-subscriber 0.3.18", @@ -5581,7 +5581,7 @@ dependencies = [ [[package]] name = "penumbra-tower-trace" -version = "0.80.5" +version = "0.80.6" dependencies = [ "futures", "hex", @@ -5602,7 +5602,7 @@ dependencies = [ [[package]] name = "penumbra-transaction" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-ff", @@ -5655,7 +5655,7 @@ dependencies = [ [[package]] name = "penumbra-txhash" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "blake2b_simd 1.0.2", @@ -5668,7 +5668,7 @@ dependencies = [ [[package]] name = "penumbra-view" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-std", @@ -5726,7 +5726,7 @@ dependencies = [ [[package]] name = "penumbra-wallet" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-std", @@ -5812,7 +5812,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pindexer" -version = "0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "clap", @@ -7665,7 +7665,7 @@ checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = "summonerd" -version = 
"0.80.5" +version = "0.80.6" dependencies = [ "anyhow", "ark-groth16", diff --git a/Cargo.toml b/Cargo.toml index d471f6fab0..c6939c3ff5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -104,7 +104,7 @@ push = false [workspace.package] authors = ["Penumbra Labs Date: Wed, 9 Oct 2024 13:00:35 -0700 Subject: [PATCH 37/43] build: allow building client crates without rocksdb (#4892) This required: - feature gating `cnidarium` in `penumbra-app` - removing the stateless transaction check in the view crate - removing the ceremony contribution command from `pcli` Closes #4885 ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > Only client code was changed, the non-client code that was changed was only changed to have the public API be reduced without certain default features enabled. PD should be completely unchanged by this fix. ## How to test this: run `cargo build --bin pcli -p pcli` (and pclientd, and pindexer) and observe that rocksdb is not built. 
--------- Co-authored-by: Conor Schaefer --- Cargo.lock | 2 +- Cargo.toml | 7 +- crates/bin/pcli/Cargo.toml | 4 +- crates/bin/pcli/src/command.rs | 7 +- crates/bin/pcli/src/command/ceremony.rs | 259 ------------------------ crates/bin/pcli/src/main.rs | 1 - crates/bin/pclientd/Cargo.toml | 2 +- crates/bin/pd/Cargo.toml | 2 +- crates/bin/pindexer/Cargo.toml | 2 +- crates/core/app/Cargo.toml | 62 ++++-- crates/core/app/src/lib.rs | 51 ++--- crates/core/asset/Cargo.toml | 1 + crates/crypto/tct/Cargo.toml | 1 + crates/view/Cargo.toml | 4 +- crates/view/src/service.rs | 17 +- crates/wallet/Cargo.toml | 10 +- 16 files changed, 90 insertions(+), 342 deletions(-) delete mode 100644 crates/bin/pcli/src/command/ceremony.rs diff --git a/Cargo.lock b/Cargo.lock index d077d5243a..367c0504aa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4257,7 +4257,6 @@ dependencies = [ "penumbra-keys", "penumbra-num", "penumbra-proof-params", - "penumbra-proof-setup", "penumbra-proto", "penumbra-sct", "penumbra-shielded-pool", @@ -4512,6 +4511,7 @@ dependencies = [ "bitvec", "blake2b_simd 1.0.2", "camino", + "cfg-if", "cnidarium", "cnidarium-component", "decaf377", diff --git a/Cargo.toml b/Cargo.toml index c6939c3ff5..7ae2d4d471 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,6 @@ [workspace] -# Set virtual workspace's resolver to v1, to support the "rust-docs" script. 
-resolver = "1" +resolver = "2" exclude = ["tools/proto-compiler", "tools/parameter-setup"] @@ -167,7 +166,7 @@ once_cell = { version = "1.8" } parking_lot = { version = "0.12.1" } pbjson = { version = "0.6" } pbjson-types = { version = "0.6.0" } -penumbra-app = { path = "crates/core/app" } +penumbra-app = { default-features = false, path = "crates/core/app" } penumbra-asset = { default-features = false, path = "crates/core/asset" } penumbra-community-pool = { default-features = false, path = "crates/core/component/community-pool" } penumbra-compact-block = { default-features = false, path = "crates/core/component/compact-block" } @@ -240,7 +239,7 @@ tower-service = { version = "0.3.2" } tracing = { version = "0.1" } tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } url = { version = "2.2" } -getrandom = { version = "0.2", default-features = false, features = ["js"] } +getrandom = { version = "0.2", default-features = false } # TODO(kate): # temporarily point these dependencies to a tag in the penumbra-zone fork. 
diff --git a/crates/bin/pcli/Cargo.toml b/crates/bin/pcli/Cargo.toml index 79bbdb9606..7c123ddff9 100644 --- a/crates/bin/pcli/Cargo.toml +++ b/crates/bin/pcli/Cargo.toml @@ -27,7 +27,6 @@ parallel = [ "penumbra-stake/parallel", "penumbra-transaction/parallel", "penumbra-wallet/parallel", - "penumbra-proof-setup/parallel", ] [dependencies] @@ -60,7 +59,7 @@ indicatif = {workspace = true} jmt = {workspace = true} ndarray = "0.15.6" once_cell = {workspace = true} -penumbra-app = {workspace = true} +penumbra-app = {workspace = true, default-features = false} penumbra-asset = {workspace = true, default-features = false} penumbra-community-pool = {workspace = true, default-features = false} penumbra-compact-block = {workspace = true, default-features = false} @@ -72,7 +71,6 @@ penumbra-governance = {workspace = true, default-features = false} penumbra-ibc = {workspace = true, default-features = false} penumbra-keys = {workspace = true, default-features = false} penumbra-num = {workspace = true, default-features = false} -penumbra-proof-setup = {workspace = true} penumbra-proof-params = { workspace = true, default-features = true } penumbra-proto = {workspace = true, features = ["rpc", "box-grpc"], default-features = true} penumbra-sct = {workspace = true, default-features = false} diff --git a/crates/bin/pcli/src/command.rs b/crates/bin/pcli/src/command.rs index 420964e6e6..025bc4d534 100644 --- a/crates/bin/pcli/src/command.rs +++ b/crates/bin/pcli/src/command.rs @@ -7,9 +7,8 @@ pub use tx::TxCmd; pub use validator::ValidatorCmd; pub use view::ViewCmd; -use self::{ceremony::CeremonyCmd, tx::TxCmdWithOptions}; +use self::tx::TxCmdWithOptions; -mod ceremony; mod debug; mod init; mod migrate; @@ -64,9 +63,6 @@ pub enum Command { /// Manage a validator. #[clap(subcommand, display_order = 900)] Validator(ValidatorCmd), - /// Contribute to the summoning ceremony. 
- #[clap(subcommand, display_order = 990)] - Ceremony(CeremonyCmd), /// Display information related to diagnosing problems running Penumbra #[clap(subcommand, display_order = 999)] Debug(DebugCmd), @@ -82,7 +78,6 @@ impl Command { Command::Validator(cmd) => cmd.offline(), Command::Query(cmd) => cmd.offline(), Command::Debug(cmd) => cmd.offline(), - Command::Ceremony(_) => false, Command::Threshold(cmd) => cmd.offline(), Command::Migrate(_) => false, } diff --git a/crates/bin/pcli/src/command/ceremony.rs b/crates/bin/pcli/src/command/ceremony.rs deleted file mode 100644 index f8095aed44..0000000000 --- a/crates/bin/pcli/src/command/ceremony.rs +++ /dev/null @@ -1,259 +0,0 @@ -use anyhow::{anyhow, Context, Result}; -use rand_core::OsRng; -use tokio::sync::mpsc; -use tokio_stream::wrappers::ReceiverStream; -use url::Url; - -use penumbra_asset::Value; -use penumbra_keys::{keys::AddressIndex, Address}; -use penumbra_num::Amount; -use penumbra_proof_setup::all::{ - Phase1CeremonyContribution, Phase1RawCeremonyCRS, Phase2CeremonyContribution, - Phase2RawCeremonyCRS, -}; -use penumbra_proof_setup::single::log::Hashable; -use penumbra_proto::{ - penumbra::tools::summoning::v1::ceremony_coordinator_service_client::CeremonyCoordinatorServiceClient, - tools::summoning::v1::{ - participate_request::{Identify, Msg as RequestMsg}, - participate_response::{Confirm, ContributeNow, Msg as ResponseMsg}, - ParticipateRequest, ParticipateResponse, - }, - view::v1::GasPricesRequest, -}; -use penumbra_view::Planner; - -use crate::App; - -fn max_message_size(phase: u8) -> usize { - match phase { - 1 => 200 * 1024 * 1024, - _ => 100 * 1024 * 1024, - } -} - -#[tracing::instrument(skip(app))] -async fn handle_bid(app: &mut App, to: Address, from: AddressIndex, bid: &str) -> Result<()> { - let gas_prices = app - .view - .as_mut() - .context("view service must be initialized")? - .gas_prices(GasPricesRequest {}) - .await? 
- .into_inner() - .gas_prices - .expect("gas prices must be available") - .try_into()?; - - let value = bid.parse::()?; - - // If the bid is 0, skip creating a transaction. For instance, this allows reconnecting - // without paying extra. - if value.amount == 0u64.into() { - return Ok(()); - } - - let mut planner = Planner::new(OsRng); - planner.set_gas_prices(gas_prices); - planner.output(value, to); - let plan = planner - .memo("E PLURIBUS UNUM".into()) - .memo_return_address(app.config.full_viewing_key.payment_address(from).0) - .plan( - app.view - .as_mut() - .context("view service must be initialized")?, - from, - ) - .await - .context("can't build send transaction")?; - app.build_and_submit_transaction(plan).await?; - Ok(()) -} - -#[derive(Debug, clap::Subcommand)] -pub enum CeremonyCmd { - /// Contribute to the ceremony - Contribute { - /// The phase of the summoning ceremony that's currently active. Must match that of the remote - /// coordinator. - #[clap(long, default_value = "2")] - phase: u8, - /// The URL for the public coordination server. - #[clap(long, default_value = "https://summoning.penumbra.zone")] - coordinator_url: Url, - /// The Penumbra wallet address of the coordination server. Bids will be sent to this - /// address, so the coordinator can compute the contributor's place in the queue. - #[clap( - long, - default_value = "penumbra1qvqr8cvqyf4pwrl6svw9kj8eypf3fuunrcs83m30zxh57y2ytk94gygmtq5k82cjdq9y3mlaa3fwctwpdjr6fxnwuzrsy4ezm0u2tqpzw0sed82shzcr42sju55en26mavjnw4" - )] - coordinator_address: Address, - /// Amount to spend during bid. Must be specified typed values, e.g. '50penumbra'. - /// Only the 'penumbra' token is accepted for contributions. Bids are additive, - /// so if you're disconnected, you can bid '0penumbra' and your previous bids - /// will be still be counted when computing your place in the queue. 
- #[clap(long)] - bid: String, - }, -} - -impl CeremonyCmd { - #[tracing::instrument(skip(self, app))] - pub async fn exec(&self, app: &mut App) -> Result<()> { - match self { - CeremonyCmd::Contribute { - phase, - coordinator_url, - coordinator_address, - bid, - } => { - println!("¸,ø¤º°` initiating summoning participation `°º¤ø,¸"); - - let index = match *phase { - 1 => AddressIndex { - account: 0, - randomizer: b"ceremnyaddr1" - .as_slice() - .try_into() - .expect("12 bytes long"), - }, - 2 => AddressIndex { - account: 0, - randomizer: b"ceremnyaddr2" - .as_slice() - .try_into() - .expect("12 bytes long"), - }, - _ => anyhow::bail!("phase must be 1 or 2."), - }; - let address = app.config.full_viewing_key.payment_address(index).0; - - println!( - "submitting bid {} for contribution slot from address {}", - bid, address - ); - - handle_bid(app, coordinator_address.clone(), index, bid).await?; - - println!("connecting to coordinator..."); - // After we bid, we need to wait a couple of seconds just for the transaction to be - // picked up by the coordinator. Else, there is a race wherein the coordinator will kick the - // client out of the queue because it doesn't see the transaction yet. - tokio::time::sleep(std::time::Duration::from_secs(2)).await; - - let (req_tx, req_rx) = mpsc::channel::(10); - tracing::debug!(?address, "participate request"); - req_tx - .send(ParticipateRequest { - msg: Some(RequestMsg::Identify(Identify { - address: Some(address.into()), - })), - }) - .await?; - let mut client = - CeremonyCoordinatorServiceClient::connect(coordinator_url.to_string()) - .await? - .max_decoding_message_size(max_message_size(*phase)) - .max_encoding_message_size(max_message_size(*phase)); - println!( - r#"connected to coordinator! -You may disconnect (CTRL+C) to increase your bid if you don't like your position in the queue. -Otherwise, please keep this window open. 
-"# - ); - use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle}; - let progress_bar = ProgressBar::with_draw_target(1, ProgressDrawTarget::stdout()) - .with_style( - ProgressStyle::default_bar() - .template("[{elapsed}] {bar:50.blue/cyan} position {pos} out of {len} connected summoners\t{msg}"), - ); - progress_bar.set_position(0); - progress_bar.enable_steady_tick(1000); - - let mut response_rx = client - .participate(ReceiverStream::new(req_rx)) - .await? - .into_inner(); - let unparsed_parent = loop { - match response_rx.message().await? { - None => { - progress_bar.abandon(); - anyhow::bail!("Coordinator closed connection") - } - Some(ParticipateResponse { - msg: Some(ResponseMsg::Position(p)), - }) => { - tracing::debug!(?p); - let len = p.connected_participants; - // e.g. displaying 1 / 2 instead of 0 / 2 - let pos = p.position + 1; - progress_bar.set_length(len as u64); - progress_bar.set_position(pos as u64); - progress_bar.set_message(format!( - "(your bid: {}, most recent slot bid: {})", - Amount::try_from( - p.your_bid.ok_or(anyhow!("expected bid amount"))? - )?, - Amount::try_from( - p.last_slot_bid.ok_or(anyhow!("expected top bid amount"))? - )? - )); - progress_bar.tick(); - } - Some(ParticipateResponse { - msg: - Some(ResponseMsg::ContributeNow(ContributeNow { - parent: Some(parent), - })), - }) => { - progress_bar.finish(); - break parent; - } - m => { - progress_bar.abandon(); - anyhow::bail!("Received unexpected message from coordinator: {:?}", m) - } - } - }; - println!("preparing contribution... (please keep this window open)"); - let (contribution, hash) = if *phase == 1 { - let parent = Phase1RawCeremonyCRS::unchecked_from_protobuf(unparsed_parent)? - .assume_valid(); - let contribution = Phase1CeremonyContribution::make(&parent); - let hash = contribution.hash(); - (contribution.try_into()?, hash) - } else { - let parent = Phase2RawCeremonyCRS::unchecked_from_protobuf(unparsed_parent)? 
- .assume_valid(); - let contribution = Phase2CeremonyContribution::make(&parent); - let hash = contribution.hash(); - (contribution.try_into()?, hash) - }; - println!("submitting contribution..."); - - req_tx - .send(ParticipateRequest { - msg: Some(RequestMsg::Contribution(contribution)), - }) - .await?; - println!("coordinator is validating contribution..."); - match response_rx.message().await? { - None => anyhow::bail!("Coordinator closed connection"), - Some(ParticipateResponse { - msg: Some(ResponseMsg::Confirm(Confirm { slot })), - }) => { - println!("contribution confirmed at slot {slot}"); - println!("thank you for your help summoning penumbra <3"); - println!("here's your contribution receipt (save this to verify inclusion in the final transcript):\n{}", hex::encode_upper(hash.as_ref())); - } - m => { - anyhow::bail!("Received unexpected message from coordinator: {:?}", m) - } - } - - Ok(()) - } - } - } -} diff --git a/crates/bin/pcli/src/main.rs b/crates/bin/pcli/src/main.rs index 3f56af08c1..43be79b1bf 100644 --- a/crates/bin/pcli/src/main.rs +++ b/crates/bin/pcli/src/main.rs @@ -63,7 +63,6 @@ async fn main() -> Result<()> { Command::View(view_cmd) => view_cmd.exec(&mut app).await?, Command::Validator(cmd) => cmd.exec(&mut app).await?, Command::Query(cmd) => cmd.exec(&mut app).await?, - Command::Ceremony(cmd) => cmd.exec(&mut app).await?, Command::Threshold(cmd) => cmd.exec(&mut app).await?, Command::Migrate(cmd) => cmd.exec(&mut app).await?, } diff --git a/crates/bin/pclientd/Cargo.toml b/crates/bin/pclientd/Cargo.toml index ff7dfc4944..40c0906f8a 100644 --- a/crates/bin/pclientd/Cargo.toml +++ b/crates/bin/pclientd/Cargo.toml @@ -26,7 +26,7 @@ http = {workspace = true} http-body = {workspace = true} metrics = {workspace = true} parking_lot = {workspace = true} -penumbra-app = {workspace = true} +penumbra-app = {workspace = true, default-features = false} penumbra-asset = {workspace = true, default-features = true} penumbra-custody = {workspace = 
true} penumbra-keys = {workspace = true, default-features = true} diff --git a/crates/bin/pd/Cargo.toml b/crates/bin/pd/Cargo.toml index e9fccdeb32..79f0b127db 100644 --- a/crates/bin/pd/Cargo.toml +++ b/crates/bin/pd/Cargo.toml @@ -59,7 +59,7 @@ metrics-util = "0.16.2" mime_guess = "2" once_cell = { workspace = true } pbjson-types = { workspace = true } -penumbra-app = { workspace = true } +penumbra-app = { workspace = true, default-features = true } penumbra-asset = { workspace = true, default-features = true } penumbra-auto-https = { path = "../../util/auto-https" } penumbra-compact-block = { workspace = true, default-features = true } diff --git a/crates/bin/pindexer/Cargo.toml b/crates/bin/pindexer/Cargo.toml index 2d6171b5c3..88e062b8ca 100644 --- a/crates/bin/pindexer/Cargo.toml +++ b/crates/bin/pindexer/Cargo.toml @@ -17,7 +17,7 @@ cometindex = {workspace = true} num-bigint = { version = "0.4" } penumbra-shielded-pool = {workspace = true, default-features = false} penumbra-stake = {workspace = true, default-features = false} -penumbra-app = {workspace = true} +penumbra-app = {workspace = true, default-features = false} penumbra-auction = {workspace = true, default-features = false} penumbra-dex = {workspace = true, default-features = false} penumbra-fee = {workspace = true, default-features = false} diff --git a/crates/core/app/Cargo.toml b/crates/core/app/Cargo.toml index f16d81a4e1..9013371d07 100644 --- a/crates/core/app/Cargo.toml +++ b/crates/core/app/Cargo.toml @@ -9,7 +9,28 @@ license = { workspace = true } publish = false [features] -default = ["std"] +component = [ + "dep:cnidarium", + "dep:cnidarium-component", + "penumbra-proto/cnidarium", + "penumbra-auction/component", + "penumbra-community-pool/component", + "penumbra-compact-block/component", + "penumbra-dex/component", + "penumbra-distributions/component", + "penumbra-fee/component", + "penumbra-funding/component", + "penumbra-governance/component", + "penumbra-ibc/component", + 
"penumbra-ibc/rpc", + "penumbra-sct/component", + "penumbra-shielded-pool/component", + "penumbra-stake/component", + "dep:tonic", + "dep:tonic-reflection", + "dep:tonic-web" +] +default = ["std", "component"] std = ["ark-ff/std", "ibc-types/std"] [dependencies] @@ -21,8 +42,9 @@ bech32 = { workspace = true } bincode = { workspace = true } bitvec = { workspace = true } blake2b_simd = { workspace = true } -cnidarium = { workspace = true, features = ["migration", "rpc"], default-features = true } -cnidarium-component = { workspace = true, default-features = true } +cfg-if = "1.0" +cnidarium = { workspace = true, optional = true, features = ["migration", "rpc"], default-features = true } +cnidarium-component = { workspace = true, optional = true, default-features = true } decaf377 = { workspace = true, default-features = true } decaf377-rdsa = { workspace = true } futures = { workspace = true } @@ -36,22 +58,22 @@ metrics = { workspace = true } once_cell = { workspace = true } parking_lot = { workspace = true } penumbra-asset = { workspace = true, default-features = true } -penumbra-auction = { workspace = true, default-features = true } -penumbra-community-pool = { workspace = true, default-features = true } -penumbra-compact-block = { workspace = true, default-features = true } -penumbra-dex = { workspace = true, default-features = true } -penumbra-distributions = { workspace = true, default-features = true } -penumbra-fee = { workspace = true, default-features = true } -penumbra-funding = { workspace = true, default-features = true } -penumbra-governance = { workspace = true, default-features = true } -penumbra-ibc = { workspace = true, features = ["component", "rpc"], default-features = true } +penumbra-auction = { workspace = true, default-features = false } +penumbra-community-pool = { workspace = true, default-features = false } +penumbra-compact-block = { workspace = true, default-features = false } +penumbra-dex = { workspace = true, default-features = false 
} +penumbra-distributions = { workspace = true, default-features = false } +penumbra-fee = { workspace = true, default-features = false } +penumbra-funding = { workspace = true, default-features = false } +penumbra-governance = { workspace = true, default-features = false } +penumbra-ibc = { workspace = true, default-features = false } penumbra-keys = { workspace = true, default-features = true } penumbra-num = { workspace = true, default-features = true } -penumbra-proof-params = { workspace = true, default-features = true } -penumbra-proto = { workspace = true, features = ["cnidarium"], default-features = true } -penumbra-sct = { workspace = true, default-features = true } -penumbra-shielded-pool = { workspace = true, features = ["component"], default-features = true } -penumbra-stake = { workspace = true, default-features = true } +penumbra-proof-params = { workspace = true, default-features = false } +penumbra-proto = { workspace = true, default-features = true } +penumbra-sct = { workspace = true, default-features = false } +penumbra-shielded-pool = { workspace = true, default-features = false } +penumbra-stake = { workspace = true, default-features = false } penumbra-tct = { workspace = true, default-features = true } penumbra-test-subscriber = { workspace = true } penumbra-tower-trace = { path = "../../util/tower-trace" } @@ -71,9 +93,9 @@ tendermint-light-client-verifier = { workspace = true } tendermint-proto = { workspace = true } tokio = { workspace = true, features = ["full", "tracing"] } tokio-util = { workspace = true } -tonic = { workspace = true } -tonic-reflection = { workspace = true } -tonic-web = { workspace = true } +tonic = { workspace = true, optional = true } +tonic-reflection = { workspace = true, optional = true } +tonic-web = { workspace = true, optional = true } tower = { workspace = true, features = ["full"] } tower-abci = "0.11" tower-actor = "0.1.0" diff --git a/crates/core/app/src/lib.rs b/crates/core/app/src/lib.rs index 
d915e9eaee..21e75bd50d 100644 --- a/crates/core/app/src/lib.rs +++ b/crates/core/app/src/lib.rs @@ -1,29 +1,8 @@ #![deny(clippy::unwrap_used)] #![cfg_attr(docsrs, feature(doc_auto_cfg))] -pub mod app; -pub mod genesis; -pub mod metrics; -pub mod params; -pub mod rpc; -pub mod server; - -mod action_handler; -mod community_pool_ext; -mod penumbra_host_chain; - -pub use crate::{ - action_handler::AppActionHandler, app::StateWriteExt, - community_pool_ext::CommunityPoolStateReadExt, metrics::register_metrics, - penumbra_host_chain::PenumbraHost, -}; - use once_cell::sync::Lazy; -/// Representation of the Penumbra application version. Notably, this is distinct -/// from the crate version(s). This number should only ever be incremented. -pub const APP_VERSION: u64 = 8; - pub static SUBSTORE_PREFIXES: Lazy> = Lazy::new(|| { vec![ penumbra_ibc::IBC_SUBSTORE_PREFIX.to_string(), @@ -34,5 +13,31 @@ pub static SUBSTORE_PREFIXES: Lazy> = Lazy::new(|| { /// The substore prefix used for storing historical CometBFT block data. pub static COMETBFT_SUBSTORE_PREFIX: &'static str = "cometbft-data"; -/// Temporary compat wrapper for duplicate trait impls -pub struct Compat<'a, T>(&'a T); +/// Representation of the Penumbra application version. Notably, this is distinct +/// from the crate version(s). This number should only ever be incremented. +pub const APP_VERSION: u64 = 8; + +pub mod genesis; +pub mod params; + +cfg_if::cfg_if! 
{ + if #[cfg(feature="component")] { + pub mod app; + pub mod metrics; + pub mod rpc; + pub mod server; + + mod action_handler; + mod community_pool_ext; + mod penumbra_host_chain; + + pub use crate::{ + action_handler::AppActionHandler, app::StateWriteExt, + community_pool_ext::CommunityPoolStateReadExt, metrics::register_metrics, + penumbra_host_chain::PenumbraHost, + }; + + /// Temporary compat wrapper for duplicate trait impls + pub struct Compat<'a, T>(&'a T); + } +} diff --git a/crates/core/asset/Cargo.toml b/crates/core/asset/Cargo.toml index 8c17fbdcb1..76a5d2a2bb 100644 --- a/crates/core/asset/Cargo.toml +++ b/crates/core/asset/Cargo.toml @@ -30,6 +30,7 @@ decaf377-fmd = {workspace = true} decaf377-rdsa = {workspace = true} derivative = {workspace = true} ethnum = {workspace = true} +getrandom = {workspace = true, features = ["js"]} hex = {workspace = true} ibig = {workspace = true} num-bigint = {workspace = true} diff --git a/crates/crypto/tct/Cargo.toml b/crates/crypto/tct/Cargo.toml index aaae53d946..211051c579 100644 --- a/crates/crypto/tct/Cargo.toml +++ b/crates/crypto/tct/Cargo.toml @@ -20,6 +20,7 @@ blake2b_simd = {workspace = true} decaf377 = {workspace = true, default-features = true} derivative = {workspace = true} futures = {workspace = true} +getrandom = {workspace = true, features = ["js"]} hash_hasher = "2" hex = {workspace = true} im = {workspace = true, features = ["serde"]} diff --git a/crates/view/Cargo.toml b/crates/view/Cargo.toml index 4f43124d18..aebc7e6fa8 100644 --- a/crates/view/Cargo.toml +++ b/crates/view/Cargo.toml @@ -33,7 +33,7 @@ ibc-types = {workspace = true, default-features = false} metrics = {workspace = true} once_cell = {workspace = true} parking_lot = {workspace = true} -penumbra-app = {workspace = true} +penumbra-app = {workspace = true, default-features = false} penumbra-asset = {workspace = true, default-features = true} penumbra-community-pool = {workspace = true, default-features = false} penumbra-compact-block = 
{workspace = true, default-features = false} @@ -51,7 +51,7 @@ penumbra-shielded-pool = {workspace = true, default-features = false} penumbra-stake = {workspace = true, default-features = false} penumbra-tct = {workspace = true, default-features = true} penumbra-transaction = {workspace = true, default-features = true} -penumbra-auction = {workspace = true, default-features = true} +penumbra-auction = {workspace = true, default-features = false} prost = {workspace = true} r2d2 = {workspace = true} r2d2_sqlite = {workspace = true, features = ["bundled"]} diff --git a/crates/view/src/service.rs b/crates/view/src/service.rs index e0dccbeeff..7971258eac 100644 --- a/crates/view/src/service.rs +++ b/crates/view/src/service.rs @@ -186,22 +186,9 @@ impl ViewServer { transaction: Transaction, await_detection: bool, ) -> BroadcastTransactionStream { - use penumbra_app::AppActionHandler; - let self2 = self.clone(); try_stream! { - // 1. Pre-check the transaction for (stateless) validity. - transaction - .check_stateless(()) - .await - .map_err(|e| { - tonic::Status::unavailable(format!( - "transaction pre-submission checks failed: {:#?}", - e - )) - })?; - - // 2. Broadcast the transaction to the network. + // 1. Broadcast the transaction to the network. // Note that "synchronous" here means "wait for the tx to be accepted by // the fullnode", not "wait for the tx to be included on chain. let mut fullnode_client = self2.tendermint_proxy_client().await @@ -241,7 +228,7 @@ impl ViewServer { // The transaction was submitted so we provide a status update yield BroadcastTransactionResponse{ status: Some(BroadcastStatus::BroadcastSuccess(BroadcastSuccess{id:Some(transaction.id().into())}))}; - // 3. Optionally wait for the transaction to be detected by the view service. + // 2. Optionally wait for the transaction to be detected by the view service. 
let nullifier = if await_detection { // This needs to be only *spend* nullifiers because the nullifier detection // is broken for swaps, https://github.com/penumbra-zone/penumbra/issues/1749 diff --git a/crates/wallet/Cargo.toml b/crates/wallet/Cargo.toml index 30a645a3aa..6bce49542c 100644 --- a/crates/wallet/Cargo.toml +++ b/crates/wallet/Cargo.toml @@ -20,16 +20,16 @@ bincode = {workspace = true} bytes = {workspace = true} decaf377 = {workspace = true, default-features = true} hex = {workspace = true} -penumbra-app = {workspace = true} +penumbra-app = {workspace = true, default-features = false} penumbra-asset = {workspace = true, default-features = true} penumbra-custody = {workspace = true} -penumbra-dex = {workspace = true, default-features = true} -penumbra-fee = {workspace = true, default-features = true} -penumbra-governance = {workspace = true, default-features = true} +penumbra-dex = {workspace = true, default-features = false} +penumbra-fee = {workspace = true, default-features = false} +penumbra-governance = {workspace = true, default-features = false} penumbra-keys = {workspace = true, default-features = true} penumbra-num = {workspace = true, default-features = true} penumbra-proto = {workspace = true, default-features = true} -penumbra-stake = {workspace = true, default-features = true} +penumbra-stake = {workspace = true, default-features = false} penumbra-tct = {workspace = true, default-features = true} penumbra-sct = {workspace = true, default-features = false} penumbra-transaction = {workspace = true, default-features = true} From 6508c03550fc480cd60c1f6c7ff7dfa4af3e2960 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=BAc=C3=A1s=20Meier?= Date: Fri, 11 Oct 2024 09:35:11 -0700 Subject: [PATCH 38/43] feat: implement DomainType for DEX events (#4893) ## Describe your changes Towards #4869; since doing that requires parsing some dex events, I took the opportunity to fill out the rest of the events in the dex. 
## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > event emission only ## Requested Review Some sanity checks that this PR doesn't accidentally change how events are emitted would be nice. --- .../action_handler/position/close.rs | 9 +- .../dex/src/component/action_handler/swap.rs | 4 +- .../component/action_handler/swap_claim.rs | 4 +- .../core/component/dex/src/component/arb.rs | 10 +- .../src/component/circuit_breaker/value.rs | 20 +- .../core/component/dex/src/component/dex.rs | 15 +- .../dex/src/component/position_manager.rs | 14 +- crates/core/component/dex/src/event.rs | 681 +++++++++++++++--- 8 files changed, 640 insertions(+), 117 deletions(-) diff --git a/crates/core/component/dex/src/component/action_handler/position/close.rs b/crates/core/component/dex/src/component/action_handler/position/close.rs index 9ba244126f..a13d8d8764 100644 --- a/crates/core/component/dex/src/component/action_handler/position/close.rs +++ b/crates/core/component/dex/src/component/action_handler/position/close.rs @@ -2,7 +2,7 @@ use anyhow::Result; use async_trait::async_trait; use cnidarium::StateWrite; use cnidarium_component::ActionHandler; -use penumbra_proto::StateWriteProto as _; +use penumbra_proto::{DomainType as _, StateWriteProto as _}; use crate::{component::PositionManager, event, lp::action::PositionClose}; @@ -25,7 +25,12 @@ impl ActionHandler for PositionClose { state.queue_close_position(self.position_id); // queue position close you will... 
- state.record_proto(event::queue_position_close(self)); + state.record_proto( + event::EventQueuePositionClose { + position_id: self.position_id, + } + .to_proto(), + ); Ok(()) } diff --git a/crates/core/component/dex/src/component/action_handler/swap.rs b/crates/core/component/dex/src/component/action_handler/swap.rs index 145262252e..8b69289ba0 100644 --- a/crates/core/component/dex/src/component/action_handler/swap.rs +++ b/crates/core/component/dex/src/component/action_handler/swap.rs @@ -5,7 +5,7 @@ use async_trait::async_trait; use cnidarium::StateWrite; use cnidarium_component::ActionHandler; use penumbra_proof_params::SWAP_PROOF_VERIFICATION_KEY; -use penumbra_proto::StateWriteProto; +use penumbra_proto::{DomainType as _, StateWriteProto}; use penumbra_sct::component::source::SourceContext; use crate::{ @@ -66,7 +66,7 @@ impl ActionHandler for Swap { ); state.add_recently_accessed_asset(swap.body.trading_pair.asset_2(), fixed_candidates); - state.record_proto(event::swap(self)); + state.record_proto(event::EventSwap::from(self).to_proto()); Ok(()) } diff --git a/crates/core/component/dex/src/component/action_handler/swap_claim.rs b/crates/core/component/dex/src/component/action_handler/swap_claim.rs index 48b35d228c..cf5408f305 100644 --- a/crates/core/component/dex/src/component/action_handler/swap_claim.rs +++ b/crates/core/component/dex/src/component/action_handler/swap_claim.rs @@ -7,7 +7,7 @@ use penumbra_txhash::TransactionContext; use cnidarium::{StateRead, StateWrite}; use penumbra_proof_params::SWAPCLAIM_PROOF_VERIFICATION_KEY; -use penumbra_proto::StateWriteProto; +use penumbra_proto::{DomainType as _, StateWriteProto}; use penumbra_sct::component::{ source::SourceContext, tree::{SctManager, VerificationExt}, @@ -95,7 +95,7 @@ impl ActionHandler for SwapClaim { state.nullify(self.body.nullifier, source).await; - state.record_proto(event::swap_claim(self)); + state.record_proto(event::EventSwapClaim::from(self).to_proto()); Ok(()) } diff --git 
a/crates/core/component/dex/src/component/arb.rs b/crates/core/component/dex/src/component/arb.rs index 9bb4b160ca..c6618d5756 100644 --- a/crates/core/component/dex/src/component/arb.rs +++ b/crates/core/component/dex/src/component/arb.rs @@ -5,7 +5,7 @@ use anyhow::Result; use async_trait::async_trait; use cnidarium::{StateDelta, StateWrite}; use penumbra_asset::{asset, Value}; -use penumbra_proto::StateWriteProto as _; +use penumbra_proto::{DomainType as _, StateWriteProto as _}; use penumbra_sct::component::clock::EpochRead; use tracing::instrument; @@ -134,7 +134,13 @@ pub trait Arbitrage: StateWrite + Sized { .await?; // Emit an ABCI event detailing the arb execution. - self_mut.record_proto(event::arb_execution(height, se)); + self_mut.record_proto( + event::EventArbExecution { + height, + swap_execution: se, + } + .to_proto(), + ); return Ok(Some(Value { amount: arb_profit, asset_id: arb_token, diff --git a/crates/core/component/dex/src/component/circuit_breaker/value.rs b/crates/core/component/dex/src/component/circuit_breaker/value.rs index 4708da46b0..bbe9785bad 100644 --- a/crates/core/component/dex/src/component/circuit_breaker/value.rs +++ b/crates/core/component/dex/src/component/circuit_breaker/value.rs @@ -2,7 +2,7 @@ use anyhow::{anyhow, Result}; use cnidarium::{StateRead, StateWrite}; use penumbra_asset::{asset, Value}; use penumbra_num::Amount; -use penumbra_proto::{StateReadProto, StateWriteProto}; +use penumbra_proto::{DomainType, StateReadProto, StateWriteProto}; use tonic::async_trait; use tracing::instrument; @@ -39,7 +39,14 @@ pub(crate) trait ValueCircuitBreaker: StateWrite { tracing::debug!(?prev_balance, ?new_balance, "crediting the dex VCB"); self.put(state_key::value_balance(&value.asset_id), new_balance); - self.record_proto(event::vcb_credit(value.asset_id, prev_balance, new_balance)); + self.record_proto( + event::EventValueCircuitBreakerCredit { + asset_id: value.asset_id, + previous_balance: prev_balance, + new_balance, + } + 
.to_proto(), + ); Ok(()) } @@ -61,7 +68,14 @@ pub(crate) trait ValueCircuitBreaker: StateWrite { tracing::debug!(?prev_balance, ?new_balance, "crediting the dex VCB"); self.put(state_key::value_balance(&value.asset_id), new_balance); - self.record_proto(event::vcb_debit(value.asset_id, prev_balance, new_balance)); + self.record_proto( + event::EventValueCircuitBreakerDebit { + asset_id: value.asset_id, + previous_balance: prev_balance, + new_balance, + } + .to_proto(), + ); Ok(()) } } diff --git a/crates/core/component/dex/src/component/dex.rs b/crates/core/component/dex/src/component/dex.rs index dfe982efb3..816a79f965 100644 --- a/crates/core/component/dex/src/component/dex.rs +++ b/crates/core/component/dex/src/component/dex.rs @@ -10,7 +10,7 @@ use penumbra_asset::{Value, STAKING_TOKEN_ASSET_ID}; use penumbra_fee::component::StateWriteExt as _; use penumbra_fee::Fee; use penumbra_num::Amount; -use penumbra_proto::{StateReadProto, StateWriteProto}; +use penumbra_proto::{DomainType as _, StateReadProto, StateWriteProto}; use tendermint::v0_37::abci; use tracing::instrument; @@ -399,11 +399,14 @@ pub(crate) trait InternalDexWrite: StateWrite { self.object_put(state_key::pending_outputs(), outputs); // Also generate an ABCI event for indexing: - self.record_proto(event::batch_swap( - output_data, - swap_execution_1_for_2, - swap_execution_2_for_1, - )); + self.record_proto( + event::EventBatchSwap { + batch_swap_output_data: output_data, + swap_execution_1_for_2, + swap_execution_2_for_1, + } + .to_proto(), + ); Ok(()) } diff --git a/crates/core/component/dex/src/component/position_manager.rs b/crates/core/component/dex/src/component/position_manager.rs index 0a1c9bcf29..5858f185c8 100644 --- a/crates/core/component/dex/src/component/position_manager.rs +++ b/crates/core/component/dex/src/component/position_manager.rs @@ -205,7 +205,7 @@ pub trait PositionManager: StateWrite + PositionRead { self.update_position(id, Some(prev_state), new_state) .await?; - 
self.record_proto(event::position_close_by_id(*id)); + self.record_proto(event::EventPositionClose { position_id: *id }.to_proto()); Ok(()) } @@ -279,7 +279,7 @@ pub trait PositionManager: StateWrite + PositionRead { self.mark_trading_pair_as_active(position.phi.pair); // Finally, record the new position state. - self.record_proto(event::position_open(&position)); + self.record_proto(event::EventPositionOpen::from(position.clone()).to_proto()); self.update_position(&id, None, position).await?; Ok(()) @@ -349,7 +349,9 @@ pub trait PositionManager: StateWrite + PositionRead { // We have already short-circuited no-op execution updates, so we can emit an execution // event and not worry about duplicates. - self.record_proto(event::position_execution(&prev_state, &new_state, context)); + self.record_proto( + event::EventPositionExecution::in_context(&prev_state, &new_state, context).to_proto(), + ); // Handle "close-on-fill": automatically flip the position state to "closed" if // either of the reserves are zero. @@ -363,7 +365,7 @@ pub trait PositionManager: StateWrite + PositionRead { ); new_state.state = position::State::Closed; - self.record_proto(event::position_close_by_id(position_id)); + self.record_proto(event::EventPositionClose { position_id }.to_proto()); } } @@ -431,7 +433,9 @@ pub trait PositionManager: StateWrite + PositionRead { // Record an event prior to updating the position state, so we have access to // the current reserves. - self.record_proto(event::position_withdraw(position_id, &prev_state)); + self.record_proto( + event::EventPositionWithdraw::in_context(position_id, &prev_state).to_proto(), + ); // Grab a copy of the final reserves of the position to return to the caller. 
let reserves = prev_state.reserves.balance(&prev_state.phi.pair); diff --git a/crates/core/component/dex/src/event.rs b/crates/core/component/dex/src/event.rs index a595dac5b3..5e2f052ead 100644 --- a/crates/core/component/dex/src/event.rs +++ b/crates/core/component/dex/src/event.rs @@ -1,143 +1,634 @@ use crate::{ - lp::{ - action::PositionClose, - position::{self, Position}, - }, + lp::position::{self, Position}, swap::Swap, swap_claim::SwapClaim, - BatchSwapOutputData, CandlestickData, DirectedTradingPair, SwapExecution, + BatchSwapOutputData, CandlestickData, DirectedTradingPair, SwapExecution, TradingPair, }; use anyhow::{anyhow, Context}; -use prost::Name; - use penumbra_asset::asset; use penumbra_num::Amount; use penumbra_proto::{penumbra::core::component::dex::v1 as pb, DomainType}; +use penumbra_sct::Nullifier; +use penumbra_tct::StateCommitment; +use prost::Name as _; + +#[derive(Clone, Debug)] +pub struct EventSwap { + pub trading_pair: TradingPair, + pub delta_1_i: Amount, + pub delta_2_i: Amount, + pub swap_commitment: StateCommitment, +} + +impl From for EventSwap { + fn from(value: Swap) -> Self { + Self::from(&value) + } +} + +impl From<&Swap> for EventSwap { + fn from(value: &Swap) -> Self { + Self { + trading_pair: value.body.trading_pair, + delta_1_i: value.body.delta_1_i, + delta_2_i: value.body.delta_2_i, + swap_commitment: value.body.payload.commitment, + } + } +} + +impl TryFrom for EventSwap { + type Error = anyhow::Error; + + fn try_from(value: pb::EventSwap) -> Result { + fn inner(value: pb::EventSwap) -> anyhow::Result { + Ok(EventSwap { + trading_pair: value + .trading_pair + .ok_or(anyhow!("missing `trading_pair`"))? + .try_into()?, + delta_1_i: value + .delta_1_i + .ok_or(anyhow!("missing `delta_1_i`"))? + .try_into()?, + delta_2_i: value + .delta_2_i + .ok_or(anyhow!("missing `delta_2_i`"))? + .try_into()?, + swap_commitment: value + .swap_commitment + .ok_or(anyhow!("missing `swap_commitment`"))? 
+ .try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventSwap::NAME)) + } +} + +impl From for pb::EventSwap { + fn from(value: EventSwap) -> Self { + Self { + trading_pair: Some(value.trading_pair.into()), + delta_1_i: Some(value.delta_1_i.into()), + delta_2_i: Some(value.delta_2_i.into()), + swap_commitment: Some(value.swap_commitment.into()), + } + } +} + +impl DomainType for EventSwap { + type Proto = pb::EventSwap; +} + +#[derive(Clone, Debug)] +pub struct EventSwapClaim { + pub trading_pair: TradingPair, + pub output_1_commitment: StateCommitment, + pub output_2_commitment: StateCommitment, + pub nullifier: Nullifier, +} -pub fn swap(swap: &Swap) -> pb::EventSwap { - pb::EventSwap { - trading_pair: Some(swap.body.trading_pair.into()), - delta_1_i: Some(swap.body.delta_1_i.into()), - delta_2_i: Some(swap.body.delta_2_i.into()), - swap_commitment: Some(swap.body.payload.commitment.into()), +impl From for EventSwapClaim { + fn from(value: SwapClaim) -> Self { + Self::from(&value) } } -pub fn swap_claim(swap_claim: &SwapClaim) -> pb::EventSwapClaim { - pb::EventSwapClaim { - trading_pair: Some(swap_claim.body.output_data.trading_pair.into()), - output_1_commitment: Some(swap_claim.body.output_1_commitment.into()), - output_2_commitment: Some(swap_claim.body.output_2_commitment.into()), - nullifier: Some(swap_claim.body.nullifier.into()), +impl From<&SwapClaim> for EventSwapClaim { + fn from(value: &SwapClaim) -> Self { + Self { + trading_pair: value.body.output_data.trading_pair, + output_1_commitment: value.body.output_1_commitment, + output_2_commitment: value.body.output_2_commitment, + nullifier: value.body.nullifier, + } } } -pub fn position_open(position: &Position) -> pb::EventPositionOpen { - pb::EventPositionOpen { - position_id: Some(position.id().into()), - trading_pair: Some(position.phi.pair.into()), - reserves_1: Some(position.reserves.r1.into()), - reserves_2: Some(position.reserves.r2.into()), - trading_fee: 
position.phi.component.fee, - position: Some(position.clone().into()), +impl TryFrom for EventSwapClaim { + type Error = anyhow::Error; + + fn try_from(value: pb::EventSwapClaim) -> Result { + fn inner(value: pb::EventSwapClaim) -> anyhow::Result { + Ok(EventSwapClaim { + trading_pair: value + .trading_pair + .ok_or(anyhow!("missing `trading_pair`"))? + .try_into()?, + output_1_commitment: value + .output_1_commitment + .ok_or(anyhow!("missing `output_1_commitment`"))? + .try_into()?, + output_2_commitment: value + .output_2_commitment + .ok_or(anyhow!("missing `output_2_commitment`"))? + .try_into()?, + nullifier: value + .nullifier + .ok_or(anyhow!("missing `nullifier`"))? + .try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventSwapClaim::NAME)) + } +} + +impl From for pb::EventSwapClaim { + fn from(value: EventSwapClaim) -> Self { + Self { + trading_pair: Some(value.trading_pair.into()), + output_1_commitment: Some(value.output_1_commitment.into()), + output_2_commitment: Some(value.output_2_commitment.into()), + nullifier: Some(value.nullifier.into()), + } + } +} + +impl DomainType for EventSwapClaim { + type Proto = pb::EventSwapClaim; +} + +#[derive(Clone, Debug)] +pub struct EventPositionOpen { + pub position_id: position::Id, + pub trading_pair: TradingPair, + pub reserves_1: Amount, + pub reserves_2: Amount, + pub trading_fee: u32, + pub position: Position, +} + +impl From for EventPositionOpen { + fn from(value: Position) -> Self { + Self { + position_id: value.id(), + trading_pair: value.phi.pair, + reserves_1: value.reserves_1().amount, + reserves_2: value.reserves_2().amount, + trading_fee: value.phi.component.fee, + position: value, + } } } -pub fn position_close_by_id(id: position::Id) -> pb::EventPositionClose { - pb::EventPositionClose { - position_id: Some(id.into()), +impl TryFrom for EventPositionOpen { + type Error = anyhow::Error; + + fn try_from(value: pb::EventPositionOpen) -> Result { + fn inner(value: 
pb::EventPositionOpen) -> anyhow::Result { + Ok(EventPositionOpen { + position_id: value + .position_id + .ok_or(anyhow!("missing `position_id`"))? + .try_into()?, + trading_pair: value + .trading_pair + .ok_or(anyhow!("missing `trading_pair`"))? + .try_into()?, + reserves_1: value + .reserves_1 + .ok_or(anyhow!("missing `reserves_1`"))? + .try_into()?, + reserves_2: value + .reserves_2 + .ok_or(anyhow!("missing `reserves_2`"))? + .try_into()?, + trading_fee: value.trading_fee, + position: value + .position + .ok_or(anyhow!("missing `position`"))? + .try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventPositionOpen::NAME)) } } -pub fn position_close(action: &PositionClose) -> pb::EventPositionClose { - pb::EventPositionClose { - position_id: Some(action.position_id.into()), +impl From for pb::EventPositionOpen { + fn from(value: EventPositionOpen) -> Self { + Self { + position_id: Some(value.position_id.into()), + trading_pair: Some(value.trading_pair.into()), + reserves_1: Some(value.reserves_1.into()), + reserves_2: Some(value.reserves_2.into()), + trading_fee: value.trading_fee, + position: Some(value.position.into()), + } } } -pub fn queue_position_close(action: &PositionClose) -> pb::EventQueuePositionClose { - pb::EventQueuePositionClose { - position_id: Some(action.position_id.into()), +impl DomainType for EventPositionOpen { + type Proto = pb::EventPositionOpen; +} + +#[derive(Clone, Debug)] +pub struct EventPositionClose { + pub position_id: position::Id, +} + +impl TryFrom for EventPositionClose { + type Error = anyhow::Error; + + fn try_from(value: pb::EventPositionClose) -> Result { + fn inner(value: pb::EventPositionClose) -> anyhow::Result { + Ok(EventPositionClose { + position_id: value + .position_id + .ok_or(anyhow!("missing `position_id`"))? 
+ .try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventPositionClose::NAME)) } } -pub fn position_withdraw( - position_id: position::Id, - final_position_state: &Position, -) -> pb::EventPositionWithdraw { - let sequence = if let position::State::Withdrawn { sequence, .. } = final_position_state.state { - sequence + 1 - } else { - 0 - }; - pb::EventPositionWithdraw { - position_id: Some(position_id.into()), - trading_pair: Some(final_position_state.phi.pair.into()), - reserves_1: Some(final_position_state.reserves.r1.into()), - reserves_2: Some(final_position_state.reserves.r2.into()), - sequence, +impl From for pb::EventPositionClose { + fn from(value: EventPositionClose) -> Self { + Self { + position_id: Some(value.position_id.into()), + } } } -pub fn position_execution( - prev_state: &Position, - new_state: &Position, - context: DirectedTradingPair, -) -> pb::EventPositionExecution { - pb::EventPositionExecution { - position_id: Some(new_state.id().into()), - trading_pair: Some(new_state.phi.pair.into()), - reserves_1: Some(new_state.reserves.r1.into()), - reserves_2: Some(new_state.reserves.r2.into()), - prev_reserves_1: Some(prev_state.reserves.r1.into()), - prev_reserves_2: Some(prev_state.reserves.r2.into()), - context: Some(context.into()), +impl DomainType for EventPositionClose { + type Proto = pb::EventPositionClose; +} + +#[derive(Clone, Debug)] +pub struct EventQueuePositionClose { + pub position_id: position::Id, +} + +impl TryFrom for EventQueuePositionClose { + type Error = anyhow::Error; + + fn try_from(value: pb::EventQueuePositionClose) -> Result { + fn inner(value: pb::EventQueuePositionClose) -> anyhow::Result { + Ok(EventQueuePositionClose { + position_id: value + .position_id + .ok_or(anyhow!("missing `position_id`"))? 
+ .try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventQueuePositionClose::NAME)) } } -pub fn batch_swap( - bsod: BatchSwapOutputData, - swap_execution_1_for_2: Option, - swap_execution_2_for_1: Option, -) -> pb::EventBatchSwap { - pb::EventBatchSwap { - batch_swap_output_data: Some(bsod.into()), - swap_execution_1_for_2: swap_execution_1_for_2.map(Into::into), - swap_execution_2_for_1: swap_execution_2_for_1.map(Into::into), +impl From for pb::EventQueuePositionClose { + fn from(value: EventQueuePositionClose) -> Self { + Self { + position_id: Some(value.position_id.into()), + } } } -pub fn arb_execution(height: u64, swap_execution: SwapExecution) -> pb::EventArbExecution { - pb::EventArbExecution { - height, - swap_execution: Some(swap_execution.into()), +impl DomainType for EventQueuePositionClose { + type Proto = pb::EventQueuePositionClose; +} + +#[derive(Clone, Debug)] +pub struct EventPositionWithdraw { + pub position_id: position::Id, + pub trading_pair: TradingPair, + pub reserves_1: Amount, + pub reserves_2: Amount, + pub sequence: u64, +} + +impl EventPositionWithdraw { + /// Create this event using the usual context available to us. + pub fn in_context(position_id: position::Id, final_position_state: &Position) -> Self { + let sequence = + if let position::State::Withdrawn { sequence, .. 
} = final_position_state.state { + sequence + 1 + } else { + 0 + }; + Self { + position_id, + trading_pair: final_position_state.phi.pair, + reserves_1: final_position_state.reserves.r1, + reserves_2: final_position_state.reserves.r2, + sequence, + } } } -pub fn vcb_credit( - asset_id: asset::Id, - previous_balance: Amount, - new_balance: Amount, -) -> pb::EventValueCircuitBreakerCredit { - pb::EventValueCircuitBreakerCredit { - asset_id: Some(asset_id.into()), - previous_balance: Some(previous_balance.into()), - new_balance: Some(new_balance.into()), +impl TryFrom for EventPositionWithdraw { + type Error = anyhow::Error; + + fn try_from(value: pb::EventPositionWithdraw) -> Result { + fn inner(value: pb::EventPositionWithdraw) -> anyhow::Result { + Ok(EventPositionWithdraw { + position_id: value + .position_id + .ok_or(anyhow!("missing `position_id`"))? + .try_into()?, + trading_pair: value + .trading_pair + .ok_or(anyhow!("missing `trading_pair`"))? + .try_into()?, + reserves_1: value + .reserves_1 + .ok_or(anyhow!("missing `reserves_1`"))? + .try_into()?, + reserves_2: value + .reserves_2 + .ok_or(anyhow!("missing `reserves_2`"))? 
+ .try_into()?, + sequence: value.sequence, + }) + } + inner(value).context(format!("parsing {}", pb::EventPositionWithdraw::NAME)) } } -pub fn vcb_debit( - asset_id: asset::Id, - previous_balance: Amount, - new_balance: Amount, -) -> pb::EventValueCircuitBreakerDebit { - pb::EventValueCircuitBreakerDebit { - asset_id: Some(asset_id.into()), - previous_balance: Some(previous_balance.into()), - new_balance: Some(new_balance.into()), +impl From for pb::EventPositionWithdraw { + fn from(value: EventPositionWithdraw) -> Self { + Self { + position_id: Some(value.position_id.into()), + trading_pair: Some(value.trading_pair.into()), + reserves_1: Some(value.reserves_1.into()), + reserves_2: Some(value.reserves_2.into()), + sequence: value.sequence, + } } } +impl DomainType for EventPositionWithdraw { + type Proto = pb::EventPositionWithdraw; +} + +#[derive(Clone, Debug)] +pub struct EventPositionExecution { + pub position_id: position::Id, + pub trading_pair: TradingPair, + pub reserves_1: Amount, + pub reserves_2: Amount, + pub prev_reserves_1: Amount, + pub prev_reserves_2: Amount, + pub context: DirectedTradingPair, +} + +impl EventPositionExecution { + /// Create this event using the usual context available to us. + pub fn in_context( + prev_state: &Position, + new_state: &Position, + context: DirectedTradingPair, + ) -> Self { + Self { + position_id: new_state.id(), + trading_pair: new_state.phi.pair, + reserves_1: new_state.reserves_1().amount, + reserves_2: new_state.reserves_2().amount, + prev_reserves_1: prev_state.reserves_1().amount, + prev_reserves_2: prev_state.reserves_2().amount, + context, + } + } +} + +impl TryFrom for EventPositionExecution { + type Error = anyhow::Error; + + fn try_from(value: pb::EventPositionExecution) -> Result { + fn inner(value: pb::EventPositionExecution) -> anyhow::Result { + Ok(EventPositionExecution { + position_id: value + .position_id + .ok_or(anyhow!("missing `position_id`"))? 
+ .try_into()?, + trading_pair: value + .trading_pair + .ok_or(anyhow!("missing `trading_pair`"))? + .try_into()?, + reserves_1: value + .reserves_1 + .ok_or(anyhow!("missing `reserves_1`"))? + .try_into()?, + reserves_2: value + .reserves_2 + .ok_or(anyhow!("missing `reserves_2`"))? + .try_into()?, + prev_reserves_1: value + .prev_reserves_1 + .ok_or(anyhow!("missing `prev_reserves_1`"))? + .try_into()?, + prev_reserves_2: value + .prev_reserves_2 + .ok_or(anyhow!("missing `prev_reserves_2`"))? + .try_into()?, + context: value + .context + .ok_or(anyhow!("missing `context`"))? + .try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventPositionExecution::NAME)) + } +} + +impl From for pb::EventPositionExecution { + fn from(value: EventPositionExecution) -> Self { + Self { + position_id: Some(value.position_id.into()), + trading_pair: Some(value.trading_pair.into()), + reserves_1: Some(value.reserves_1.into()), + reserves_2: Some(value.reserves_2.into()), + prev_reserves_1: Some(value.prev_reserves_1.into()), + prev_reserves_2: Some(value.prev_reserves_2.into()), + context: Some(value.context.into()), + } + } +} + +impl DomainType for EventPositionExecution { + type Proto = pb::EventPositionExecution; +} + +#[derive(Clone, Debug)] +pub struct EventBatchSwap { + pub batch_swap_output_data: BatchSwapOutputData, + pub swap_execution_1_for_2: Option, + pub swap_execution_2_for_1: Option, +} + +impl TryFrom for EventBatchSwap { + type Error = anyhow::Error; + + fn try_from(value: pb::EventBatchSwap) -> Result { + fn inner(value: pb::EventBatchSwap) -> anyhow::Result { + Ok(EventBatchSwap { + batch_swap_output_data: value + .batch_swap_output_data + .ok_or(anyhow!("missing `batch_swap_output_data`"))? 
+ .try_into()?, + swap_execution_1_for_2: value + .swap_execution_1_for_2 + .map(|x| x.try_into()) + .transpose()?, + swap_execution_2_for_1: value + .swap_execution_2_for_1 + .map(|x| x.try_into()) + .transpose()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventBatchSwap::NAME)) + } +} + +impl From for pb::EventBatchSwap { + fn from(value: EventBatchSwap) -> Self { + Self { + batch_swap_output_data: Some(value.batch_swap_output_data.into()), + swap_execution_1_for_2: value.swap_execution_1_for_2.map(|x| x.into()), + swap_execution_2_for_1: value.swap_execution_2_for_1.map(|x| x.into()), + } + } +} + +impl DomainType for EventBatchSwap { + type Proto = pb::EventBatchSwap; +} + +#[derive(Clone, Debug)] +pub struct EventArbExecution { + pub height: u64, + pub swap_execution: SwapExecution, +} + +impl TryFrom for EventArbExecution { + type Error = anyhow::Error; + + fn try_from(value: pb::EventArbExecution) -> Result { + fn inner(value: pb::EventArbExecution) -> anyhow::Result { + Ok(EventArbExecution { + height: value.height, + swap_execution: value + .swap_execution + .ok_or(anyhow!("missing `swap_execution`"))? 
+ .try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventArbExecution::NAME)) + } +} + +impl From for pb::EventArbExecution { + fn from(value: EventArbExecution) -> Self { + Self { + height: value.height, + swap_execution: Some(value.swap_execution.into()), + } + } +} + +impl DomainType for EventArbExecution { + type Proto = pb::EventArbExecution; +} + +#[derive(Clone, Debug)] +pub struct EventValueCircuitBreakerCredit { + pub asset_id: asset::Id, + pub previous_balance: Amount, + pub new_balance: Amount, +} + +impl TryFrom for EventValueCircuitBreakerCredit { + type Error = anyhow::Error; + + fn try_from(value: pb::EventValueCircuitBreakerCredit) -> Result { + fn inner( + value: pb::EventValueCircuitBreakerCredit, + ) -> anyhow::Result { + Ok(EventValueCircuitBreakerCredit { + asset_id: value + .asset_id + .ok_or(anyhow!("missing `asset_id`"))? + .try_into()?, + previous_balance: value + .previous_balance + .ok_or(anyhow!("missing `previous_balance`"))? + .try_into()?, + new_balance: value + .new_balance + .ok_or(anyhow!("missing `new_balance`"))? 
+ .try_into()?, + }) + } + inner(value).context(format!( + "parsing {}", + pb::EventValueCircuitBreakerCredit::NAME + )) + } +} + +impl From for pb::EventValueCircuitBreakerCredit { + fn from(value: EventValueCircuitBreakerCredit) -> Self { + Self { + asset_id: Some(value.asset_id.into()), + previous_balance: Some(value.previous_balance.into()), + new_balance: Some(value.new_balance.into()), + } + } +} + +impl DomainType for EventValueCircuitBreakerCredit { + type Proto = pb::EventValueCircuitBreakerCredit; +} + +#[derive(Clone, Debug)] +pub struct EventValueCircuitBreakerDebit { + pub asset_id: asset::Id, + pub previous_balance: Amount, + pub new_balance: Amount, +} + +impl TryFrom for EventValueCircuitBreakerDebit { + type Error = anyhow::Error; + + fn try_from(value: pb::EventValueCircuitBreakerDebit) -> Result { + fn inner( + value: pb::EventValueCircuitBreakerDebit, + ) -> anyhow::Result { + Ok(EventValueCircuitBreakerDebit { + asset_id: value + .asset_id + .ok_or(anyhow!("missing `asset_id`"))? + .try_into()?, + previous_balance: value + .previous_balance + .ok_or(anyhow!("missing `previous_balance`"))? + .try_into()?, + new_balance: value + .new_balance + .ok_or(anyhow!("missing `new_balance`"))? 
+ .try_into()?, + }) + } + inner(value).context(format!( + "parsing {}", + pb::EventValueCircuitBreakerDebit::NAME + )) + } +} + +impl From for pb::EventValueCircuitBreakerDebit { + fn from(value: EventValueCircuitBreakerDebit) -> Self { + Self { + asset_id: Some(value.asset_id.into()), + previous_balance: Some(value.previous_balance.into()), + new_balance: Some(value.new_balance.into()), + } + } +} + +impl DomainType for EventValueCircuitBreakerDebit { + type Proto = pb::EventValueCircuitBreakerDebit; +} + #[derive(Clone, Debug)] pub struct EventCandlestickData { pub pair: DirectedTradingPair, From 783db09c78be4617b2ff668ae35f45cf319caf9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=BAc=C3=A1s=20Meier?= Date: Fri, 11 Oct 2024 12:58:39 -0700 Subject: [PATCH 39/43] feat(pindexer): add dex candlestick support to schema (#4894) ## Describe your changes Closes #4869. This implementation isn't as fast as it could probably be, but more than enough to be real time, which is all that really matters. (Maintaining the summary is annoying and kind of tricky, especially if you want to correctly handle a "frozen price" for pairs where no activity is happening) ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. 
Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > indexing only --- Cargo.lock | 3 + crates/bin/pindexer/Cargo.toml | 3 + crates/bin/pindexer/src/dex_ex/mod.rs | 510 ++++++++++++++++++ crates/bin/pindexer/src/dex_ex/schema.sql | 73 +++ crates/bin/pindexer/src/indexer_ext.rs | 2 +- crates/bin/pindexer/src/lib.rs | 1 + .../core/component/sct/src/component/tree.rs | 11 +- crates/core/component/sct/src/event.rs | 53 +- 8 files changed, 644 insertions(+), 12 deletions(-) create mode 100644 crates/bin/pindexer/src/dex_ex/mod.rs create mode 100644 crates/bin/pindexer/src/dex_ex/schema.sql diff --git a/Cargo.lock b/Cargo.lock index 367c0504aa..4681e50097 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5815,6 +5815,7 @@ name = "pindexer" version = "0.80.6" dependencies = [ "anyhow", + "chrono", "clap", "cometindex", "num-bigint", @@ -5827,8 +5828,10 @@ dependencies = [ "penumbra-keys", "penumbra-num", "penumbra-proto", + "penumbra-sct", "penumbra-shielded-pool", "penumbra-stake", + "prost", "serde_json", "sqlx", "tokio", diff --git a/crates/bin/pindexer/Cargo.toml b/crates/bin/pindexer/Cargo.toml index 88e062b8ca..610d43e780 100644 --- a/crates/bin/pindexer/Cargo.toml +++ b/crates/bin/pindexer/Cargo.toml @@ -13,6 +13,7 @@ publish = false [dependencies] anyhow = {workspace = true} clap = {workspace = true} +chrono = {workspace = true} cometindex = {workspace = true} num-bigint = { version = "0.4" } penumbra-shielded-pool = {workspace = true, default-features = false} @@ -26,6 +27,8 @@ penumbra-governance = {workspace = true, default-features = false} penumbra-num = {workspace = true, default-features = false} penumbra-asset = {workspace = true, default-features = false} penumbra-proto = {workspace = true, default-features = false} +penumbra-sct = {workspace = true, default-features = false} +prost = {workspace = true} tracing = {workspace = true} tokio = {workspace = true, features = ["full"]} serde_json = {workspace = 
true} diff --git a/crates/bin/pindexer/src/dex_ex/mod.rs b/crates/bin/pindexer/src/dex_ex/mod.rs new file mode 100644 index 0000000000..ec3882d0c9 --- /dev/null +++ b/crates/bin/pindexer/src/dex_ex/mod.rs @@ -0,0 +1,510 @@ +use std::fmt::Display; + +use anyhow::{anyhow, Context}; +use chrono::{Datelike, Days, TimeZone, Timelike as _, Utc}; +use cometindex::{async_trait, AppView, ContextualizedEvent, PgTransaction}; +use penumbra_asset::asset; +use penumbra_dex::{event::EventCandlestickData, CandlestickData}; +use penumbra_proto::{event::EventDomainType, DomainType}; +use penumbra_sct::event::EventBlockRoot; +use prost::Name as _; +use sqlx::PgPool; + +type DateTime = sqlx::types::chrono::DateTime; + +/// Candlestick data, unmoored from the prison of a particular block height. +/// +/// In other words, this can represent candlesticks which span arbitrary windows, +/// and not just a single block. +#[derive(Debug, Clone, Copy)] +struct Candle { + open: f64, + close: f64, + low: f64, + high: f64, + direct_volume: f64, + swap_volume: f64, +} + +impl Candle { + fn from_candlestick_data(data: &CandlestickData) -> Self { + Self { + open: data.open, + close: data.close, + low: data.low, + high: data.high, + direct_volume: data.direct_volume, + swap_volume: data.swap_volume, + } + } + + fn merge(&self, that: &Self) -> Self { + Self { + open: self.open, + close: that.close, + low: self.low.min(that.low), + high: self.high.max(that.high), + direct_volume: self.direct_volume + that.direct_volume, + swap_volume: self.swap_volume + that.swap_volume, + } + } +} + +impl From for Candle { + fn from(value: CandlestickData) -> Self { + Self::from(&value) + } +} + +impl From<&CandlestickData> for Candle { + fn from(value: &CandlestickData) -> Self { + Self::from_candlestick_data(value) + } +} + +#[derive(Clone, Copy, Debug)] +enum Window { + W1m, + W15m, + W1h, + W4h, + W1d, + W1w, + W1mo, +} + +impl Window { + fn all() -> impl Iterator { + [ + Window::W1m, + Window::W15m, + 
Window::W1h, + Window::W4h, + Window::W1d, + Window::W1w, + Window::W1mo, + ] + .into_iter() + } + + /// Get the anchor for a given time. + /// + /// This is the latest time that "snaps" to a given anchor, dependent on the window. + /// + /// For example, the 1 minute window has an anchor every minute, the day window + /// every day, etc. + fn anchor(&self, time: DateTime) -> DateTime { + let (y, mo, d, h, m) = ( + time.year(), + time.month(), + time.day(), + time.hour(), + time.minute(), + ); + let out = match self { + Window::W1m => Utc.with_ymd_and_hms(y, mo, d, h, m, 0).single(), + Window::W15m => Utc.with_ymd_and_hms(y, mo, d, h, m - (m % 15), 0).single(), + Window::W1h => Utc.with_ymd_and_hms(y, mo, d, h, 0, 0).single(), + Window::W4h => Utc.with_ymd_and_hms(y, mo, d, h - (h % 4), 0, 0).single(), + Window::W1d => Utc.with_ymd_and_hms(y, mo, d, 0, 0, 0).single(), + Window::W1w => Utc + .with_ymd_and_hms(y, mo, d, 0, 0, 0) + .single() + .and_then(|x| { + x.checked_sub_days(Days::new(time.weekday().num_days_from_monday().into())) + }), + Window::W1mo => Utc.with_ymd_and_hms(y, mo, 1, 0, 0, 0).single(), + }; + out.unwrap() + } +} + +impl Display for Window { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + use Window::*; + let str = match self { + W1m => "1m", + W15m => "15m", + W1h => "1h", + W4h => "4h", + W1d => "1d", + W1w => "1w", + W1mo => "1mo", + }; + write!(f, "{}", str) + } +} + +mod price_chart { + use super::*; + + /// A context when processing a price chart. + #[derive(Debug)] + pub struct Context<'tx, 'db> { + dbtx: &'tx mut PgTransaction<'db>, + asset_start: asset::Id, + asset_end: asset::Id, + window: Window, + } + + impl<'tx, 'db> Context<'tx, 'db> { + pub fn new( + dbtx: &'tx mut PgTransaction<'db>, + asset_start: asset::Id, + asset_end: asset::Id, + window: Window, + ) -> Self { + Self { + dbtx, + asset_start, + asset_end, + window, + } + } + + /// Get the candle we should update, based on the current timestamp. 
+ async fn relevant_candle( + &mut self, + anchor: DateTime, + ) -> anyhow::Result> { + let stuff: Option<(i32, f64, f64, f64, f64, f64, f64)> = sqlx::query_as( + r#" + SELECT + dex_ex_candlesticks.id, + open, + close, + high, + low, + direct_volume, + swap_volume + FROM dex_ex_price_charts + JOIN dex_ex_candlesticks ON dex_ex_candlesticks.id = candlestick_id + WHERE asset_start = $1 + AND asset_end = $2 + AND the_window = $3 + AND start_time >= $4 + "#, + ) + .bind(self.asset_start.to_bytes().as_slice()) + .bind(self.asset_end.to_bytes().as_slice()) + .bind(self.window.to_string()) + .bind(anchor) + .fetch_optional(self.dbtx.as_mut()) + .await?; + Ok( + stuff.map(|(id, open, close, high, low, direct_volume, swap_volume)| { + ( + id, + Candle { + open, + close, + high, + low, + direct_volume, + swap_volume, + }, + ) + }), + ) + } + + async fn create_candle(&mut self, anchor: DateTime, candle: Candle) -> anyhow::Result<()> { + let id: i32 = sqlx::query_scalar( + r#" + INSERT INTO dex_ex_candlesticks VALUES (DEFAULT, $1, $2, $3, $4, $5, $6) RETURNING id + "#, + ) + .bind(candle.open) + .bind(candle.close) + .bind(candle.high) + .bind(candle.low) + .bind(candle.direct_volume) + .bind(candle.swap_volume) + .fetch_one(self.dbtx.as_mut()) + .await?; + sqlx::query( + r#" + INSERT INTO dex_ex_price_charts VALUES (DEFAULT, $1, $2, $3, $4, $5) + "#, + ) + .bind(self.asset_start.to_bytes().as_slice()) + .bind(self.asset_end.to_bytes().as_slice()) + .bind(self.window.to_string()) + .bind(anchor) + .bind(id) + .execute(self.dbtx.as_mut()) + .await?; + Ok(()) + } + + async fn update_candle(&mut self, id: i32, candle: Candle) -> anyhow::Result<()> { + sqlx::query( + r#" + UPDATE dex_ex_candlesticks + SET (open, close, high, low, direct_volume, swap_volume) = + ($1, $2, $3, $4, $5, $6) + WHERE id = $7 + "#, + ) + .bind(candle.open) + .bind(candle.close) + .bind(candle.high) + .bind(candle.low) + .bind(candle.direct_volume) + .bind(candle.swap_volume) + .bind(id) + 
.execute(self.dbtx.as_mut()) + .await?; + Ok(()) + } + + pub async fn update(&mut self, time: DateTime, candle: Candle) -> anyhow::Result<()> { + let anchor = self.window.anchor(time); + match self.relevant_candle(anchor).await? { + None => self.create_candle(anchor, candle).await?, + Some((id, old_candle)) => self.update_candle(id, old_candle.merge(&candle)).await?, + }; + Ok(()) + } + } +} + +use price_chart::Context as PriceChartContext; + +mod summary { + use super::*; + + #[derive(Debug)] + pub struct Context<'tx, 'db> { + dbtx: &'tx mut PgTransaction<'db>, + asset_start: asset::Id, + asset_end: asset::Id, + } + + impl<'tx, 'db> Context<'tx, 'db> { + pub fn new( + dbtx: &'tx mut PgTransaction<'db>, + asset_start: asset::Id, + asset_end: asset::Id, + ) -> Self { + Self { + dbtx, + asset_start, + asset_end, + } + } + + pub async fn add_candle(&mut self, time: DateTime, candle: Candle) -> anyhow::Result<()> { + let asset_start = self.asset_start.to_bytes(); + let asset_end = self.asset_end.to_bytes(); + sqlx::query( + r#" + INSERT INTO _dex_ex_summary_backing VALUES ($1, $2, $3, $4, $5, $6) + "#, + ) + .bind(asset_start.as_slice()) + .bind(asset_end.as_slice()) + .bind(time) + .bind(candle.close) + .bind(candle.direct_volume) + .bind(candle.swap_volume) + .execute(self.dbtx.as_mut()) + .await?; + Ok(()) + } + } + + pub async fn update_all(dbtx: &mut PgTransaction<'_>, time: DateTime) -> anyhow::Result<()> { + let time_24h_ago = time + .checked_sub_days(Days::new(1)) + .ok_or(anyhow!("should be able to get time 24h ago from {}", time))?; + sqlx::query( + r#" + DELETE FROM _dex_ex_summary_backing WHERE time < $1 + "#, + ) + .bind(time_24h_ago) + .execute(dbtx.as_mut()) + .await?; + // Update all of the summaries with relevant backing data. + // + // We choose this one as being responsible for creating the first summary. 
+ sqlx::query( + r#" + INSERT INTO dex_ex_summary + SELECT DISTINCT ON (asset_start, asset_end) + asset_start, + asset_end, + FIRST_VALUE(price) OVER w AS price_24h_ago, + price AS current_price, + MAX(price) OVER w AS high_24h, + MIN(price) OVER w AS low_24h, + SUM(direct_volume) OVER w AS direct_volume_24h, + SUM(swap_volume) OVER w AS swap_volume_24h + FROM _dex_ex_summary_backing + WINDOW w AS ( + PARTITION BY + asset_start, asset_end + ORDER BY asset_start, asset_end, time DESC + ) ORDER by asset_start, asset_end, time ASC + ON CONFLICT (asset_start, asset_end) DO UPDATE SET + price_24h_ago = EXCLUDED.price_24h_ago, + current_price = EXCLUDED.current_price, + high_24h = EXCLUDED.high_24h, + low_24h = EXCLUDED.low_24h, + direct_volume_24h = EXCLUDED.direct_volume_24h, + swap_volume_24h = EXCLUDED.swap_volume_24h + "#, + ) + .execute(dbtx.as_mut()) + .await?; + // When we don't have backing data, we should nonetheless update to reflect this + sqlx::query( + r#" + UPDATE dex_ex_summary + SET + price_24h_ago = current_price, + high_24h = current_price, + low_24h = current_price, + direct_volume_24h = 0, + swap_volume_24h = 0 + WHERE NOT EXISTS ( + SELECT 1 + FROM _dex_ex_summary_backing + WHERE + _dex_ex_summary_backing.asset_start = dex_ex_summary.asset_start + AND + _dex_ex_summary_backing.asset_end = dex_ex_summary.asset_end + ) + "#, + ) + .execute(dbtx.as_mut()) + .await?; + Ok(()) + } +} + +use summary::Context as SummaryContext; + +async fn queue_event_candlestick_data( + dbtx: &mut PgTransaction<'_>, + height: u64, + event: EventCandlestickData, +) -> anyhow::Result<()> { + sqlx::query("INSERT INTO _dex_ex_queue VALUES (DEFAULT, $1, $2)") + .bind(i64::try_from(height)?) 
+ .bind(event.encode_to_vec().as_slice()) + .execute(dbtx.as_mut()) + .await?; + Ok(()) +} + +async fn unqueue_event_candlestick_data( + dbtx: &mut PgTransaction<'_>, + height: u64, +) -> anyhow::Result> { + let values: Vec> = + sqlx::query_scalar("DELETE FROM _dex_ex_queue WHERE height = $1 RETURNING data") + .bind(i64::try_from(height)?) + .fetch_all(dbtx.as_mut()) + .await?; + values + .into_iter() + .map(|x| EventCandlestickData::decode(x.as_slice())) + .collect() +} + +async fn on_event_candlestick_data( + dbtx: &mut PgTransaction<'_>, + event_time: DateTime, + event: EventCandlestickData, +) -> anyhow::Result<()> { + let asset_start = event.pair.start; + let asset_end = event.pair.end; + let candle = event.stick.into(); + for window in Window::all() { + let mut ctx = PriceChartContext::new(dbtx, asset_start, asset_end, window); + ctx.update(event_time, candle).await?; + } + let mut ctx = SummaryContext::new(dbtx, asset_start, asset_end); + ctx.add_candle(event_time, candle).await?; + Ok(()) +} + +async fn fetch_height_time( + dbtx: &mut PgTransaction<'_>, + height: u64, +) -> anyhow::Result> { + const CTX: &'static str = r#" +The `dex_ex` component relies on the `block` component to be running, to provide the `block_details` with timestamps. +Make sure that is running as well. +"#; + sqlx::query_scalar("SELECT timestamp FROM block_details WHERE height = $1") + .bind(i64::try_from(height)?) 
+ .fetch_optional(dbtx.as_mut()) + .await + .context(CTX) +} + +#[derive(Debug)] +pub struct Component {} + +impl Component { + pub fn new() -> Self { + Self {} + } +} + +#[async_trait] +impl AppView for Component { + async fn init_chain( + &self, + dbtx: &mut PgTransaction, + _: &serde_json::Value, + ) -> Result<(), anyhow::Error> { + for statement in include_str!("schema.sql").split(";") { + sqlx::query(statement).execute(dbtx.as_mut()).await?; + } + Ok(()) + } + + fn is_relevant(&self, type_str: &str) -> bool { + [ + ::Proto::full_name(), + ::Proto::full_name(), + ] + .into_iter() + .any(|x| type_str == x) + } + + async fn index_event( + &self, + dbtx: &mut PgTransaction, + event: &ContextualizedEvent, + _src_db: &PgPool, + ) -> Result<(), anyhow::Error> { + if let Ok(e) = EventCandlestickData::try_from_event(&event.event) { + let height = event.block_height; + match fetch_height_time(dbtx, height).await? { + None => { + queue_event_candlestick_data(dbtx, height, e).await?; + } + Some(time) => { + on_event_candlestick_data(dbtx, time, e).await?; + } + } + } else if let Ok(e) = EventBlockRoot::try_from_event(&event.event) { + let height = e.height; + let time = DateTime::from_timestamp(e.timestamp_seconds, 0).ok_or(anyhow!( + "creating timestamp should succeed; timestamp: {}", + e.timestamp_seconds + ))?; + for event in unqueue_event_candlestick_data(dbtx, height).await? { + on_event_candlestick_data(dbtx, time, event).await?; + } + summary::update_all(dbtx, time).await?; + } + tracing::debug!(?event, "unrecognized event"); + Ok(()) + } +} diff --git a/crates/bin/pindexer/src/dex_ex/schema.sql b/crates/bin/pindexer/src/dex_ex/schema.sql new file mode 100644 index 0000000000..53f85ceb8a --- /dev/null +++ b/crates/bin/pindexer/src/dex_ex/schema.sql @@ -0,0 +1,73 @@ +CREATE TABLE IF NOT EXISTS dex_ex_candlesticks ( + id SERIAL PRIMARY KEY, + -- The price at the start of a window. + open FLOAT8 NOT NULL, + -- The price at the close of a window. 
+ close FLOAT8 NOT NULL, + -- The highest price reached during a window. + high FLOAT8 NOT NULL, + -- The lowest price reached during a window. + low FLOAT8 NOT NULL, + -- The volume traded directly through position executions. + direct_volume FLOAT8 NOT NULL, + -- The volume that traded indirectly, possibly through several positions. + swap_volume FLOAT8 NOT NULL +); + +-- Contains, for each directed asset pair and window type, candle sticks for each window. +CREATE TABLE IF NOT EXISTS dex_ex_price_charts ( + -- We just want a simple primary key to have here. + id SERIAL PRIMARY KEY, + -- The bytes for the first asset in the directed pair. + asset_start BYTEA NOT NULL, + -- The bytes for the second asset in the directed pair. + asset_end BYTEA NOT NULL, + -- The window type for this stick. + -- + -- Enum types are annoying. + the_window TEXT NOT NULL, + -- The start time of this window. + start_time TIMESTAMPTZ NOT NULL, + -- The start time for the window this stick is about. + candlestick_id INTEGER UNIQUE REFERENCES dex_ex_candlesticks (id) +); + +CREATE UNIQUE INDEX ON dex_ex_price_charts (asset_start, asset_end, the_window, start_time); + +CREATE TABLE IF NOT EXISTS _dex_ex_summary_backing ( + asset_start BYTEA NOT NULL, + asset_end BYTEA NOT NULL, + -- The time for this bit of information. + time TIMESTAMPTZ NOT NULL, + -- The price at this point. + price FLOAT8 NOT NULL, + -- The volume for this particular candle. + direct_volume FLOAT8 NOT NULL, + swap_volume FLOAT8 NOT NULL, + PRIMARY KEY (asset_start, asset_end, time) +); + +CREATE TABLE IF NOT EXISTS dex_ex_summary ( + -- The first asset of the directed pair. + asset_start BYTEA NOT NULL, + -- The second asset of the directed pair. + asset_end BYTEA NOT NULL, + -- The current price (in terms of asset2) + current_price FLOAT8 NOT NULL, + -- The price 24h ago. + price_24h_ago FLOAT8 NOT NULL, + -- The highest price over the past 24h. + high_24h FLOAT8 NOT NULL, + -- The lowest price over the past 24h. 
+ low_24h FLOAT8 NOT NULL, + -- c.f. candlesticks for the difference between these two + direct_volume_24h FLOAT8 NOT NULL, + swap_volume_24h FLOAT8 NOT NULL, + PRIMARY KEY (asset_start, asset_end) +); + +CREATE TABLE IF NOT EXISTS _dex_ex_queue ( + id SERIAL PRIMARY KEY, + height BIGINT NOT NULL, + data BYTEA NOT NULL +); diff --git a/crates/bin/pindexer/src/indexer_ext.rs b/crates/bin/pindexer/src/indexer_ext.rs index 6a7c5e8208..950000fdd3 100644 --- a/crates/bin/pindexer/src/indexer_ext.rs +++ b/crates/bin/pindexer/src/indexer_ext.rs @@ -10,7 +10,7 @@ impl IndexerExt for cometindex::Indexer { .with_index(crate::stake::DelegationTxs {}) .with_index(crate::stake::UndelegationTxs {}) .with_index(crate::governance::GovernanceProposals {}) - .with_index(crate::dex::Component::new()) + .with_index(crate::dex_ex::Component::new()) .with_index(crate::supply::Component::new()) .with_index(crate::ibc::Component::new()) } diff --git a/crates/bin/pindexer/src/lib.rs b/crates/bin/pindexer/src/lib.rs index 353aaaf0b2..e2c2d63476 100644 --- a/crates/bin/pindexer/src/lib.rs +++ b/crates/bin/pindexer/src/lib.rs @@ -4,6 +4,7 @@ mod indexer_ext; pub use indexer_ext::IndexerExt; pub mod block; pub mod dex; +pub mod dex_ex; pub mod ibc; mod parsing; pub mod shielded_pool; diff --git a/crates/core/component/sct/src/component/tree.rs b/crates/core/component/sct/src/component/tree.rs index b0d987a109..9bbd83e263 100644 --- a/crates/core/component/sct/src/component/tree.rs +++ b/crates/core/component/sct/src/component/tree.rs @@ -1,7 +1,7 @@ use anyhow::{anyhow, Result}; use async_trait::async_trait; use cnidarium::{StateRead, StateWrite}; -use penumbra_proto::{StateReadProto, StateWriteProto}; +use penumbra_proto::{DomainType as _, StateReadProto, StateWriteProto}; use penumbra_tct as tct; use tct::builder::{block, epoch}; use tracing::instrument; @@ -85,7 +85,14 @@ pub trait SctManager: StateWrite { self.put(state_key::tree::anchor_by_height(height), sct_anchor); 
self.record_proto(event::anchor(height, sct_anchor, block_timestamp)); - self.record_proto(event::block_root(height, block_root, block_timestamp)); + self.record_proto( + event::EventBlockRoot { + height, + root: block_root, + timestamp_seconds: block_timestamp, + } + .to_proto(), + ); // Only record an epoch root event if we are ending the epoch. if let Some(epoch_root) = epoch_root { let index = self diff --git a/crates/core/component/sct/src/event.rs b/crates/core/component/sct/src/event.rs index 65a462b0f8..43bef0da20 100644 --- a/crates/core/component/sct/src/event.rs +++ b/crates/core/component/sct/src/event.rs @@ -1,8 +1,12 @@ +use anyhow::{anyhow, Context as _}; use pbjson_types::Timestamp; use penumbra_tct as tct; use tct::builder::{block, epoch}; -use penumbra_proto::core::component::sct::v1 as pb; +use penumbra_proto::{ + core::component::sct::v1::{self as pb}, + DomainType, Name as _, +}; use crate::CommitmentSource; @@ -17,17 +21,48 @@ pub fn anchor(height: u64, anchor: tct::Root, timestamp: i64) -> pb::EventAnchor } } -pub fn block_root(height: u64, root: block::Root, timestamp: i64) -> pb::EventBlockRoot { - pb::EventBlockRoot { - height, - root: Some(root.into()), - timestamp: Some(Timestamp { - seconds: timestamp, - nanos: 0, - }), +#[derive(Debug, Clone)] +pub struct EventBlockRoot { + pub height: u64, + pub root: block::Root, + pub timestamp_seconds: i64, +} + +impl TryFrom for EventBlockRoot { + type Error = anyhow::Error; + + fn try_from(value: pb::EventBlockRoot) -> Result { + fn inner(value: pb::EventBlockRoot) -> anyhow::Result { + Ok(EventBlockRoot { + height: value.height, + root: value.root.ok_or(anyhow!("missing `root`"))?.try_into()?, + timestamp_seconds: value + .timestamp + .ok_or(anyhow!("missing `timestamp`"))? 
+ .seconds, + }) + } + inner(value).context(format!("parsing {}", pb::EventBlockRoot::NAME)) } } +impl From for pb::EventBlockRoot { + fn from(value: EventBlockRoot) -> Self { + Self { + height: value.height, + root: Some(value.root.into()), + timestamp: Some(Timestamp { + seconds: value.timestamp_seconds, + nanos: 0, + }), + } + } +} + +impl DomainType for EventBlockRoot { + type Proto = pb::EventBlockRoot; +} + pub fn epoch_root(index: u64, root: epoch::Root, timestamp: i64) -> pb::EventEpochRoot { pb::EventEpochRoot { index, From 954e777d3939d10f3f3983a4da2ae649f9197d7a Mon Sep 17 00:00:00 2001 From: redshiftzero Date: Thu, 17 Oct 2024 18:52:40 -0400 Subject: [PATCH 40/43] pmonitor: create client activity monitor (#4844) Creates a new `pmonitor` tool that allows for tracking balances across multiple accounts over time, given an input list of FVKs, specified in JSON. Separate directories are maintained for each wallet's view database, so that network syncs are stored locally and faster on subsequent updates. The tool will exit non-zero if non-compliance was detected. Includes integration tests for common use cases, to guard against regressions. Closes #4832. 
Co-authored-by: Conor Schaefer --- .github/workflows/smoke.yml | 34 +- Cargo.lock | 38 ++ Cargo.toml | 1 + crates/bin/pcli/src/opt.rs | 2 +- crates/bin/pmonitor/Cargo.toml | 46 ++ crates/bin/pmonitor/src/config.rs | 118 ++++ crates/bin/pmonitor/src/genesis.rs | 117 ++++ crates/bin/pmonitor/src/main.rs | 598 ++++++++++++++++++ crates/bin/pmonitor/tests/common/mod.rs | 332 ++++++++++ .../bin/pmonitor/tests/common/pcli_helpers.rs | 63 ++ .../bin/pmonitor/tests/network_integration.rs | 247 ++++++++ crates/view/src/storage.rs | 71 ++- crates/view/src/storage/schema.sql | 3 +- deployments/containerfiles/Dockerfile | 1 + .../scripts/pmonitor-integration-test.sh | 110 ++++ deployments/scripts/rust-docs | 1 + flake.nix | 4 +- justfile | 9 + 18 files changed, 1768 insertions(+), 27 deletions(-) create mode 100644 crates/bin/pmonitor/Cargo.toml create mode 100644 crates/bin/pmonitor/src/config.rs create mode 100644 crates/bin/pmonitor/src/genesis.rs create mode 100644 crates/bin/pmonitor/src/main.rs create mode 100644 crates/bin/pmonitor/tests/common/mod.rs create mode 100644 crates/bin/pmonitor/tests/common/pcli_helpers.rs create mode 100644 crates/bin/pmonitor/tests/network_integration.rs create mode 100755 deployments/scripts/pmonitor-integration-test.sh diff --git a/.github/workflows/smoke.yml b/.github/workflows/smoke.yml index 7454c26cc0..37d13d09af 100644 --- a/.github/workflows/smoke.yml +++ b/.github/workflows/smoke.yml @@ -5,12 +5,13 @@ on: paths-ignore: - 'docs/**' +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: smoke_test: runs-on: buildjet-16vcpu-ubuntu-2204 - concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true environment: smoke-test steps: - uses: actions/checkout@v4 @@ -39,3 +40,30 @@ jobs: - name: Display smoke-test logs if: always() run: cat deployments/logs/smoke-*.log + + pmonitor-integration: + runs-on: buildjet-16vcpu-ubuntu-2204 + steps: + - uses: 
actions/checkout@v4 + with: + lfs: true + + - name: install nix + uses: nixbuild/nix-quick-install-action@v28 + + - name: setup nix cache + uses: nix-community/cache-nix-action@v5 + with: + primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix') }} + restore-prefixes-first-match: nix-${{ runner.os }}- + backend: buildjet + + - name: Load rust cache + uses: astriaorg/buildjet-rust-cache@v2.5.1 + + # Confirm that the nix devshell is buildable and runs at all. + - name: validate nix env + run: nix develop --command echo hello + + - name: run the pmonitor integration tests + run: nix develop --command just test-pmonitor diff --git a/Cargo.lock b/Cargo.lock index 4681e50097..965eea0d35 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5904,6 +5904,43 @@ dependencies = [ "plotters-backend", ] +[[package]] +name = "pmonitor" +version = "0.80.6" +dependencies = [ + "anyhow", + "assert_cmd", + "camino", + "clap", + "colored", + "directories", + "futures", + "indicatif", + "once_cell", + "pcli", + "penumbra-app", + "penumbra-asset", + "penumbra-compact-block", + "penumbra-keys", + "penumbra-num", + "penumbra-proto", + "penumbra-shielded-pool", + "penumbra-stake", + "penumbra-tct", + "penumbra-view", + "regex", + "serde", + "serde_json", + "tempfile", + "tokio", + "toml 0.7.8", + "tonic", + "tracing", + "tracing-subscriber 0.3.18", + "url", + "uuid", +] + [[package]] name = "polling" version = "2.8.0" @@ -8588,6 +8625,7 @@ checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439" dependencies = [ "getrandom", "rand", + "serde", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 7ae2d4d471..3d2de2f1bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,6 +11,7 @@ members = [ "crates/bin/pclientd", "crates/bin/pd", "crates/bin/pindexer", + "crates/bin/pmonitor", "crates/cnidarium", "crates/cnidarium-component", "crates/core/app", diff --git a/crates/bin/pcli/src/opt.rs b/crates/bin/pcli/src/opt.rs index ca4f644f01..bfa9a6cc1c 100644 --- 
a/crates/bin/pcli/src/opt.rs +++ b/crates/bin/pcli/src/opt.rs @@ -156,7 +156,7 @@ impl Opt { tracing::info!(%path, "using local view service"); let registry_path = self.home.join("registry.json"); - // Check if the path exists or set it to nojne + // Check if the path exists or set it to none let registry_path = if registry_path.exists() { Some(registry_path) } else { diff --git a/crates/bin/pmonitor/Cargo.toml b/crates/bin/pmonitor/Cargo.toml new file mode 100644 index 0000000000..71612426ba --- /dev/null +++ b/crates/bin/pmonitor/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "pmonitor" +version = { workspace = true } +authors = { workspace = true } +edition = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +publish = false + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = {workspace = true} +camino = {workspace = true} +clap = {workspace = true, features = ["derive", "env"]} +colored = "2.1.0" +directories = {workspace = true} +futures = {workspace = true} +indicatif = {workspace = true} +pcli = {path = "../pcli", default-features = true} +penumbra-app = {workspace = true} +penumbra-asset = {workspace = true, default-features = false} +penumbra-compact-block = {workspace = true, default-features = false} +penumbra-keys = {workspace = true, default-features = false} +penumbra-num = {workspace = true, default-features = false} +penumbra-proto = {workspace = true} +penumbra-shielded-pool = {workspace = true, default-features = false} +penumbra-stake = {workspace = true, default-features = false} +penumbra-tct = {workspace = true, default-features = false} +penumbra-view = {workspace = true} +regex = {workspace = true} +serde = {workspace = true, features = ["derive"]} +serde_json = {workspace = true} +tokio = {workspace = true, features = ["full"]} +toml = {workspace = true} +tonic = {workspace = true, features = 
["tls-webpki-roots", "tls"]} +tracing = {workspace = true} +tracing-subscriber = { workspace = true, features = ["env-filter", "ansi"] } +url = {workspace = true, features = ["serde"]} +uuid = { version = "1.3", features = ["v4", "serde"] } + +[dev-dependencies] +assert_cmd = {workspace = true} +once_cell = {workspace = true} +tempfile = {workspace = true} diff --git a/crates/bin/pmonitor/src/config.rs b/crates/bin/pmonitor/src/config.rs new file mode 100644 index 0000000000..8b521ff8a1 --- /dev/null +++ b/crates/bin/pmonitor/src/config.rs @@ -0,0 +1,118 @@ +//! Logic for reading and writing config files for `pmonitor`, in the TOML format. +use anyhow::Result; +use regex::Regex; +use serde::{Deserialize, Serialize}; +use url::Url; +use uuid::Uuid; + +use penumbra_keys::FullViewingKey; +use penumbra_num::Amount; + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct FvkEntry { + pub fvk: FullViewingKey, + pub wallet_id: Uuid, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Representation of a single Penumbra wallet to track. +pub struct AccountConfig { + /// The initial [FullViewingKey] has specified during `pmonitor init`. + /// + /// Distinct because the tool understands account migrations. + original: FvkEntry, + /// The amount held by the account at the time of genesis. + genesis_balance: Amount, + /// List of account migrations, performed via `pcli migrate balance`, if any. + migrations: Vec, +} + +impl AccountConfig { + pub fn new(original: FvkEntry, genesis_balance: Amount) -> Self { + Self { + original, + genesis_balance, + migrations: vec![], + } + } + + /// Get original/genesis FVK. + pub fn original_fvk(&self) -> FullViewingKey { + self.original.fvk.clone() + } + + /// Get genesis balance. + pub fn genesis_balance(&self) -> Amount { + self.genesis_balance + } + + /// Add migration to the account config. 
+ pub fn add_migration(&mut self, fvk_entry: FvkEntry) { + self.migrations.push(fvk_entry); + } + + /// Get the active wallet, which is the last migration or the original FVK if no migrations have occurred. + pub fn active_wallet(&self) -> FvkEntry { + if self.migrations.is_empty() { + self.original.clone() + } else { + self.migrations + .last() + .expect("migrations must not be empty") + .clone() + } + } + + pub fn active_fvk(&self) -> FullViewingKey { + self.active_wallet().fvk + } + + pub fn active_uuid(&self) -> Uuid { + self.active_wallet().wallet_id + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// The primary TOML file for configuring `pmonitor`, containing all its account info. +/// +/// During `pmonitor audit` runs, the config will be automatically updated +/// if tracked FVKs were detected to migrate, via `pcli migrate balance`, to save time +/// on future syncs. +pub struct PmonitorConfig { + /// The gRPC URL for a Penumbra node's `pd` endpoint, used for retrieving account activity. + grpc_url: Url, + /// The list of Penumbra wallets to track. + accounts: Vec, +} + +impl PmonitorConfig { + pub fn new(grpc_url: Url, accounts: Vec) -> Self { + Self { grpc_url, accounts } + } + + pub fn grpc_url(&self) -> Url { + self.grpc_url.clone() + } + + pub fn accounts(&self) -> &Vec { + &self.accounts + } + + pub fn set_account(&mut self, index: usize, account: AccountConfig) { + self.accounts[index] = account; + } +} + +/// Get the destination FVK from a migration memo. 
+pub fn parse_dest_fvk_from_memo(memo: &str) -> Result { + let re = Regex::new(r"Migrating balance from .+ to (.+)")?; + if let Some(captures) = re.captures(memo) { + if let Some(dest_fvk_str) = captures.get(1) { + return dest_fvk_str + .as_str() + .parse::() + .map_err(|_| anyhow::anyhow!("Invalid destination FVK in memo")); + } + } + Err(anyhow::anyhow!("Could not parse destination FVK from memo")) +} diff --git a/crates/bin/pmonitor/src/genesis.rs b/crates/bin/pmonitor/src/genesis.rs new file mode 100644 index 0000000000..a526e2899d --- /dev/null +++ b/crates/bin/pmonitor/src/genesis.rs @@ -0,0 +1,117 @@ +//! Logic for inspecting the [CompactBlock] at genesis of the target chain. +//! Used to compute balances for tracked FVKs at genesis time. The initial genesis balance is +//! stored in the `pmonitor` config file, so that audit actions can reference it. +use std::{collections::BTreeMap, str::FromStr}; + +use penumbra_asset::STAKING_TOKEN_ASSET_ID; +use penumbra_compact_block::{CompactBlock, StatePayload}; +use penumbra_keys::FullViewingKey; +use penumbra_num::Amount; +use penumbra_shielded_pool::{Note, NotePayload}; +use penumbra_stake::{ + rate::{BaseRateData, RateData}, + DelegationToken, +}; +use penumbra_tct::StateCommitment; + +#[derive(Debug, Clone)] +pub struct FilteredGenesisBlock { + // Notes per FVK + #[allow(dead_code)] + pub notes: BTreeMap>, + // UM-equivalent balances per FVK + pub balances: BTreeMap, +} + +/// Scanning of the genesis `CompactBlock` with a list of FVKs to determine the +/// initial balances of the relevant addresses. +/// +/// Assumption: There are no swaps or nullifiers in the genesis block. +pub async fn scan_genesis_block( + CompactBlock { + height, + state_payloads, + .. + }: CompactBlock, + fvks: Vec, +) -> anyhow::Result { + assert_eq!(height, 0); + + let mut notes = BTreeMap::new(); + let mut balances = BTreeMap::new(); + + // Calculate the rate data for each validator in the initial validator set. 
+ let base_rate = BaseRateData { + epoch_index: 0, + base_reward_rate: 0u128.into(), + base_exchange_rate: 1_0000_0000u128.into(), + }; + + // We proceed one FVK at a time. + for fvk in fvks { + // Trial-decrypt a note with our a specific viewing key + let trial_decrypt_note = + |note_payload: NotePayload| -> tokio::task::JoinHandle> { + let fvk2 = fvk.clone(); + tokio::spawn(async move { note_payload.trial_decrypt(&fvk2) }) + }; + + // Trial-decrypt the notes in this block, keeping track of the ones that were meant for the FVK + // we're monitoring. + let mut note_decryptions = Vec::new(); + + // We only care about notes, so we're ignoring swaps and rolled-up commitments. + for payload in state_payloads.iter() { + if let StatePayload::Note { note, .. } = payload { + note_decryptions.push(trial_decrypt_note((**note).clone())); + } + } + + let mut notes_for_this_fvk = BTreeMap::new(); + for decryption in note_decryptions { + if let Some(note) = decryption + .await + .expect("able to join tokio note decryption handle") + { + notes_for_this_fvk.insert(note.commit(), note.clone()); + + // Balance is expected to be in the staking or delegation token + let note_value = note.value(); + if note_value.asset_id == *STAKING_TOKEN_ASSET_ID { + balances + .entry(fvk.to_string()) + .and_modify(|existing_amount| *existing_amount += note.amount()) + .or_insert(note.amount()); + } else if let Ok(delegation_token) = + DelegationToken::from_str(¬e_value.asset_id.to_string()) + { + // We need to convert the amount to the UM-equivalent amount + let rate_data = RateData { + identity_key: delegation_token.validator(), + validator_reward_rate: 0u128.into(), + validator_exchange_rate: base_rate.base_exchange_rate, + }; + let um_equivalent_balance = rate_data.unbonded_amount(note.amount()); + + balances + .entry(fvk.to_string()) + .and_modify(|existing_amount| *existing_amount += um_equivalent_balance) + .or_insert(um_equivalent_balance); + } else { + tracing::warn!( + "ignoring note with 
unknown asset id: {}", + note_value.asset_id + ); + } + } + } + + // Save all the notes for this FVK, and continue. + notes.insert(fvk.to_string(), notes_for_this_fvk); + } + + // Construct filtered genesis block with allocations + let result = FilteredGenesisBlock { notes, balances }; + + Ok(result) +} diff --git a/crates/bin/pmonitor/src/main.rs b/crates/bin/pmonitor/src/main.rs new file mode 100644 index 0000000000..372f425adb --- /dev/null +++ b/crates/bin/pmonitor/src/main.rs @@ -0,0 +1,598 @@ +//! The `pmonitor` tool tracks the balances of Penumbra wallets, as identified +//! by a [FullViewingKey] (FVK), in order to perform auditing. It accepts a JSON file +//! of FVKs and a `pd` gRPC URL to initialize: +//! +//! pmonitor init --grpc-url http://127.0.0.1:8080 --fvks fvks.json +//! +//! The audit functionality runs as a single operation, evaluating compliance up to the +//! current block height: +//! +//! pmonitor audit +//! +//! If regular auditing is desired, consider automating the `pmonitor audit` action via +//! cron or similar. `pmonitor` will cache view databases for each tracked FVK, so that future +//! `audit` actions need only inspect the blocks generated between the previous audit and the +//! current height. 
+ +use anyhow::{Context, Result}; +use camino::Utf8PathBuf; +use clap::{self, Parser}; +use directories::ProjectDirs; +use futures::StreamExt; +use penumbra_asset::STAKING_TOKEN_ASSET_ID; +use std::fs; +use std::io::IsTerminal as _; +use std::str::FromStr; +use tonic::transport::{Channel, ClientTlsConfig}; +use tracing_subscriber::{prelude::*, EnvFilter}; +use url::Url; +use uuid::Uuid; + +use colored::Colorize; + +use pcli::config::PcliConfig; +use penumbra_compact_block::CompactBlock; +use penumbra_keys::FullViewingKey; +use penumbra_num::Amount; +use penumbra_proto::box_grpc_svc; +use penumbra_proto::view::v1::{ + view_service_client::ViewServiceClient, view_service_server::ViewServiceServer, +}; +use penumbra_proto::{ + core::component::compact_block::v1::CompactBlockRequest, + core::component::stake::v1::query_service_client::QueryServiceClient as StakeQueryServiceClient, + penumbra::core::component::compact_block::v1::query_service_client::QueryServiceClient as CompactBlockQueryServiceClient, +}; +use penumbra_stake::rate::RateData; +use penumbra_stake::DelegationToken; +use penumbra_view::{Storage, ViewClient, ViewServer}; + +mod config; +mod genesis; + +use config::{parse_dest_fvk_from_memo, AccountConfig, FvkEntry, PmonitorConfig}; + +/// The maximum size of a compact block, in bytes (12MB). +const MAX_CB_SIZE_BYTES: usize = 12 * 1024 * 1024; + +/// The name of the view database file +const VIEW_FILE_NAME: &str = "pcli-view.sqlite"; + +/// The permitted difference between genesis balance and current balance, +/// specified in number of staking tokens. +const ALLOWED_DISCREPANCY: f64 = 0.1; + +/// Configure tracing_subscriber for logging messages +fn init_tracing() -> anyhow::Result<()> { + // Instantiate tracing layers. + // The `FmtLayer` is used to print to the console. 
+ let fmt_layer = tracing_subscriber::fmt::layer() + .with_ansi(std::io::stdout().is_terminal()) + .with_writer(std::io::stderr) + .with_target(true); + // The `EnvFilter` layer is used to filter events based on `RUST_LOG`. + let filter_layer = EnvFilter::try_from_default_env() + .or_else(|_| EnvFilter::try_new("info,penumbra_view=off"))?; + + // Register the tracing subscribers. + let registry = tracing_subscriber::registry() + .with(filter_layer) + .with(fmt_layer); + registry.init(); + Ok(()) +} + +#[tokio::main] +async fn main() -> Result<()> { + let opt = Opt::parse(); + init_tracing()?; + tracing::info!(?opt, version = env!("CARGO_PKG_VERSION"), "running command"); + opt.exec().await +} + +/// The path to the default `pmonitor` home directory. +/// +/// Can be overridden on the command-line via `--home`. +pub fn default_home() -> Utf8PathBuf { + let path = ProjectDirs::from("zone", "penumbra", "pmonitor") + .expect("Failed to get platform data dir") + .data_dir() + .to_path_buf(); + Utf8PathBuf::from_path_buf(path).expect("Platform default data dir was not UTF-8") +} + +#[derive(Debug, Parser)] +#[clap( + name = "pmonitor", + about = "The Penumbra account activity monitor.", + version +)] +pub struct Opt { + /// Command to run. + #[clap(subcommand)] + pub cmd: Command, + /// The path used to store pmonitor state. + #[clap(long, default_value_t = default_home(), env = "PENUMBRA_PMONITOR_HOME")] + pub home: Utf8PathBuf, +} + +#[derive(Debug, clap::Subcommand)] +pub enum Command { + /// Generate configs for `pmonitor`. + Init { + /// Provide JSON file with the list of full viewing keys to monitor. + #[clap(long, display_order = 200)] + fvks: String, + /// Sets the URL of the gRPC endpoint used to sync the wallets. + #[clap( + long, + display_order = 900, + parse(try_from_str = Url::parse) + )] + grpc_url: Url, + }, + /// Sync to latest block height and verify all configured wallets have the correct balance. 
+ Audit {}, + /// Delete `pmonitor` storage to reset local state. + Reset {}, +} + +impl Opt { + /// Set up the view service for a given wallet. + pub async fn view( + &self, + path: Utf8PathBuf, + fvk: FullViewingKey, + grpc_url: Url, + ) -> Result<ViewServiceClient<box_grpc_svc::BoxGrpcService>> { + let registry_path = path.join("registry.json"); + // Check if the path exists or set it to none + let registry_path = if registry_path.exists() { + Some(registry_path) + } else { + None + }; + let db_path: Utf8PathBuf = path.join(VIEW_FILE_NAME); + + let svc: ViewServer = + ViewServer::load_or_initialize(Some(db_path), registry_path, &fvk, grpc_url).await?; + + let svc: ViewServiceServer<ViewServer> = ViewServiceServer::new(svc); + let view_service = ViewServiceClient::new(box_grpc_svc::local(svc)); + Ok(view_service) + } + + /// Get the path to the wallet directory for a given wallet ID. + pub fn wallet_path(&self, wallet_id: &Uuid) -> Utf8PathBuf { + self.home.join(format!("wallet_{}", wallet_id)) + } + + /// Sync a given wallet to the latest block height. + pub async fn sync( + &self, + view_service: &mut ViewServiceClient<box_grpc_svc::BoxGrpcService>, + ) -> Result<()> { + let mut status_stream = ViewClient::status_stream(view_service).await?; + + let initial_status = status_stream + .next() + .await + .transpose()? 
+ .ok_or_else(|| anyhow::anyhow!("view service did not report sync status"))?; + + tracing::debug!( + "scanning blocks from last sync height {} to latest height {}", + initial_status.full_sync_height, + initial_status.latest_known_block_height, + ); + + // use indicatif::{ProgressBar, ProgressDrawTarget, ProgressStyle}; + // let progress_bar = ProgressBar::with_draw_target( + // initial_status.latest_known_block_height - initial_status.full_sync_height, + // ProgressDrawTarget::stdout(), + // ) + // .with_style( + // ProgressStyle::default_bar() + // .template("[{elapsed}] {bar:50.cyan/blue} {pos:>7}/{len:7} {per_sec} ETA: {eta}"), + // ); + // progress_bar.set_position(0); + + // On large networks, logging an update every 100k blocks or so seems reasonable. + // let log_every_n_blocks = 100000; + let log_every_n_blocks = 100; + while let Some(status) = status_stream.next().await.transpose()? { + if status.full_sync_height % log_every_n_blocks == 0 { + tracing::debug!("synced {} blocks", status.full_sync_height); + } + // progress_bar.set_position(status.full_sync_height - initial_status.full_sync_height); + } + // progress_bar.finish(); + + Ok(()) + } + + /// Fetch the genesis compact block + pub async fn fetch_genesis_compact_block(&self, grpc_url: Url) -> Result<CompactBlock> { + let height = 0; + let mut client = CompactBlockQueryServiceClient::connect(grpc_url.to_string()) + .await? + .max_decoding_message_size(MAX_CB_SIZE_BYTES); + let compact_block = client + .compact_block(CompactBlockRequest { height }) + .await? + .into_inner() + .compact_block + .expect("response has compact block"); + compact_block.try_into() + } + + /// Stolen from pcli + pub async fn pd_channel(&self, grpc_url: Url) -> anyhow::Result<Channel> { + match grpc_url.scheme() { + "http" => Ok(Channel::from_shared(grpc_url.to_string())? + .connect() + .await?), + "https" => Ok(Channel::from_shared(grpc_url.to_string())? + .tls_config(ClientTlsConfig::new())? 
+ .connect() + .await?), + other => Err(anyhow::anyhow!("unknown url scheme {other}")) + .with_context(|| format!("could not connect to {}", grpc_url)), + } + } + + /// Create wallet given a path and fvk + pub async fn create_wallet( + &self, + wallet_dir: &Utf8PathBuf, + fvk: &FullViewingKey, + grpc_url: &Url, + ) -> Result<()> { + // Create the wallet directory if it doesn't exist + if !wallet_dir.exists() { + fs::create_dir_all(&wallet_dir)?; + } + + // Use FVK to build a pcli config file, + // which we'll reference when syncing wallets. + let pcli_config = PcliConfig { + grpc_url: grpc_url.clone(), + view_url: None, + governance_custody: None, + full_viewing_key: fvk.clone(), + disable_warning: true, + custody: pcli::config::CustodyConfig::ViewOnly, + }; + + let pcli_config_path = wallet_dir.join("config.toml"); + pcli_config.save(pcli_config_path).with_context(|| { + format!("failed to initialize wallet in {}", wallet_dir.to_string()) + })?; + + Ok(()) + } + + /// Compute the UM-equivalent balance for a given (synced) wallet. 
+ pub async fn compute_um_equivalent_balance( + &self, + view_client: &mut ViewServiceClient<box_grpc_svc::BoxGrpcService>, + stake_client: &mut StakeQueryServiceClient<Channel>, + ) -> Result<Amount> { + let notes = view_client.unspent_notes_by_asset_and_address().await?; + let mut total_um_equivalent_amount = Amount::from(0u64); + for (asset_id, map) in notes.iter() { + if *asset_id == *STAKING_TOKEN_ASSET_ID { + let total_amount = map + .iter() + .map(|(_, spendable_notes)| { + spendable_notes + .iter() + .map(|spendable_note| spendable_note.note.amount()) + .sum::<Amount>() + }) + .sum::<Amount>(); + total_um_equivalent_amount += total_amount; + } else if let Ok(delegation_token) = DelegationToken::from_str(&asset_id.to_string()) { + let total_amount = map + .iter() + .map(|(_, spendable_notes)| { + spendable_notes + .iter() + .map(|spendable_note| spendable_note.note.amount()) + .sum::<Amount>() + }) + .sum::<Amount>(); + + // We need to convert the amount to the UM-equivalent amount using the appropriate rate data + let rate_data: RateData = stake_client + .current_validator_rate(tonic::Request::new( + (delegation_token.validator()).into(), + )) + .await? + .into_inner() + .try_into()?; + let um_equivalent_balance = rate_data.unbonded_amount(total_amount); + total_um_equivalent_amount += um_equivalent_balance; + }; + } + Ok(total_um_equivalent_amount) + } + + /// Execute the specified command. 
+ pub async fn exec(&self) -> Result<()> { + let opt = self; + match &opt.cmd { + Command::Reset {} => { + // Delete the home directory + fs::remove_dir_all(&opt.home)?; + println!( + "Successfully cleaned up pmonitor directory: \"{}\"", + opt.home + ); + Ok(()) + } + Command::Init { fvks, grpc_url } => { + // Parse the JSON file into a list of full viewing keys + let fvks_str = fs::read_to_string(fvks)?; + + // Take elements from the array and parse them into FullViewingKeys + let fvk_string_list: Vec<String> = serde_json::from_str(&fvks_str)?; + let fvk_list: Vec<FullViewingKey> = fvk_string_list + .iter() + .map(|fvk| FullViewingKey::from_str(&fvk)) + .collect::<Result<Vec<FullViewingKey>>>()?; + println!("Successfully read FVKs from provided file"); + + // Create the home directory if it doesn't exist + if !opt.home.exists() { + fs::create_dir_all(&opt.home)?; + } else { + anyhow::bail!("pmonitor home directory already exists: {}", opt.home); + } + + // During init, we also compute and save the genesis balance for each + // FVK, since that won't change in the future. + let genesis_compact_block = + self.fetch_genesis_compact_block(grpc_url.clone()).await?; + println!("About to scan the genesis block... this may take a moment"); + let genesis_filtered_block = + genesis::scan_genesis_block(genesis_compact_block, fvk_list.clone()).await?; + + let mut accounts = Vec::new(); + + // Now we need to make subdirectories for each of the FVKs and setup their + // config files, with the selected FVK and GRPC URL. 
+ for fvk in fvk_list.iter() { + let wallet_id = Uuid::new_v4(); + let wallet_dir = self.wallet_path(&wallet_id); + tracing::debug!("creating wallet at {}", wallet_dir.to_string()); + self.create_wallet(&wallet_dir, &fvk, &grpc_url).await?; + + accounts.push(AccountConfig::new( + FvkEntry { + fvk: fvk.clone(), + wallet_id, + }, + *(genesis_filtered_block + .balances + .get(&fvk.to_string()) + .unwrap_or(&Amount::from(0u64))), + )); + } + + tracing::info!("successfully initialized {} wallets", accounts.len()); + let pmonitor_config = PmonitorConfig::new(grpc_url.clone(), accounts); + + // Save the config + let config_path = opt.home.join("pmonitor_config.toml"); + fs::write(config_path, toml::to_string(&pmonitor_config)?)?; + + Ok(()) + } + Command::Audit {} => { + // Parse the config file to get the accounts to monitor. + // + // Note that each logical genesis entry might now have one or more FVKs, depending on if the + // user migrated their account to a new FVK, i.e. if they migrated once, they'll have two + // FVKs. This can happen an unlimited number of times. + let config_path = opt.home.join("pmonitor_config.toml"); + let pmonitor_config: PmonitorConfig = + toml::from_str(&fs::read_to_string(config_path.clone()).context(format!( + "failed to load pmonitor config file: {}", + config_path + ))?)?; + + let mut stake_client = StakeQueryServiceClient::new( + self.pd_channel(pmonitor_config.grpc_url()).await?, + ); + + // Sync each wallet to the latest block height, check for new migrations, and check the balance. + let mut updated_config = pmonitor_config.clone(); + let mut config_updated = false; + + let num_accounts = pmonitor_config.accounts().len(); + + // Create bucket for documenting non-compliant FVKs, for reporting in summary. 
+ let mut failures: Vec<&AccountConfig> = vec![]; + + for (index, config) in pmonitor_config.accounts().iter().enumerate() { + let active_fvk = config.active_fvk(); + let active_path = self.wallet_path(&config.active_uuid()); + tracing::info!( + "syncing wallet {}/{}: {}", + index + 1, + num_accounts, + active_path.to_string() + ); + + let mut view_client = self + .view( + active_path.clone(), + active_fvk.clone(), + pmonitor_config.grpc_url(), + ) + .await?; + + // todo: do this in parallel? + self.sync(&mut view_client).await?; + tracing::debug!("finished syncing wallet {}/{}", index + 1, num_accounts); + + // Check if the account has been migrated + let storage = Storage::load_or_initialize( + Some(active_path.join(VIEW_FILE_NAME)), + &active_fvk, + pmonitor_config.grpc_url(), + ) + .await?; + + let migration_tx = storage + .transactions_matching_memo(format!( + // N.B. the `%` symbol is an SQLite wildcard, required to match the + // remainder of the memo field. + "Migrating balance from {}%", + active_fvk.to_string() + )) + .await?; + if migration_tx.is_empty() { + tracing::debug!( + "account has not been migrated, continuing using existing FVK..." + ); + } else if migration_tx.len() == 1 { + tracing::warn!( + "❗ account has been migrated to new FVK, continuing using new FVK..." + ); + let (_, _, _tx, memo_text) = &migration_tx[0]; + let new_fvk = parse_dest_fvk_from_memo(&memo_text)?; + let wallet_id = Uuid::new_v4(); + let wallet_dir = self.wallet_path(&wallet_id); + self.create_wallet(&wallet_dir, &new_fvk, &pmonitor_config.grpc_url()) + .await?; + + let new_fvk_entry = FvkEntry { + fvk: new_fvk.clone(), + wallet_id, + }; + // Mark that the config needs to get saved again for the next time we run the audit command. + config_updated = true; + + // We need to update the config with the new FVK and path on disk + // to the wallet for the next time we run the audit command. 
+ let mut new_config_entry = config.clone(); + new_config_entry.add_migration(new_fvk_entry); + updated_config.set_account(index, new_config_entry.clone()); + + view_client = self + .view(wallet_dir, new_fvk.clone(), pmonitor_config.grpc_url()) + .await?; + + tracing::info!("syncing migrated wallet"); + self.sync(&mut view_client).await?; + tracing::info!("finished syncing migrated wallet"); + // Now we can exit the else if statement and continue by computing the balance, + // which will use the new migrated wallet. + } else { + // we expect a single migration tx per FVK, if this assumption is violated we should bail. + anyhow::bail!( + "Expected a single migration tx, found {}", + migration_tx.len() + ); + } + + let current_um_equivalent_amount = self + .compute_um_equivalent_balance(&mut view_client, &mut stake_client) + .await?; + + tracing::debug!("original FVK: {:?}", config.original_fvk()); + + let genesis_um_equivalent_amount = config.genesis_balance(); + // Let the user know if the balance is unexpected or not + if check_wallet_compliance( + genesis_um_equivalent_amount, + current_um_equivalent_amount, + ) { + tracing::info!( + ?genesis_um_equivalent_amount, + ?current_um_equivalent_amount, + "✅ expected balance! current balance is within compliant range of the genesis balance", + ); + } else { + tracing::error!( + ?genesis_um_equivalent_amount, + ?current_um_equivalent_amount, + "❌ unexpected balance! current balance is less than the genesis balance, by more than {ALLOWED_DISCREPANCY}UM", + ); + failures.push(config); + } + } + + // If at any point we marked the config for updating, we need to save it. + if config_updated { + fs::write(config_path.clone(), toml::to_string(&updated_config)?)?; + } + + // Print summary message + emit_summary_message(pmonitor_config.accounts(), failures)?; + + Ok(()) + } + } + } +} + +/// Prepare a human-readable text summary at the end of the audit run. 
+/// This is important, as errors logged during scanning are likely to be off-screen +/// due to backscroll. +fn emit_summary_message( + all_accounts: &Vec<AccountConfig>, + failures: Vec<&AccountConfig>, +) -> Result<()> { + println!("#######################"); + println!("Summary of FVK scanning"); + println!("#######################"); + println!("Total number of FVKs scanned: {}", all_accounts.len(),); + let compliant_count = format!( + "Number deemed compliant: {}", + all_accounts.len() - failures.len(), + ); + let failure_count = format!("Number deemed in violation: {}", failures.len(),); + if failures.is_empty() { + println!("{}", compliant_count.green()); + println!("{}", failure_count); + } else { + println!("{}", compliant_count.yellow()); + println!("{}", failure_count.red()); + println!("The non-compliant FVKs are:"); + println!(""); + for f in &failures { + println!("\t* {}", f.active_fvk().to_string()); + } + println!(""); + // println!("{}", "Error: non-compliant balances were detected".red()); + anyhow::bail!("non-compliant balances were detected".red()); + } + Ok(()) +} + +/// Check whether the wallet is compliant. +/// +/// Rather than a naive comparison that the current balance is greater than or +/// equal to the genesis balance, we permit less than within a tolerance of +/// 0.1UM. Doing so allows for discrepancies due to gas fees, for instance +/// if `pcli migrate balance` was used. +fn check_wallet_compliance(genesis_balance: Amount, current_balance: Amount) -> bool { + // Since the `Amount` of the staking token will be in millionths, + // we multiply 0.1 * 1_000_000. 
+ let allowed_discrepancy = ALLOWED_DISCREPANCY * 1_000_000 as f64; + let mut result = false; + if current_balance >= genesis_balance { + result = true; + } else { + let actual_discrepancy = genesis_balance - current_balance; + let discrepancy_formatted = f64::from(actual_discrepancy) / 1_000_000 as f64; + tracing::trace!("detected low balance, missing {}UM", discrepancy_formatted); + if f64::from(actual_discrepancy) <= allowed_discrepancy { + result = true + } + } + result +} diff --git a/crates/bin/pmonitor/tests/common/mod.rs b/crates/bin/pmonitor/tests/common/mod.rs new file mode 100644 index 0000000000..a1465ee712 --- /dev/null +++ b/crates/bin/pmonitor/tests/common/mod.rs @@ -0,0 +1,332 @@ +//! Integration test helpers for `pmonitor`. +//! Contains logic to bootstrap a local devnet, complete with genesis +//! allocations for pre-existing wallets, so that `pmonitor` can audit +//! the behavior of those wallets on the target chain. + +use anyhow::{Context, Result}; +use assert_cmd::Command as AssertCommand; +use once_cell::sync::Lazy; +use pcli::config::PcliConfig; +use penumbra_keys::address::Address; +use std::fs::{create_dir_all, remove_dir_all, File}; +use std::io::{BufWriter, Write}; +use std::path::PathBuf; +use std::process::{Child, Command, Stdio}; +use std::time::Duration; +pub mod pcli_helpers; +use crate::common::pcli_helpers::{pcli_init_softkms, pcli_view_address}; + +/// The TCP port for the process-compose API, used to start/stop devnet. +const PROCESS_COMPOSE_PORT: u16 = 8888; + +/// The path in-repo to the `process-compose` manifest used for running a devnet, +/// relative to the current crate root. This is a minimal manifest, that only runs pd & cometbft. 
+static PROCESS_COMPOSE_MANIFEST_FILEPATH: Lazy<PathBuf> = Lazy::new(|| { + let p: PathBuf = [ + env!("CARGO_MANIFEST_DIR"), + "..", + "..", + "..", + "deployments", + "compose", + "process-compose.yml", + ] + .iter() + .collect(); + p +}); + +/// The path to the root of the git repo, used for setting the working directory +/// when running `process-compose`. +static REPO_ROOT: Lazy<PathBuf> = Lazy::new(|| { + let p: PathBuf = [env!("CARGO_MANIFEST_DIR"), "../", "../", "../"] + .iter() + .collect(); + p +}); + +/// Manager for running suites of integration tests for `pmonitor`. +/// Only one instance should exist at a time! The test suites +/// assume access to global resources such as 8080/TCP for pd, +/// and a hardcoded directory in `/tmp/` for the pmonitor configs. +pub struct PmonitorTestRunner { + /// Top-level directory for storing all integration test info, + /// such as wallets and pd network state. + pmonitor_integration_test_dir: PathBuf, + /// How many client wallets to create for testing. + num_wallets: u16, +} + +/// Make sure to halt the running devnet, regardless of test pass/fail. +impl Drop for PmonitorTestRunner { + fn drop(&mut self) { + let _result = self.stop_devnet(); + } +} + +impl PmonitorTestRunner { + /// Create a new test runner environment. + /// Caller must ensure no other instances exist, because this method + /// will destroy existing test data directories. + pub fn new() -> Self { + // Ideally we'd use a tempdir but using a hardcoded dir for debugging. + let p: PathBuf = ["/tmp", "pmonitor-integration-test"].iter().collect(); + // Nuke any pre-existing state + if p.exists() { + remove_dir_all(&p).expect("failed to remove directory for pmonitor integration tests"); + } + // Ensure parent dir exists; other methods will create subdirs as necessary. + create_dir_all(&p).expect("failed to create directory for pmonitor integration tests"); + Self { + pmonitor_integration_test_dir: p, + num_wallets: 10, + } + } + // Return path for pmonitor home directory. 
+ // Does not create the path, because `pmonitor` will fail if its home already exists. + pub fn pmonitor_home(&self) -> PathBuf { + self.pmonitor_integration_test_dir.join("pmonitor") + } + // Create directory and return path for storing client wallets + pub fn wallets_dir(&self) -> Result<PathBuf> { + let p = self.pmonitor_integration_test_dir.join("wallets"); + create_dir_all(&p)?; + Ok(p) + } + + /// Initialize local pcli configs for all wallets specified in config. + pub fn create_pcli_wallets(&self) -> anyhow::Result<()> { + for i in 0..self.num_wallets - 1 { + let pcli_home = self.wallets_dir()?.join(format!("wallet-{}", i)); + pcli_init_softkms(&pcli_home)?; + } + Ok(()) + } + + /// Iterate over all client wallets and return a `PcliConfig` for each. + pub fn get_pcli_wallet_configs(&self) -> anyhow::Result<Vec<PcliConfig>> { + let mut results = Vec::<PcliConfig>::new(); + for i in 0..self.num_wallets - 1 { + let pcli_home = self.wallets_dir()?.join(format!("wallet-{}", i)); + let pcli_config_path = pcli_home.join("config.toml"); + let pcli_config = PcliConfig::load( + pcli_config_path + .to_str() + .expect("failed to convert pcli wallet path to str"), + )?; + results.push(pcli_config); + } + Ok(results) + } + + /// Iterate over all client wallets and return address 0 for each. + pub fn get_pcli_wallet_addresses(&self) -> anyhow::Result<Vec<Address>> { + let mut results = Vec::<Address>
::new(); + for i in 0..self.num_wallets - 1 { + let pcli_home = self.wallets_dir()?.join(format!("wallet-{}", i)); + let penumbra_address = pcli_view_address(&pcli_home)?; + results.push(penumbra_address); + } + Ok(results) + } + /// Iterate over all client wallets, grab an FVK for each, write those + /// FVKs to a local JSON file, and return the path to that file. + pub fn get_pcli_wallet_fvks_filepath(&self) -> anyhow::Result<PathBuf> { + let p = self.pmonitor_integration_test_dir.join("fvks.json"); + if !p.exists() { + // We use a Vec<String> rather than Vec<FullViewingKey> so we get the string + // representations + let fvks: Vec<String> = self + .get_pcli_wallet_configs()? + .into_iter() + .map(|c| c.full_viewing_key.to_string()) + .collect(); + let mut w = BufWriter::new(File::create(&p)?); + serde_json::to_writer(&mut w, &fvks)?; + w.flush()?; + } + Ok(p) + } + + /// Create a CSV file of genesis allocations for all pcli test wallets. + pub fn generate_genesis_allocations(&self) -> anyhow::Result<PathBuf> { + let allocations_filepath = self.pmonitor_integration_test_dir.join("allocations.csv"); + + // Generate file contents + if !allocations_filepath.exists() { + let mut w = BufWriter::new(File::create(&allocations_filepath)?); + let csv_header = String::from("amount,denom,address\n"); + w.write(csv_header.as_bytes())?; + for a in self.get_pcli_wallet_addresses()? { + let allo = format!("1_000_000__000_000,upenumbra,{}\n1000,test_usd,{}\n", a, a); + w.write(allo.as_bytes())?; + } + w.flush()?; + } + Ok(allocations_filepath) + } + + /// Create a genesis event for the local devnet, with genesis allocations for all pcli wallets. + /// This is a *destructive* action, as it removes the contents of the default pd network_data + /// directory prior to generation. + pub fn generate_network_data(&self) -> anyhow::Result<()> { + // TODO: it'd be nice if we wrote all this network_data to a tempdir, + // but instead we just reuse the default pd home. + + let reset_cmd = AssertCommand::cargo_bin("pd")? 
+ .args(["network", "unsafe-reset-all"]) + .output(); + assert!( + reset_cmd.unwrap().status.success(), + "failed to clear out prior local devnet config" + ); + + // Ideally we'd use a rust interface to compose the network config, rather than shelling + // out to `pd`, but the current API for network config isn't ergonomic. Also, we get free + // integration testing for the `pd` CLI by shelling out, which is nice. + let cmd = AssertCommand::cargo_bin("pd")? + .args([ + "network", + "generate", + "--chain-id", + "penumbra-devnet-pmonitor", + "--unbonding-delay", + "50", + "--epoch-duration", + "50", + "--proposal-voting-blocks", + "50", + "--timeout-commit", + "3s", + // we must opt in to fees, in order to test the migration functionality! + "--gas-price-simple", + "500", + // include allocations for the generated pcli wallets + "--allocations-input-file", + &self + .generate_genesis_allocations()? + .to_str() + .expect("failed to convert allocations csv to str"), + ]) + .output(); + assert!( + cmd.unwrap().status.success(), + "failed to generate local devnet config" + ); + Ok(()) + } + + /// Generate a config directory for `pmonitor`, based on input FVKs. + pub fn initialize_pmonitor(&self) -> anyhow::Result<()> { + let cmd = AssertCommand::cargo_bin("pmonitor")? + .args([ + "--home", + self.pmonitor_home() + .to_str() + .expect("failed to convert pmonitor home to str"), + "init", + "--grpc-url", + "http://127.0.0.1:8080", + "--fvks", + self.get_pcli_wallet_fvks_filepath() + .context("failed to get wallet fvks")? + .to_str() + .expect("failed to convert fvks json filepath to str"), + ]) + .output(); + + assert!( + cmd.unwrap().status.success(), + "failed to initialize pmonitor" + ); + Ok(()) + } + + /// Run `pmonitor audit` based on the pcli wallets and associated FVKs. + pub fn pmonitor_audit(&self) -> anyhow::Result<()> { + let p = self.pmonitor_integration_test_dir.join("pmonitor"); + let cmd = AssertCommand::cargo_bin("pmonitor")? 
+ .args([ + "--home", + p.to_str().expect("failed to convert pmonitor home to str"), + "audit", + ]) + .ok(); + if cmd.is_ok() { + Ok(()) + } else { + anyhow::bail!("failed during 'pmonitor audit'") + } + } + + /// Halt any pre-existing local devnet for these integration tests. + /// We assume that the port `8888` is unique to the process-compose API for this test suite. + fn stop_devnet(&self) -> anyhow::Result<()> { + // Confirm that process-compose is installed, otherwise integration tests can't run. + Command::new("process-compose") + .arg("--help") + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .status() + .expect("process-compose is not available on PATH; activate the nix dev env"); + + // Stop an existing devnet on the custom port; ignore error, since we don't know one is + // running. + let cmd = Command::new("process-compose") + .env("PC_PORT_NUM", PROCESS_COMPOSE_PORT.to_string()) + .arg("down") + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .status(); + + match cmd { + Ok(_c) => { + tracing::trace!( + "'process-compose down' completed, sleeping briefly during teardown" + ); + + std::thread::sleep(Duration::from_secs(3)); + return Ok(()); + } + Err(_e) => { + tracing::trace!( + "'process-compose down' failed, presumably no prior network running" + ); + Ok(()) + } + } + } + + /// Run a local devnet based on input config. Returns a handle to the spawned process, + /// so that cleanup can be handled gracefully. + /// We assume that the port `8888` is unique to the process-compose API for this test suite. + pub fn start_devnet(&self) -> anyhow::Result<Child> { + // Ensure no other instance is currently running; + self.stop_devnet()?; + + self.generate_network_data()?; + + // Stop an existing devnet on the custom port; ignore error, since we don't know one is + // running. 
+ let child = Command::new("process-compose") + .env("PC_PORT_NUM", PROCESS_COMPOSE_PORT.to_string()) + .current_dir(REPO_ROOT.as_os_str()) + .args([ + "up", + "--detached", + "--config", + PROCESS_COMPOSE_MANIFEST_FILEPATH + .to_str() + .expect("failed to convert process-compose manifest to str"), + ]) + .stdin(Stdio::null()) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .spawn() + .expect("failed to execute devnet start cmd"); + // Sleep a bit, to let network start + // TODO: use process-compose API to check for "Running" status on pd. + std::thread::sleep(Duration::from_secs(8)); + Ok(child) + } +} diff --git a/crates/bin/pmonitor/tests/common/pcli_helpers.rs b/crates/bin/pmonitor/tests/common/pcli_helpers.rs new file mode 100644 index 0000000000..c62cc4c057 --- /dev/null +++ b/crates/bin/pmonitor/tests/common/pcli_helpers.rs @@ -0,0 +1,63 @@ +//! Convenience methods for wrangling `pcli` CLI invocations, +//! via `cargo bin` commands, for use in integration testing. + +use anyhow::{Context, Result}; +use assert_cmd::Command as AssertCommand; +use penumbra_keys::{address::Address, FullViewingKey}; +use std::path::PathBuf; +use std::str::FromStr; + +/// Initialize a new pcli wallet at the target directory. +/// Discards the generated seed phrase. +pub fn pcli_init_softkms(pcli_home: &PathBuf) -> Result<()> { + let mut cmd = AssertCommand::cargo_bin("pcli")?; + cmd.args([ + "--home", + pcli_home + .to_str() + .expect("can convert wallet path to string"), + "init", + "--grpc-url", + "http://127.0.0.1:8080", + "soft-kms", + "generate", + ]) + // send empty string to accept the interstitial seed phrase display + .write_stdin(""); + cmd.assert().success(); + Ok(()) +} + +/// Convenience method for looking up `address 0` from +/// pcli wallet stored at `pcli_home`. +pub fn pcli_view_address(pcli_home: &PathBuf) -> Result<Address>
{ + let output = AssertCommand::cargo_bin("pcli")? + .args(["--home", pcli_home.to_str().unwrap(), "view", "address"]) + .output() + .expect("failed to retrieve address from pcli wallet"); + + // Convert output to String, to trim trailing newline. + let mut a = String::from_utf8_lossy(&output.stdout).to_string(); + if a.ends_with('\n') { + a.pop(); + } + Address::from_str(&a).with_context(|| format!("failed to convert str to Address: '{}'", a)) +} + +/// Perform a `pcli migrate balance` transaction from the wallet at `pcli_home`, +/// transferring funds to the destination `FullViewingKey`. +pub fn pcli_migrate_balance(pcli_home: &PathBuf, fvk: &FullViewingKey) -> Result<()> { + let mut cmd = AssertCommand::cargo_bin("pcli")?; + cmd.args([ + "--home", + pcli_home + .to_str() + .expect("can convert wallet path to string"), + "migrate", + "balance", + ]) + // pipe FVK to stdin + .write_stdin(fvk.to_string()); + cmd.assert().success(); + Ok(()) +} diff --git a/crates/bin/pmonitor/tests/network_integration.rs b/crates/bin/pmonitor/tests/network_integration.rs new file mode 100644 index 0000000000..c5bad684fb --- /dev/null +++ b/crates/bin/pmonitor/tests/network_integration.rs @@ -0,0 +1,247 @@ +//! Integration integration testing of `pmonitor` against a local devnet. +//! Sets up various scenarios of genesis allocations, and ensures the tool reports +//! violations as errors. +//! +//! As a convenience to developers, there's a commented-out `sleep` call in the +//! `audit_passes_on_compliant_wallets` test. If enabled, the setup testbed can be interacted with +//! manually, which helps when trying to diagnose behavior of the tool. 
+use anyhow::Context; +use assert_cmd::Command as AssertCommand; +use pcli::config::PcliConfig; +mod common; +use crate::common::pcli_helpers::{pcli_init_softkms, pcli_migrate_balance, pcli_view_address}; +use crate::common::PmonitorTestRunner; + +#[ignore] +#[test] +/// Tests the simplest happy path for pmonitor: all wallets have genesis balances, +/// they never transferred any funds out, nor migrated balances, so all +/// current balances equal the genesis balances. In this case `pmonitor` +/// should exit 0. +fn audit_passes_on_compliant_wallets() -> anyhow::Result<()> { + tracing_subscriber::fmt::try_init().ok(); + let p = PmonitorTestRunner::new(); + p.create_pcli_wallets()?; + let _network = p.start_devnet()?; + p.initialize_pmonitor()?; + + // Debugging: uncomment the sleep line below if you want to interact with the pmonitor testbed + // that was set up already. Use e.g.: + // + // cargo run --bin pmonitor -- --home /tmp/pmonitor-integration-test/pmonitor audit + // + // to view the output locally. + // + // std::thread::sleep(std::time::Duration::from_secs(3600)); + + p.pmonitor_audit()?; + Ok(()) +} + +#[ignore] +#[test] +/// Tests another happy path for pmonitor: all wallets have genesis balances, +/// one of the wallets ran `pcli migrate balance` once. This means that all +/// wallets still have their genesis balance, save one, which has the genesis +/// balance minus gas fees. In this case, `pmonitor` should exit 0, +/// because it understood the balance migration and updated the FVK. +fn audit_passes_on_wallets_that_migrated_once() -> anyhow::Result<()> { + let p = PmonitorTestRunner::new(); + p.create_pcli_wallets()?; + let _network = p.start_devnet()?; + // Run audit once, to confirm compliance on clean slate. + p.initialize_pmonitor()?; + p.pmonitor_audit()?; + + // Create an empty wallet, with no genesis funds, to which we'll migrate a balance. 
+    let alice_pcli_home = p.wallets_dir()?.join("wallet-alice");
+    pcli_init_softkms(&alice_pcli_home)?;
+    let alice_pcli_config = PcliConfig::load(
+        alice_pcli_home
+            .join("config.toml")
+            .to_str()
+            .expect("failed to convert alice wallet to str"),
+    )?;
+
+    // Take the second wallet, and migrate its balance to Alice.
+    let migrated_wallet = p.wallets_dir()?.join("wallet-1");
+    pcli_migrate_balance(&migrated_wallet, &alice_pcli_config.full_viewing_key)?;
+
+    // Now re-run the audit tool: it should report OK again, because all we did was migrate.
+    p.pmonitor_audit()?;
+    Ok(())
+}
+
+#[ignore]
+#[test]
+/// Tests another happy path for pmonitor: all wallets have genesis balances,
+/// one of the wallets ran `pcli migrate balance` once, then that receiving
+/// wallet ran `pcli migrate balance` itself, so the genesis funds are now
+/// two (2) FVKs away from the original account. In this case,
+/// `pmonitor` should exit 0, because it understood all balance migrations
+/// and updated the FVK in its config file accordingly.
+fn audit_passes_on_wallets_that_migrated_twice() -> anyhow::Result<()> {
+    let p = PmonitorTestRunner::new();
+    p.create_pcli_wallets()?;
+    let _network = p.start_devnet()?;
+    // Run audit once, to confirm compliance on clean slate.
+    p.initialize_pmonitor()?;
+    p.pmonitor_audit()
+        .context("failed unexpectedly during initial audit run")?;
+
+    // Create an empty wallet, with no genesis funds, to which we'll migrate a balance.
+    let alice_pcli_home = p.wallets_dir()?.join("wallet-alice");
+    pcli_init_softkms(&alice_pcli_home)?;
+    let alice_pcli_config = PcliConfig::load(
+        alice_pcli_home
+            .join("config.toml")
+            .to_str()
+            .expect("failed to convert alice wallet to str"),
+    )?;
+
+    // Take the second wallet, and migrate its balance to Alice.
+    let migrated_wallet = p.wallets_dir()?.join("wallet-1");
+    pcli_migrate_balance(&migrated_wallet, &alice_pcli_config.full_viewing_key)?;
+
+    // Now re-run the audit tool: it should report OK again, because all we did was migrate.
+    p.pmonitor_audit()
+        .context("failed unexpectedly during second audit run")?;
+
+    // Create another empty wallet, with no genesis funds, to which we'll migrate a balance.
+    let bob_pcli_home = p.wallets_dir()?.join("wallet-bob");
+    pcli_init_softkms(&bob_pcli_home)?;
+    let bob_pcli_config = PcliConfig::load(
+        bob_pcli_home
+            .join("config.toml")
+            .to_str()
+            .expect("failed to convert bob wallet to str"),
+    )?;
+
+    // Re-migrate the balance from Alice to Bob.
+    pcli_migrate_balance(&alice_pcli_home, &bob_pcli_config.full_viewing_key)?;
+
+    // Now re-run the audit tool: it should report OK again, confirming that it
+    // successfully tracks multiple migrations.
+    p.pmonitor_audit()
+        .context("failed unexpectedly during final audit run in test")?;
+
+    Ok(())
+}
+#[ignore]
+#[test]
+/// Tests an unhappy path for `pmonitor`: a single wallet has sent all its funds
+/// to a non-genesis account, via `pcli tx send` rather than `pcli migrate balance`.
+/// In this case, `pmonitor` should exit non-zero.
+fn audit_fails_on_misbehaving_wallet_that_sent_funds() -> anyhow::Result<()> {
+    let p = PmonitorTestRunner::new();
+    p.create_pcli_wallets()?;
+    let _network = p.start_devnet()?;
+    // Run audit once, to confirm compliance on clean slate.
+    p.initialize_pmonitor()?;
+    p.pmonitor_audit()?;
+
+    // Create an empty wallet, with no genesis funds, to which we'll
+    // manually send balance.
+    let alice_pcli_home = p.wallets_dir()?.join("wallet-alice");
+    pcli_init_softkms(&alice_pcli_home)?;
+
+    let alice_address = pcli_view_address(&alice_pcli_home)?;
+
+    // Take the second wallet, and send most of its funds of staking tokens to Alice.
+    let misbehaving_wallet = p.wallets_dir()?.join("wallet-1");
+
+    let send_cmd = AssertCommand::cargo_bin("pcli")?
+ .args([ + "--home", + misbehaving_wallet.to_str().unwrap(), + "tx", + "send", + "--to", + &alice_address.to_string(), + "900penumbra", + ]) + .output() + .expect("failed to execute sending tx to alice wallet"); + assert!(send_cmd.status.success(), "failed to send tx to alice"); + + // Now re-run the audit tool: it should report failure, via a non-zero exit code, + // because of the missing funds. + let result = p.pmonitor_audit(); + assert!( + result.is_err(), + "expected pmonitor to fail due to missing funds" + ); + Ok(()) +} + +#[ignore] +#[test] +/// Tests a happy path for `pmonitor`: a single wallet has sent all its funds +/// to non-genesis account, via `pcli tx send` rather than `pcli migrate balance`, +/// but the receiving wallet then sent those funds back. +/// In this case, `pmonitor` should exit zero. +fn audit_passes_on_misbehaving_wallet_that_sent_funds_but_got_them_back() -> anyhow::Result<()> { + tracing_subscriber::fmt::try_init().ok(); + let p = PmonitorTestRunner::new(); + p.create_pcli_wallets()?; + let _network = p.start_devnet()?; + // Run audit once, to confirm compliance on clean slate. + p.initialize_pmonitor()?; + p.pmonitor_audit()?; + + // Create an empty wallet, with no genesis funds, to which we'll + // manually send balance. + let alice_pcli_home = p.wallets_dir()?.join("wallet-alice"); + pcli_init_softkms(&alice_pcli_home)?; + + let alice_address = pcli_view_address(&alice_pcli_home)?; + + // Take the second wallet, and send most of its funds of staking tokens to Alice. + let misbehaving_wallet = p.wallets_dir()?.join("wallet-1"); + + let send_cmd = AssertCommand::cargo_bin("pcli")? 
+ .args([ + "--home", + misbehaving_wallet.to_str().unwrap(), + "tx", + "send", + "--to", + &alice_address.to_string(), + "900penumbra", + ]) + .output() + .expect("failed to execute sending tx to alice wallet"); + assert!(send_cmd.status.success(), "failed to send tx to alice"); + + // The audit tool detects this state as a failure, since funds are missing. + let result = p.pmonitor_audit(); + assert!( + result.is_err(), + "expected pmonitor to fail due to missing funds" + ); + + // Send the funds from alice back to the misbehaving wallet. + let misbehaving_address = pcli_view_address(&misbehaving_wallet)?; + let refund_cmd = AssertCommand::cargo_bin("pcli")? + .args([ + "--home", + alice_pcli_home.to_str().unwrap(), + "tx", + "send", + "--to", + &misbehaving_address.to_string(), + // We intentionally specify a bit less than we received, to account for gas. + "899.99penumbra", + ]) + .output() + .expect("failed to execute refund tx from alice wallet"); + assert!( + refund_cmd.status.success(), + "failed to send refund tx from alice" + ); + + // The audit tool detects this state as compliant again, because the funds were returned. 
+ p.pmonitor_audit()?; + + Ok(()) +} diff --git a/crates/view/src/storage.rs b/crates/view/src/storage.rs index 8da8a3d74c..330b47c7a9 100644 --- a/crates/view/src/storage.rs +++ b/crates/view/src/storage.rs @@ -1258,11 +1258,11 @@ impl Storage { dbtx.execute( "INSERT INTO notes (note_commitment, address, amount, asset_id, rseed) VALUES (?1, ?2, ?3, ?4, ?5) - ON CONFLICT (note_commitment) - DO UPDATE SET - address = excluded.address, - amount = excluded.amount, - asset_id = excluded.asset_id, + ON CONFLICT (note_commitment) + DO UPDATE SET + address = excluded.address, + amount = excluded.amount, + asset_id = excluded.asset_id, rseed = excluded.rseed", (note_commitment, address, amount, asset_id, rseed), )?; @@ -1432,7 +1432,7 @@ impl Storage { let params_bytes = params.encode_to_vec(); // We expect app_params to be present already but may as well use an upsert dbtx.execute( - "INSERT INTO kv (k, v) VALUES ('app_params', ?1) + "INSERT INTO kv (k, v) VALUES ('app_params', ?1) ON CONFLICT(k) DO UPDATE SET v = excluded.v", [¶ms_bytes[..]], )?; @@ -1460,12 +1460,12 @@ impl Storage { (note_commitment, nullifier, position, height_created, address_index, source, height_spent, tx_hash) VALUES (?1, ?2, ?3, ?4, ?5, ?6, NULL, ?7) ON CONFLICT (note_commitment) - DO UPDATE SET nullifier = excluded.nullifier, - position = excluded.position, - height_created = excluded.height_created, - address_index = excluded.address_index, - source = excluded.source, - height_spent = excluded.height_spent, + DO UPDATE SET nullifier = excluded.nullifier, + position = excluded.position, + height_created = excluded.height_created, + address_index = excluded.address_index, + source = excluded.source, + height_spent = excluded.height_spent, tx_hash = excluded.tx_hash", ( ¬e_commitment, @@ -1492,12 +1492,12 @@ impl Storage { dbtx.execute( "INSERT INTO swaps (swap_commitment, swap, position, nullifier, output_data, height_claimed, source) VALUES (?1, ?2, ?3, ?4, ?5, NULL, ?6) - ON CONFLICT 
(swap_commitment) - DO UPDATE SET swap = excluded.swap, - position = excluded.position, - nullifier = excluded.nullifier, - output_data = excluded.output_data, - height_claimed = excluded.height_claimed, + ON CONFLICT (swap_commitment) + DO UPDATE SET swap = excluded.swap, + position = excluded.position, + nullifier = excluded.nullifier, + output_data = excluded.output_data, + height_claimed = excluded.height_claimed, source = excluded.source", ( &swap_commitment, @@ -1589,13 +1589,15 @@ impl Storage { let tx_hash_owned = sha2::Sha256::digest(&tx_bytes); let tx_hash = tx_hash_owned.as_slice(); let tx_block_height = filtered_block.height as i64; - let return_address = transaction.decrypt_memo(&fvk).map_or(None, |x| Some(x.return_address().to_vec())); + let decrypted_memo = transaction.decrypt_memo(&fvk).ok(); + let memo_text = decrypted_memo.clone().map_or(None,|x| Some(x.text().to_string())); + let return_address = decrypted_memo.map_or(None, |x| Some(x.return_address().to_vec())); tracing::debug!(tx_hash = ?hex::encode(tx_hash), "recording extended transaction"); dbtx.execute( - "INSERT OR IGNORE INTO tx (tx_hash, tx_bytes, block_height, return_address) VALUES (?1, ?2, ?3, ?4)", - (&tx_hash, &tx_bytes, tx_block_height, return_address), + "INSERT OR IGNORE INTO tx (tx_hash, tx_bytes, block_height, return_address, memo_text) VALUES (?1, ?2, ?3, ?4, ?5)", + (&tx_hash, &tx_bytes, tx_block_height, return_address, memo_text), )?; // Associate all of the spent nullifiers with the transaction by hash. @@ -1756,4 +1758,31 @@ impl Storage { Ok(records) } + + /// Get all transactions with a matching memo text. The `pattern` argument + /// should include SQL wildcards, such as `%` and `_`, to match substrings, + /// e.g. `%foo%`. 
+    pub async fn transactions_matching_memo(
+        &self,
+        pattern: String,
+    ) -> anyhow::Result<Vec<(u64, Vec<u8>, Transaction, String)>> {
+        let pattern = pattern.to_owned();
+        tracing::trace!(?pattern, "searching for memos matching");
+        let pool = self.pool.clone();
+
+        spawn_blocking(move || {
+            pool.get()?
+                .prepare_cached("SELECT block_height, tx_hash, tx_bytes, memo_text FROM tx WHERE memo_text LIKE ?1 ESCAPE '\\'")?
+                .query_and_then([pattern], |row| {
+                    let block_height: u64 = row.get("block_height")?;
+                    let tx_hash: Vec<u8> = row.get("tx_hash")?;
+                    let tx_bytes: Vec<u8> = row.get("tx_bytes")?;
+                    let tx = Transaction::decode(tx_bytes.as_slice())?;
+                    let memo_text: String = row.get("memo_text")?;
+                    anyhow::Ok((block_height, tx_hash, tx, memo_text))
+                })?
+                .collect()
+        })
+        .await?
+    }
+}
diff --git a/crates/view/src/storage/schema.sql b/crates/view/src/storage/schema.sql
index f50e6d12f8..41c52ba820 100644
--- a/crates/view/src/storage/schema.sql
+++ b/crates/view/src/storage/schema.sql
@@ -54,7 +54,8 @@ CREATE TABLE tx (
     tx_hash BLOB PRIMARY KEY NOT NULL,
     tx_bytes BLOB NOT NULL,
     block_height BIGINT NOT NULL,
-    return_address BLOB
+    return_address BLOB,
+    memo_text TEXT
 );
 
 -- This table just records the mapping from note commitments to note plaintexts.
diff --git a/deployments/containerfiles/Dockerfile b/deployments/containerfiles/Dockerfile index 5a4ff4b3c2..b33402c255 100644 --- a/deployments/containerfiles/Dockerfile +++ b/deployments/containerfiles/Dockerfile @@ -49,6 +49,7 @@ COPY --from=build-env \ /usr/src/penumbra/target/release/pclientd \ /usr/src/penumbra/target/release/pd \ /usr/src/penumbra/target/release/pindexer \ + /usr/src/penumbra/target/release/pmonitor \ /usr/src/penumbra/target/release/tct-live-edit \ /usr/bin/ diff --git a/deployments/scripts/pmonitor-integration-test.sh b/deployments/scripts/pmonitor-integration-test.sh new file mode 100755 index 0000000000..51d2f3acc5 --- /dev/null +++ b/deployments/scripts/pmonitor-integration-test.sh @@ -0,0 +1,110 @@ +#!/bin/bash +# quick script to test the `pmonitor` tool during review +# set -euo pipefail +set -eu + +>&2 echo "Preparing pmonitor test bed..." +num_wallets=10 + +# ideally we'd use a tempdir but using a hardcoded dir for debugging +# pmonitor_integration_test_dir="$(mktemp -p /tmp -d pmonitor-integration-test.XXXXXX)" +pmonitor_integration_test_dir="/tmp/pmonitor-integration-test" +rm -rf "$pmonitor_integration_test_dir" +mkdir "$pmonitor_integration_test_dir" + +pmonitor_home="${pmonitor_integration_test_dir}/pmonitor" +wallets_dir="${pmonitor_integration_test_dir}/wallets" +wallet_addresses="${pmonitor_integration_test_dir}/addresses.txt" +allocations_csv="${pmonitor_integration_test_dir}/pmonitor-test-allocations.csv" +fvks_json="${pmonitor_integration_test_dir}/fvks.json" +cargo run --release --bin pd -- network unsafe-reset-all || true +cargo run --release --bin pmonitor -- reset || true +mkdir "$wallets_dir" +# override process-compose default port of 8080, which we use for pd +export PC_PORT_NUM="8888" +process-compose down || true + +>&2 echo "creating pcli wallets" +for i in $(seq 1 "$num_wallets"); do + yes | cargo run -q --release --bin pcli -- --home "${wallets_dir}/wallet-$i" init --grpc-url http://localhost:8080 soft-kms 
generate +done + +# collect addresses +>&2 echo "collecting pcli wallet addresses" +for i in $(seq 1 "$num_wallets"); do + cargo run -q --release --bin pcli -- --home "${wallets_dir}/wallet-$i" view address +done > "$wallet_addresses" + + +# generate genesis allocations +>&2 echo "generating genesis allocations" +printf 'amount,denom,address\n' > "$allocations_csv" +while read -r a ; do + printf '1_000_000__000_000,upenumbra,%s\n1000,test_usd,%s\n' "$a" "$a" +done < "$wallet_addresses" >> "$allocations_csv" + +# generate network data +>&2 echo "generating network data" +cargo run --release --bin pd -- network generate \ + --chain-id penumbra-devnet-pmonitor \ + --unbonding-delay 50 \ + --epoch-duration 50 \ + --proposal-voting-blocks 50 \ + --timeout-commit 3s \ + --gas-price-simple 500 \ + --allocations-input-file "$allocations_csv" + +# run network +>&2 echo "running local devnet" +process-compose up --detached --config deployments/compose/process-compose.yml + +# ensure network is torn down afterward; comment this out if you want +# to interact with the network after tests complete. 
+trap 'process-compose down || true' EXIT + +# wait for network to come up; lazily sleeping, rather than polling process-compose for "ready" state +sleep 8 + +>&2 echo "collecting fvks" +fd config.toml "$wallets_dir" -x toml get {} full_viewing_key | jq -s > "$fvks_json" + +>&2 echo "initializing pmonitor" +cargo run --release --bin pmonitor -- \ + --home "$pmonitor_home" \ + init --fvks "$fvks_json" --grpc-url http://localhost:8080 + +>&2 echo "running pmonitor audit" +# happy path: we expect this audit to exit 0, because no transfers have occurred yet +cargo run --release --bin pmonitor -- \ + --home "$pmonitor_home" \ + audit + +>&2 echo "exiting BEFORE misbehavior" +exit 0 + + + +>&2 echo "committing misbehavior" +alice_wallet="${wallets_dir}/wallet-alice" +yes | cargo run --quiet --release --bin pcli -- --home "$alice_wallet" init --grpc-url http://localhost:8080 soft-kms generate +alice_address="$(cargo run --quiet --release --bin pcli -- --home "$alice_wallet" view address)" +misbehaving_wallet="${wallets_dir}/wallet-2" +cargo run --quiet --release --bin pcli -- --home "$misbehaving_wallet" tx send --memo "take these tokens, but tell no one" 500penumbra --to "$alice_address" + +>&2 echo "re-running pmonitor audit" +# unhappy path: we expect this audit to exit 10, because a transfer occurred from a monitored wallet +# TODO: make pmonitor exit non-zero when there's bad misbehavior +cargo run --release --bin pmonitor -- \ + --home "$pmonitor_home" \ + audit | tee "${wallets_dir}/pmonitor-log-1.txt" + +printf '#################################\n' +printf 'PMONITOR INTEGRATION TEST SUMMARY\n' +printf '#################################\n' + +if grep -q "Unexpected balance! 
Balance is less than the genesis balance" "${wallets_dir}/pmonitor-log-1.txt" ; then + >&2 echo "OK: 'pmonitor audit' reported unexpected balance, due to misbehavior" +else + >&2 echo "ERROR: 'pmonitor audit' failed to identify misbehavior, which we know occurred" + exit 1 +fi diff --git a/deployments/scripts/rust-docs b/deployments/scripts/rust-docs index 89428747ff..571418bb2f 100755 --- a/deployments/scripts/rust-docs +++ b/deployments/scripts/rust-docs @@ -31,6 +31,7 @@ cargo +nightly doc --no-deps \ -p pcli \ -p pclientd \ -p pd \ + -p pmonitor \ -p penumbra-app \ -p penumbra-asset \ -p penumbra-community-pool \ diff --git a/flake.nix b/flake.nix index 3c094cb3e5..eba0a4f28f 100644 --- a/flake.nix +++ b/flake.nix @@ -81,7 +81,7 @@ [clang openssl rocksdb]; inherit system PKG_CONFIG_PATH LIBCLANG_PATH ROCKSDB_LIB_DIR; - cargoExtraArgs = "-p pd -p pcli -p pclientd -p pindexer"; + cargoExtraArgs = "-p pd -p pcli -p pclientd -p pindexer -p pmonitor"; meta = { description = "A fully private proof-of-stake network and decentralized exchange for the Cosmos ecosystem"; homepage = "https://penumbra.zone"; @@ -137,6 +137,8 @@ pclientd.program = "${penumbra}/bin/pclientd"; pindexer.type = "app"; pindexer.program = "${penumbra}/bin/pindexer"; + pmonitor.type = "app"; + pmonitor.program = "${penumbra}/bin/pmonitor"; cometbft.type = "app"; cometbft.program = "${cometbft}/bin/cometbft"; }; diff --git a/justfile b/justfile index e7c72fe9c6..bb5f5d29d6 100644 --- a/justfile +++ b/justfile @@ -2,6 +2,15 @@ default: @just --list +# Run integration tests for pmonitor tool +test-pmonitor: + # prebuild cargo binaries required for integration tests + cargo -q build --package pcli --package pd --package pmonitor + cargo -q run --release --bin pd -- network unsafe-reset-all + rm -rf /tmp/pmonitor-integration-test + cargo nextest run -p pmonitor --run-ignored=ignored-only --test-threads 1 + # cargo test -p pmonitor -- --ignored --test-threads 1 --nocapture + # Creates and runs a local 
devnet with solo validator. Includes ancillary services # like metrics, postgres for storing ABCI events, and pindexer for munging those events. dev: From ac847437effc7a5c1cc6c677da98f4cc35c53111 Mon Sep 17 00:00:00 2001 From: Conor Schaefer Date: Fri, 18 Oct 2024 09:45:55 -0700 Subject: [PATCH 41/43] docs: add testing steps to PR template --- .github/pull_request_template.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 4baa431c19..464c53b8d1 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,9 +1,16 @@ ## Describe your changes + + ## Issue ticket number and link ## Checklist before requesting a review +- [ ] I have added guiding text to explain how a reviewer should test these changes. + - [ ] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > REPLACE THIS TEXT WITH RATIONALE (CAN BE BRIEF) From 112bb2461af7d86a6b16177c43fe9f11249b69a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=BAc=C3=A1s=20Meier?= Date: Mon, 21 Oct 2024 09:05:38 -0700 Subject: [PATCH 42/43] pindexer: indexing for insights dashboard (#4898) Closes #4896. This adds the requisite tables, with a component parametrized by the numeraire for price information. Price information uses candlestick data for generating the price, so the recently added event will need to be used there. This also backfills in rich domain types into old crates as necessary, for convenience. Also closes #4883, since at this point we've refactored out all requisite schemas. ## Checklist before requesting a review - [x] If this code contains consensus-breaking changes, I have added the "consensus-breaking" label. 
Otherwise, I declare my belief that there are not consensus-breaking changes, for the following reason: > indexing only --- Cargo.lock | 1 + crates/bin/pindexer/Cargo.toml | 1 + crates/bin/pindexer/src/indexer_ext.rs | 9 + crates/bin/pindexer/src/insights/mod.rs | 506 ++++++++++++++++++ crates/bin/pindexer/src/insights/schema.sql | 54 ++ crates/bin/pindexer/src/lib.rs | 1 + crates/core/component/fee/src/component.rs | 17 +- crates/core/component/fee/src/event.rs | 47 ++ .../core/component/funding/src/component.rs | 30 +- crates/core/component/funding/src/event.rs | 50 +- .../src/component/action_handler/output.rs | 9 +- .../src/component/action_handler/spend.rs | 9 +- .../shielded-pool/src/component/transfer.rs | 173 +++--- .../core/component/shielded-pool/src/event.rs | 309 +++++++++-- .../src/component/action_handler/delegate.rs | 4 +- .../component/action_handler/undelegate.rs | 4 +- .../component/stake/src/component/stake.rs | 17 +- .../validator_handler/uptime_tracker.rs | 4 +- .../validator_handler/validator_manager.rs | 32 +- .../validator_handler/validator_store.rs | 35 +- crates/core/component/stake/src/event.rs | 476 +++++++++++++--- crates/core/component/stake/src/lib.rs | 2 +- 22 files changed, 1528 insertions(+), 262 deletions(-) create mode 100644 crates/bin/pindexer/src/insights/mod.rs create mode 100644 crates/bin/pindexer/src/insights/schema.sql diff --git a/Cargo.lock b/Cargo.lock index 965eea0d35..62d71d39c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5824,6 +5824,7 @@ dependencies = [ "penumbra-auction", "penumbra-dex", "penumbra-fee", + "penumbra-funding", "penumbra-governance", "penumbra-keys", "penumbra-num", diff --git a/crates/bin/pindexer/Cargo.toml b/crates/bin/pindexer/Cargo.toml index 610d43e780..24ee0b3531 100644 --- a/crates/bin/pindexer/Cargo.toml +++ b/crates/bin/pindexer/Cargo.toml @@ -22,6 +22,7 @@ penumbra-app = {workspace = true, default-features = false} penumbra-auction = {workspace = true, default-features = false} 
penumbra-dex = {workspace = true, default-features = false} penumbra-fee = {workspace = true, default-features = false} +penumbra-funding = {workspace = true, default-features = false} penumbra-keys = {workspace = true, default-features = false} penumbra-governance = {workspace = true, default-features = false} penumbra-num = {workspace = true, default-features = false} diff --git a/crates/bin/pindexer/src/indexer_ext.rs b/crates/bin/pindexer/src/indexer_ext.rs index 950000fdd3..90600e43ab 100644 --- a/crates/bin/pindexer/src/indexer_ext.rs +++ b/crates/bin/pindexer/src/indexer_ext.rs @@ -1,3 +1,5 @@ +use std::str::FromStr; + pub trait IndexerExt: Sized { fn with_default_penumbra_app_views(self) -> Self; } @@ -13,5 +15,12 @@ impl IndexerExt for cometindex::Indexer { .with_index(crate::dex_ex::Component::new()) .with_index(crate::supply::Component::new()) .with_index(crate::ibc::Component::new()) + .with_index(crate::insights::Component::new( + penumbra_asset::asset::Id::from_str( + // USDC + "passet1w6e7fvgxsy6ccy3m8q0eqcuyw6mh3yzqu3uq9h58nu8m8mku359spvulf6", + ) + .ok(), + )) } } diff --git a/crates/bin/pindexer/src/insights/mod.rs b/crates/bin/pindexer/src/insights/mod.rs new file mode 100644 index 0000000000..221c64eb25 --- /dev/null +++ b/crates/bin/pindexer/src/insights/mod.rs @@ -0,0 +1,506 @@ +use std::{collections::BTreeMap, iter}; + +use cometindex::{async_trait, AppView, ContextualizedEvent, PgTransaction}; +use penumbra_app::genesis::Content; +use penumbra_asset::{asset, STAKING_TOKEN_ASSET_ID}; +use penumbra_dex::{ + event::{EventArbExecution, EventCandlestickData}, + DirectedTradingPair, +}; +use penumbra_fee::event::EventBlockFees; +use penumbra_funding::event::EventFundingStreamReward; +use penumbra_num::Amount; +use penumbra_proto::{event::EventDomainType, DomainType, Name}; +use penumbra_shielded_pool::event::{ + EventInboundFungibleTokenTransfer, EventOutboundFungibleTokenRefund, + EventOutboundFungibleTokenTransfer, +}; +use penumbra_stake::{ + 
event::{EventDelegate, EventRateDataChange, EventUndelegate}, + validator::Validator, + IdentityKey, +}; +use sqlx::PgPool; + +use crate::parsing::parse_content; + +#[derive(Debug, Clone, Copy)] +struct ValidatorSupply { + um: u64, + rate_bps2: u64, +} + +async fn modify_validator_supply( + dbtx: &mut PgTransaction<'_>, + height: u64, + ik: IdentityKey, + f: Box anyhow::Result + Send + 'static>, +) -> anyhow::Result { + let ik_text = ik.to_string(); + let supply = { + let row: Option<(i64, i64)> = sqlx::query_as(" + SELECT um, rate_bps2 FROM _insights_validators WHERE validator_id = $1 ORDER BY height DESC LIMIT 1 + ").bind(&ik_text).fetch_optional(dbtx.as_mut()).await?; + let row = row.unwrap_or((0i64, 1_0000_0000i64)); + ValidatorSupply { + um: u64::try_from(row.0)?, + rate_bps2: u64::try_from(row.1)?, + } + }; + let new_supply = f(supply)?; + sqlx::query( + r#" + INSERT INTO _insights_validators + VALUES ($1, $2, $3, $4) + ON CONFLICT (validator_id, height) DO UPDATE SET + um = excluded.um, + rate_bps2 = excluded.rate_bps2 + "#, + ) + .bind(&ik_text) + .bind(i64::try_from(height)?) + .bind(i64::try_from(new_supply.um)?) + .bind(i64::try_from(new_supply.rate_bps2)?) + .execute(dbtx.as_mut()) + .await?; + Ok(i64::try_from(new_supply.um)? - i64::try_from(supply.um)?) +} + +#[derive(Default, Debug, Clone, Copy)] +struct Supply { + total: u64, + staked: u64, + price: Option, +} + +async fn modify_supply( + dbtx: &mut PgTransaction<'_>, + height: u64, + price_numeraire: Option, + f: Box anyhow::Result + Send + 'static>, +) -> anyhow::Result<()> { + let supply: Supply = { + let row: Option<(i64, i64, Option)> = sqlx::query_as( + "SELECT total, staked, price FROM insights_supply ORDER BY HEIGHT DESC LIMIT 1", + ) + .fetch_optional(dbtx.as_mut()) + .await?; + row.map(|(total, staked, price)| { + anyhow::Result::<_>::Ok(Supply { + total: total.try_into()?, + staked: staked.try_into()?, + price, + }) + }) + .transpose()? 
+ .unwrap_or_default() + }; + let supply = f(supply)?; + sqlx::query( + r#" + INSERT INTO + insights_supply(height, total, staked, price, price_numeraire_asset_id) + VALUES ($1, $2, $3, $5, $4) + ON CONFLICT (height) DO UPDATE SET + total = excluded.total, + staked = excluded.staked, + price = excluded.price, + price_numeraire_asset_id = excluded.price_numeraire_asset_id + "#, + ) + .bind(i64::try_from(height)?) + .bind(i64::try_from(supply.total)?) + .bind(i64::try_from(supply.staked)?) + .bind(price_numeraire.map(|x| x.to_bytes())) + .bind(supply.price) + .execute(dbtx.as_mut()) + .await?; + Ok(()) +} + +#[derive(Debug, Clone, Copy, PartialEq)] +enum DepositorExisted { + Yes, + No, +} + +async fn register_depositor( + dbtx: &mut PgTransaction<'_>, + asset_id: asset::Id, + address: &str, +) -> anyhow::Result { + let exists: bool = sqlx::query_scalar( + "SELECT EXISTS (SELECT 1 FROM _insights_shielded_pool_depositors WHERE asset_id = $1 AND address = $2)", + ) + .bind(asset_id.to_bytes()) + .bind(address) + .fetch_one(dbtx.as_mut()) + .await?; + if exists { + return Ok(DepositorExisted::Yes); + } + sqlx::query("INSERT INTO _insights_shielded_pool_depositors VALUES ($1, $2)") + .bind(asset_id.to_bytes()) + .bind(address) + .execute(dbtx.as_mut()) + .await?; + Ok(DepositorExisted::No) +} + +async fn asset_flow( + dbtx: &mut PgTransaction<'_>, + asset_id: asset::Id, + height: u64, + flow: i128, + depositor_existed: DepositorExisted, +) -> anyhow::Result<()> { + let asset_pool: Option<(String, String, i32)> = sqlx::query_as("SELECT total_value, current_value, unique_depositors FROM insights_shielded_pool WHERE asset_id = $1 ORDER BY height DESC LIMIT 1").bind(asset_id.to_bytes()).fetch_optional(dbtx.as_mut()).await?; + let mut asset_pool = asset_pool + .map(|(t, c, u)| { + anyhow::Result::<(i128, i128, i32)>::Ok(( + i128::from_str_radix(&t, 10)?, + i128::from_str_radix(&c, 10)?, + u, + )) + }) + .transpose()? 
+ .unwrap_or((0i128, 0i128, 0i32)); + asset_pool.0 += flow.abs(); + asset_pool.1 += flow; + asset_pool.2 += match depositor_existed { + DepositorExisted::Yes => 0, + DepositorExisted::No => 1, + }; + sqlx::query( + r#" + INSERT INTO insights_shielded_pool + VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (asset_id, height) DO UPDATE SET + total_value = excluded.total_value, + current_value = excluded.current_value, + unique_depositors = excluded.unique_depositors + "#, + ) + .bind(asset_id.to_bytes()) + .bind(i64::try_from(height)?) + .bind(asset_pool.0.to_string()) + .bind(asset_pool.1.to_string()) + .bind(asset_pool.2) + .execute(dbtx.as_mut()) + .await?; + Ok(()) +} + +#[derive(Debug)] +pub struct Component { + price_numeraire: Option, +} + +impl Component { + /// This component depends on a reference asset for the total supply price. + pub fn new(price_numeraire: Option) -> Self { + Self { price_numeraire } + } +} + +/// Add the initial native token supply. +async fn add_genesis_native_token_allocation_supply<'a>( + dbtx: &mut PgTransaction<'a>, + content: &Content, +) -> anyhow::Result<()> { + fn content_mints(content: &Content) -> BTreeMap { + let community_pool_mint = iter::once(( + *STAKING_TOKEN_ASSET_ID, + content.community_pool_content.initial_balance.amount, + )); + let allocation_mints = content + .shielded_pool_content + .allocations + .iter() + .map(|allocation| { + let value = allocation.value(); + (value.asset_id, value.amount) + }); + + let mut out = BTreeMap::new(); + for (id, amount) in community_pool_mint.chain(allocation_mints) { + out.entry(id).and_modify(|x| *x += amount).or_insert(amount); + } + out + } + + let mints = content_mints(content); + + let unstaked = u64::try_from( + mints + .get(&*STAKING_TOKEN_ASSET_ID) + .copied() + .unwrap_or_default() + .value(), + )?; + + let mut staked = 0u64; + // at genesis, assume a 1:1 ratio between delegation amount and native token amount. 
+ for val in &content.stake_content.validators { + let val = Validator::try_from(val.clone())?; + let delegation_amount: u64 = mints + .get(&val.token().id()) + .cloned() + .unwrap_or_default() + .value() + .try_into()?; + staked += delegation_amount; + modify_validator_supply( + dbtx, + 0, + val.identity_key, + Box::new(move |_| { + Ok(ValidatorSupply { + um: delegation_amount, + rate_bps2: 1_0000_0000, + }) + }), + ) + .await?; + } + + modify_supply( + dbtx, + 0, + None, + Box::new(move |_| { + Ok(Supply { + total: unstaked + staked, + staked, + price: None, + }) + }), + ) + .await?; + + Ok(()) +} +#[async_trait] +impl AppView for Component { + async fn init_chain( + &self, + dbtx: &mut PgTransaction, + app_state: &serde_json::Value, + ) -> Result<(), anyhow::Error> { + for statement in include_str!("schema.sql").split(";") { + sqlx::query(statement).execute(dbtx.as_mut()).await?; + } + + // decode the initial supply from the genesis + // initial app state is not recomputed from events, because events are not emitted in init_chain. + // instead, the indexer directly parses the genesis. + add_genesis_native_token_allocation_supply(dbtx, &parse_content(app_state.clone())?) 
+ .await?; + Ok(()) + } + + fn is_relevant(&self, type_str: &str) -> bool { + [ + ::Proto::full_name(), + ::Proto::full_name(), + ::Proto::full_name(), + ::Proto::full_name(), + ::Proto::full_name(), + ::Proto::full_name(), + ::Proto::full_name(), + ::Proto::full_name(), + ::Proto::full_name(), + ::Proto::full_name(), + ] + .into_iter() + .any(|x| type_str == x) + } + + async fn index_event( + &self, + dbtx: &mut PgTransaction, + event: &ContextualizedEvent, + _src_db: &PgPool, + ) -> Result<(), anyhow::Error> { + let height = event.block_height; + if let Ok(e) = EventUndelegate::try_from_event(&event.event) { + let delta = modify_validator_supply( + dbtx, + height, + e.identity_key, + Box::new(move |supply| { + Ok(ValidatorSupply { + um: supply.um - u64::try_from(e.amount.value()).expect(""), + ..supply + }) + }), + ) + .await?; + modify_supply( + dbtx, + height, + self.price_numeraire, + Box::new(move |supply| { + // The amount staked has changed, but no inflation has happened. + Ok(Supply { + staked: u64::try_from(i64::try_from(supply.staked)? + delta)?, + ..supply + }) + }), + ) + .await?; + } else if let Ok(e) = EventDelegate::try_from_event(&event.event) { + let delta = modify_validator_supply( + dbtx, + height, + e.identity_key, + Box::new(move |supply| { + Ok(ValidatorSupply { + um: supply.um + u64::try_from(e.amount.value()).expect(""), + ..supply + }) + }), + ) + .await?; + modify_supply( + dbtx, + height, + self.price_numeraire, + Box::new(move |supply| { + Ok(Supply { + staked: u64::try_from(i64::try_from(supply.staked)? + delta)?, + ..supply + }) + }), + ) + .await?; + } else if let Ok(e) = EventRateDataChange::try_from_event(&event.event) { + let delta = modify_validator_supply( + dbtx, + height, + e.identity_key, + Box::new(move |supply| { + // del_um <- um / old_exchange_rate + // um <- del_um * new_exchange_rate + // so + // um <- um * (new_exchange_rate / old_exchange_rate) + // and the bps cancel out.
+ let um = (u128::from(supply.um) * e.rate_data.validator_exchange_rate.value()) + .checked_div(supply.rate_bps2.into()) + .unwrap_or(0u128) + .try_into()?; + Ok(ValidatorSupply { + um, + rate_bps2: u64::try_from(e.rate_data.validator_exchange_rate.value())?, + }) + }), + ) + .await?; + modify_supply( + dbtx, + height, + self.price_numeraire, + Box::new(move |supply| { + // Value has been created or destroyed! + Ok(Supply { + total: u64::try_from(i64::try_from(supply.total)? + delta)?, + staked: u64::try_from(i64::try_from(supply.staked)? + delta)?, + ..supply + }) + }), + ) + .await?; + } else if let Ok(e) = EventBlockFees::try_from_event(&event.event) { + let value = e.swapped_fee_total.value(); + if value.asset_id == *STAKING_TOKEN_ASSET_ID { + let amount = u64::try_from(value.amount.value())?; + // We consider the tip to be destroyed too, matching the current logic + // DRAGON: if this changes, this code should use the base fee only. + modify_supply( + dbtx, + height, + self.price_numeraire, + Box::new(move |supply| { + Ok(Supply { + total: supply.total - amount, + ..supply + }) + }), + ) + .await?; + } + } else if let Ok(e) = EventArbExecution::try_from_event(&event.event) { + let input = e.swap_execution.input; + let output = e.swap_execution.output; + if input.asset_id == *STAKING_TOKEN_ASSET_ID + && output.asset_id == *STAKING_TOKEN_ASSET_ID + { + let profit = u64::try_from((output.amount - input.amount).value())?; + modify_supply( + dbtx, + height, + self.price_numeraire, + Box::new(move |supply| { + Ok(Supply { + total: supply.total - profit, + ..supply + }) + }), + ) + .await?; + } + } else if let Ok(e) = EventFundingStreamReward::try_from_event(&event.event) { + let amount = u64::try_from(e.reward_amount.value())?; + modify_supply( + dbtx, + height, + self.price_numeraire, + Box::new(move |supply| { + Ok(Supply { + total: supply.total + amount, + ..supply + }) + }), + ) + .await?; + } else if let Ok(e) =
EventInboundFungibleTokenTransfer::try_from_event(&event.event) { + if e.value.asset_id != *STAKING_TOKEN_ASSET_ID { + let existed = register_depositor(dbtx, e.value.asset_id, &e.sender).await?; + let flow = i128::try_from(e.value.amount.value())?; + asset_flow(dbtx, e.value.asset_id, height, flow, existed).await?; + } + } else if let Ok(e) = EventOutboundFungibleTokenTransfer::try_from_event(&event.event) { + if e.value.asset_id != *STAKING_TOKEN_ASSET_ID { + let flow = i128::try_from(e.value.amount.value())?; + // For outbound transfers, never increment unique count + asset_flow(dbtx, e.value.asset_id, height, -flow, DepositorExisted::No).await?; + } + } else if let Ok(e) = EventOutboundFungibleTokenRefund::try_from_event(&event.event) { + if e.value.asset_id != *STAKING_TOKEN_ASSET_ID { + let flow = i128::try_from(e.value.amount.value())?; + // For outbound transfers, never increment unique count. + asset_flow(dbtx, e.value.asset_id, height, flow, DepositorExisted::No).await?; + } + } else if let Ok(e) = EventCandlestickData::try_from_event(&event.event) { + if let Some(pn) = self.price_numeraire { + if e.pair == DirectedTradingPair::new(*STAKING_TOKEN_ASSET_ID, pn) { + let price = e.stick.close; + modify_supply( + dbtx, + height, + self.price_numeraire, + Box::new(move |supply| { + Ok(Supply { + price: Some(price), + ..supply + }) + }), + ) + .await?; + } + } + } + tracing::debug!(?event, "unrecognized event"); + Ok(()) + } +} diff --git a/crates/bin/pindexer/src/insights/schema.sql b/crates/bin/pindexer/src/insights/schema.sql new file mode 100644 index 0000000000..a23d67c055 --- /dev/null +++ b/crates/bin/pindexer/src/insights/schema.sql @@ -0,0 +1,54 @@ +-- A table containing updates to the total supply, and market cap. +CREATE TABLE IF NOT EXISTS insights_supply ( + -- The height where the supply was updated. + height BIGINT PRIMARY KEY, + -- The total supply of the staking token at this height. 
+ total BIGINT NOT NULL, + staked BIGINT NOT NULL, + -- Price, if it can be found for whatever numeraire we choose at runtime. + price FLOAT8, + -- The numeraire for the price we've chosen. + price_numeraire_asset_id BYTEA, + -- The market cap, i.e. price * total amount. + market_cap FLOAT8 GENERATED ALWAYS AS (total::FLOAT8 * price) STORED +); + +-- A working table to save the state around validators we need. +-- +-- This is necessary because rate data changes increase the total supply, +-- but don't directly tell us how much the total supply increased. +CREATE TABLE IF NOT EXISTS _insights_validators ( + -- The validator this row concerns. + validator_id TEXT NOT NULL, + -- The height for the supply update. + height BIGINT NOT NULL, + -- The total amount staked with them, in terms of the native token. + um BIGINT NOT NULL, + -- How much native um we get per unit of the delegation token. + rate_bps2 BIGINT NOT NULL, + PRIMARY KEY (validator_id, height) +); + +-- Our internal representation of the shielded pool table. +CREATE TABLE IF NOT EXISTS insights_shielded_pool ( + -- The asset this concerns. + asset_id BYTEA NOT NULL, + height BIGINT NOT NULL, + -- The total value shielded, in terms of that asset. + total_value TEXT NOT NULL, + -- The current value shielded, in terms of that asset. + current_value TEXT NOT NULL, + -- The number of unique depositors. 
+ unique_depositors INT NOT NULL, + PRIMARY KEY (asset_id, height) +); + +-- Unique depositors into the shielded pool +CREATE TABLE IF NOT EXISTS _insights_shielded_pool_depositors ( + asset_id BYTEA NOT NULL, + address TEXT NOT NULL, + PRIMARY KEY (asset_id, address) +); + +CREATE OR REPLACE VIEW insights_shielded_pool_latest AS + SELECT DISTINCT ON (asset_id) * FROM insights_shielded_pool ORDER BY asset_id, height DESC; diff --git a/crates/bin/pindexer/src/lib.rs b/crates/bin/pindexer/src/lib.rs index e2c2d63476..47429f1c10 100644 --- a/crates/bin/pindexer/src/lib.rs +++ b/crates/bin/pindexer/src/lib.rs @@ -6,6 +6,7 @@ pub mod block; pub mod dex; pub mod dex_ex; pub mod ibc; +pub mod insights; mod parsing; pub mod shielded_pool; mod sql; diff --git a/crates/core/component/fee/src/component.rs b/crates/core/component/fee/src/component.rs index e3c80d9f73..25ffa10451 100644 --- a/crates/core/component/fee/src/component.rs +++ b/crates/core/component/fee/src/component.rs @@ -4,12 +4,12 @@ mod view; use std::sync::Arc; -use crate::{genesis, Fee}; +use crate::{event::EventBlockFees, genesis, Fee}; use async_trait::async_trait; use cnidarium::StateWrite; use cnidarium_component::Component; -use penumbra_proto::core::component::fee::v1 as pb; use penumbra_proto::state::StateWriteProto as _; +use penumbra_proto::DomainType as _; use tendermint::abci; use tracing::instrument; @@ -56,11 +56,14 @@ impl Component for FeeComponent { let swapped_total = swapped_base + swapped_tip; - state_ref.record_proto(pb::EventBlockFees { - swapped_fee_total: Some(Fee::from_staking_token_amount(swapped_total).into()), - swapped_base_fee_total: Some(Fee::from_staking_token_amount(swapped_base).into()), - swapped_tip_total: Some(Fee::from_staking_token_amount(swapped_tip).into()), - }); + state_ref.record_proto( + EventBlockFees { + swapped_fee_total: Fee::from_staking_token_amount(swapped_total), + swapped_base_fee_total: Fee::from_staking_token_amount(swapped_base), + swapped_tip_total: 
Fee::from_staking_token_amount(swapped_tip), + } + .to_proto(), + ); } #[instrument(name = "fee", skip(_state))] diff --git a/crates/core/component/fee/src/event.rs b/crates/core/component/fee/src/event.rs index 8b13789179..5a87c81976 100644 --- a/crates/core/component/fee/src/event.rs +++ b/crates/core/component/fee/src/event.rs @@ -1 +1,48 @@ +use crate::Fee; +use anyhow::{anyhow, Context}; +use penumbra_proto::{core::component::fee::v1 as pb, DomainType, Name as _}; +#[derive(Clone, Debug)] +pub struct EventBlockFees { + pub swapped_fee_total: Fee, + pub swapped_base_fee_total: Fee, + pub swapped_tip_total: Fee, +} + +impl TryFrom for EventBlockFees { + type Error = anyhow::Error; + + fn try_from(value: pb::EventBlockFees) -> Result { + fn inner(value: pb::EventBlockFees) -> anyhow::Result { + Ok(EventBlockFees { + swapped_fee_total: value + .swapped_fee_total + .ok_or(anyhow!("missing `swapped_fee_total`"))? + .try_into()?, + swapped_base_fee_total: value + .swapped_base_fee_total + .ok_or(anyhow!("missing `swapped_base_fee_total`"))? + .try_into()?, + swapped_tip_total: value + .swapped_tip_total + .ok_or(anyhow!("missing `swapped_tip_total`"))? 
+ .try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventBlockFees::NAME)) + } +} + +impl From for pb::EventBlockFees { + fn from(value: EventBlockFees) -> Self { + Self { + swapped_fee_total: Some(value.swapped_fee_total.into()), + swapped_base_fee_total: Some(value.swapped_base_fee_total.into()), + swapped_tip_total: Some(value.swapped_tip_total.into()), + } + } +} + +impl DomainType for EventBlockFees { + type Proto = pb::EventBlockFees; +} diff --git a/crates/core/component/funding/src/component.rs b/crates/core/component/funding/src/component.rs index b459eac239..a0ea863ec7 100644 --- a/crates/core/component/funding/src/component.rs +++ b/crates/core/component/funding/src/component.rs @@ -6,7 +6,7 @@ pub use metrics::register_metrics; /* Component implementation */ use penumbra_asset::{Value, STAKING_TOKEN_ASSET_ID}; -use penumbra_proto::StateWriteProto; +use penumbra_proto::{DomainType, StateWriteProto}; use penumbra_stake::component::validator_handler::ValidatorDataRead; pub use view::{StateReadExt, StateWriteExt}; @@ -19,7 +19,7 @@ use cnidarium_component::Component; use tendermint::v0_37::abci; use tracing::instrument; -use crate::{event::funding_stream_reward, genesis}; +use crate::{event::EventFundingStreamReward, genesis}; pub struct Funding {} @@ -112,11 +112,14 @@ impl Component for Funding { // If the recipient is an address, mint a note to that address Recipient::Address(address) => { // Record the funding stream reward event: - state.record_proto(funding_stream_reward( - address.to_string(), - base_rate.epoch_index, - reward_amount_for_stream.into(), - )); + state.record_proto( + EventFundingStreamReward { + recipient: address.to_string(), + epoch_index: base_rate.epoch_index, + reward_amount: reward_amount_for_stream, + } + .to_proto(), + ); state .mint_note( @@ -134,11 +137,14 @@ impl Component for Funding { // If the recipient is the Community Pool, deposit the funds into the Community Pool Recipient::CommunityPool => { // 
Record the funding stream reward event: - state.record_proto(funding_stream_reward( - "community-pool".to_string(), - base_rate.epoch_index, - reward_amount_for_stream.into(), - )); + state.record_proto( + EventFundingStreamReward { + recipient: "community-pool".to_string(), + epoch_index: base_rate.epoch_index, + reward_amount: reward_amount_for_stream, + } + .to_proto(), + ); state .community_pool_deposit(Value { diff --git a/crates/core/component/funding/src/event.rs b/crates/core/component/funding/src/event.rs index c40df99e9a..6d646bfe53 100644 --- a/crates/core/component/funding/src/event.rs +++ b/crates/core/component/funding/src/event.rs @@ -1,14 +1,42 @@ +use anyhow::{anyhow, Context}; use penumbra_num::Amount; -use penumbra_proto::penumbra::core::component::funding::v1 as pb; - -pub fn funding_stream_reward( - recipient: String, - epoch_index: u64, - reward_amount: Amount, -) -> pb::EventFundingStreamReward { - pb::EventFundingStreamReward { - recipient, - epoch_index, - reward_amount: Some(reward_amount.into()), +use penumbra_proto::{penumbra::core::component::funding::v1 as pb, DomainType, Name as _}; + +#[derive(Clone, Debug)] +pub struct EventFundingStreamReward { + pub recipient: String, + pub epoch_index: u64, + pub reward_amount: Amount, +} + +impl TryFrom for EventFundingStreamReward { + type Error = anyhow::Error; + + fn try_from(value: pb::EventFundingStreamReward) -> Result { + fn inner(value: pb::EventFundingStreamReward) -> anyhow::Result { + Ok(EventFundingStreamReward { + recipient: value.recipient, + epoch_index: value.epoch_index, + reward_amount: value + .reward_amount + .ok_or(anyhow!("missing `reward_amount`"))? 
+ .try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventFundingStreamReward::NAME)) } } + +impl From for pb::EventFundingStreamReward { + fn from(value: EventFundingStreamReward) -> Self { + Self { + recipient: value.recipient, + epoch_index: value.epoch_index, + reward_amount: Some(value.reward_amount.into()), + } + } +} + +impl DomainType for EventFundingStreamReward { + type Proto = pb::EventFundingStreamReward; +} diff --git a/crates/core/component/shielded-pool/src/component/action_handler/output.rs b/crates/core/component/shielded-pool/src/component/action_handler/output.rs index 330a39c0e0..18a4515c9f 100644 --- a/crates/core/component/shielded-pool/src/component/action_handler/output.rs +++ b/crates/core/component/shielded-pool/src/component/action_handler/output.rs @@ -3,7 +3,7 @@ use async_trait::async_trait; use cnidarium::StateWrite; use cnidarium_component::ActionHandler; use penumbra_proof_params::OUTPUT_PROOF_VERIFICATION_KEY; -use penumbra_proto::StateWriteProto as _; +use penumbra_proto::{DomainType as _, StateWriteProto as _}; use penumbra_sct::component::source::SourceContext; use crate::{component::NoteManager, event, output::OutputProofPublic, Output}; @@ -34,7 +34,12 @@ impl ActionHandler for Output { .add_note_payload(self.body.note_payload.clone(), source) .await; - state.record_proto(event::output(&self.body.note_payload)); + state.record_proto( + event::EventOutput { + note_commitment: self.body.note_payload.note_commitment, + } + .to_proto(), + ); Ok(()) } diff --git a/crates/core/component/shielded-pool/src/component/action_handler/spend.rs b/crates/core/component/shielded-pool/src/component/action_handler/spend.rs index ee4c1ace85..b3da310650 100644 --- a/crates/core/component/shielded-pool/src/component/action_handler/spend.rs +++ b/crates/core/component/shielded-pool/src/component/action_handler/spend.rs @@ -3,7 +3,7 @@ use async_trait::async_trait; use cnidarium::StateWrite; use 
cnidarium_component::ActionHandler; use penumbra_proof_params::SPEND_PROOF_VERIFICATION_KEY; -use penumbra_proto::StateWriteProto as _; +use penumbra_proto::{DomainType, StateWriteProto as _}; use penumbra_sct::component::{ source::SourceContext, tree::{SctManager, VerificationExt}, @@ -49,7 +49,12 @@ impl ActionHandler for Spend { state.nullify(self.body.nullifier, source).await; // Also record an ABCI event for transaction indexing. - state.record_proto(event::spend(&self.body.nullifier)); + state.record_proto( + event::EventSpend { + nullifier: self.body.nullifier, + } + .to_proto(), + ); Ok(()) } diff --git a/crates/core/component/shielded-pool/src/component/transfer.rs b/crates/core/component/shielded-pool/src/component/transfer.rs index 18308e82ca..88c0e799ab 100644 --- a/crates/core/component/shielded-pool/src/component/transfer.rs +++ b/crates/core/component/shielded-pool/src/component/transfer.rs @@ -2,7 +2,8 @@ use std::str::FromStr; use crate::{ component::{AssetRegistry, NoteManager}, - event, Ics20Withdrawal, + event::{self, FungibleTokenTransferPacketMetadata}, + Ics20Withdrawal, }; use anyhow::{Context, Result}; use async_trait::async_trait; @@ -25,8 +26,8 @@ use penumbra_ibc::component::ChannelStateReadExt; use penumbra_keys::Address; use penumbra_num::Amount; use penumbra_proto::{ - core::component::shielded_pool::v1::FungibleTokenTransferPacketMetadata, - penumbra::core::component::ibc::v1::FungibleTokenPacketData, StateReadProto, StateWriteProto, + penumbra::core::component::ibc::v1::FungibleTokenPacketData, DomainType as _, StateReadProto, + StateWriteProto, }; use penumbra_sct::CommitmentSource; @@ -118,23 +119,26 @@ pub trait Ics20TransferWriteExt: StateWrite { ), new_value_balance, ); - self.record_proto(event::outbound_fungible_token_transfer( - Value { - amount: withdrawal.amount, - asset_id: withdrawal.denom.id(), - }, - &withdrawal.return_address, - withdrawal.destination_chain_address.clone(), - FungibleTokenTransferPacketMetadata { - 
channel: withdrawal.source_channel.0.clone(), - sequence: self - .get_send_sequence( - &withdrawal.source_channel, - &checked_packet.source_port(), - ) - .await?, - }, - )); + self.record_proto( + event::EventOutboundFungibleTokenTransfer { + value: Value { + amount: withdrawal.amount, + asset_id: withdrawal.denom.id(), + }, + sender: withdrawal.return_address.clone(), + receiver: withdrawal.destination_chain_address.clone(), + meta: FungibleTokenTransferPacketMetadata { + channel: withdrawal.source_channel.0.clone(), + sequence: self + .get_send_sequence( + &withdrawal.source_channel, + &checked_packet.source_port(), + ) + .await?, + }, + } + .to_proto(), + ); } else { // receiver is the source, burn utxos @@ -168,23 +172,26 @@ pub trait Ics20TransferWriteExt: StateWrite { ), new_value_balance, ); - self.record_proto(event::outbound_fungible_token_transfer( - Value { - amount: withdrawal.amount, - asset_id: withdrawal.denom.id(), - }, - &withdrawal.return_address, - withdrawal.destination_chain_address.clone(), - FungibleTokenTransferPacketMetadata { - channel: withdrawal.source_channel.0.clone(), - sequence: self - .get_send_sequence( - &withdrawal.source_channel, - &checked_packet.source_port(), - ) - .await?, - }, - )); + self.record_proto( + event::EventOutboundFungibleTokenTransfer { + value: Value { + amount: withdrawal.amount, + asset_id: withdrawal.denom.id(), + }, + sender: withdrawal.return_address.clone(), + receiver: withdrawal.destination_chain_address.clone(), + meta: FungibleTokenTransferPacketMetadata { + channel: withdrawal.source_channel.0.clone(), + sequence: self + .get_send_sequence( + &withdrawal.source_channel, + &checked_packet.source_port(), + ) + .await?, + }, + } + .to_proto(), + ); } self.send_packet_execute(checked_packet).await; @@ -388,15 +395,18 @@ async fn recv_transfer_packet_inner( state_key::ics20_value_balance::by_asset_id(&msg.packet.chan_on_b, &denom.id()), new_value_balance, ); - 
state.record_proto(event::inbound_fungible_token_transfer( - value, - packet_data.sender.clone(), - &receiver_address, - FungibleTokenTransferPacketMetadata { - channel: msg.packet.chan_on_a.0.clone(), - sequence: msg.packet.sequence.0, - }, - )); + state.record_proto( + event::EventInboundFungibleTokenTransfer { + value, + sender: packet_data.sender.clone(), + receiver: receiver_address, + meta: FungibleTokenTransferPacketMetadata { + channel: msg.packet.chan_on_a.0.clone(), + sequence: msg.packet.sequence.0, + }, + } + .to_proto(), + ); } else { // create new denom: // @@ -448,15 +458,18 @@ async fn recv_transfer_packet_inner( state_key::ics20_value_balance::by_asset_id(&msg.packet.chan_on_b, &denom.id()), new_value_balance, ); - state.record_proto(event::inbound_fungible_token_transfer( - value, - packet_data.sender.clone(), - &receiver_address, - FungibleTokenTransferPacketMetadata { - channel: msg.packet.chan_on_a.0.clone(), - sequence: msg.packet.sequence.0, - }, - )); + state.record_proto( + event::EventInboundFungibleTokenTransfer { + value, + sender: packet_data.sender.clone(), + receiver: receiver_address, + meta: FungibleTokenTransferPacketMetadata { + channel: msg.packet.chan_on_a.0.clone(), + sequence: msg.packet.sequence.0, + }, + } + .to_proto(), + ); } Ok(()) @@ -527,17 +540,20 @@ async fn refund_tokens( state_key::ics20_value_balance::by_asset_id(&packet.chan_on_a, &denom.id()), new_value_balance, ); - state.record_proto(event::outbound_fungible_token_refund( - value, - &receiver, // note, this comes from packet_data.sender - packet_data.receiver.clone(), - reason, - // Use the destination channel, i.e. our name for it, to be consistent across events. 
- FungibleTokenTransferPacketMetadata { - channel: packet.chan_on_b.0.clone(), - sequence: packet.sequence.0, - }, - )); + state.record_proto( + event::EventOutboundFungibleTokenRefund { + value, + sender: receiver, // note, this comes from packet_data.sender + receiver: packet_data.receiver.clone(), + reason, + // Use the destination channel, i.e. our name for it, to be consistent across events. + meta: FungibleTokenTransferPacketMetadata { + channel: packet.chan_on_b.0.clone(), + sequence: packet.sequence.0, + }, + } + .to_proto(), + ); } else { let value_balance: Amount = state .get(&state_key::ics20_value_balance::by_asset_id( @@ -566,17 +582,20 @@ async fn refund_tokens( state_key::ics20_value_balance::by_asset_id(&packet.chan_on_a, &denom.id()), new_value_balance, ); - // note, order flipped relative to the event. - state.record_proto(event::outbound_fungible_token_refund( - value, - &receiver, // note, this comes from packet_data.sender - packet_data.receiver.clone(), - reason, - FungibleTokenTransferPacketMetadata { - channel: packet.chan_on_b.0.clone(), - sequence: packet.sequence.0, - }, - )); + state.record_proto( + event::EventOutboundFungibleTokenRefund { + value, + sender: receiver, // note, this comes from packet_data.sender + receiver: packet_data.receiver.clone(), + reason, + // Use the destination channel, i.e. our name for it, to be consistent across events. 
+ meta: FungibleTokenTransferPacketMetadata { + channel: packet.chan_on_b.0.clone(), + sequence: packet.sequence.0, + }, + } + .to_proto(), + ); } Ok(()) diff --git a/crates/core/component/shielded-pool/src/event.rs b/crates/core/component/shielded-pool/src/event.rs index f13f1398c0..e0f14b67fa 100644 --- a/crates/core/component/shielded-pool/src/event.rs +++ b/crates/core/component/shielded-pool/src/event.rs @@ -1,79 +1,274 @@ +use anyhow::{anyhow, Context}; use penumbra_asset::Value; use penumbra_keys::Address; -use penumbra_proto::core::component::shielded_pool::v1::{ - event_outbound_fungible_token_refund::Reason, EventInboundFungibleTokenTransfer, - EventOutboundFungibleTokenRefund, EventOutboundFungibleTokenTransfer, EventOutput, EventSpend, - FungibleTokenTransferPacketMetadata, -}; +use penumbra_proto::{core::component::shielded_pool::v1 as pb, DomainType}; use penumbra_sct::Nullifier; +use prost::Name as _; -use crate::NotePayload; +use crate::note::StateCommitment; -// These are sort of like the proto/domain type From impls, because -// we don't have separate domain types for the events (yet, possibly ever). +// // These are sort of like the proto/domain type From impls, because +// // we don't have separate domain types for the events (yet, possibly ever). +// Narrator: we did in fact need the separate domain types. -pub fn spend(nullifier: &Nullifier) -> EventSpend { - EventSpend { - nullifier: Some((*nullifier).into()), +#[derive(Clone, Debug)] +pub struct EventSpend { + pub nullifier: Nullifier, +} + +impl TryFrom for EventSpend { + type Error = anyhow::Error; + + fn try_from(value: pb::EventSpend) -> Result { + fn inner(value: pb::EventSpend) -> anyhow::Result { + Ok(EventSpend { + nullifier: value + .nullifier + .ok_or(anyhow!("missing `nullifier`"))? 
+ .try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventSpend::NAME)) } } -pub fn output(note_payload: &NotePayload) -> EventOutput { - EventOutput { - note_commitment: Some(note_payload.note_commitment.into()), +impl From for pb::EventSpend { + fn from(value: EventSpend) -> Self { + Self { + nullifier: Some(value.nullifier.into()), + } + } +} + +impl DomainType for EventSpend { + type Proto = pb::EventSpend; +} + +#[derive(Clone, Debug)] +pub struct EventOutput { + pub note_commitment: StateCommitment, +} + +impl TryFrom for EventOutput { + type Error = anyhow::Error; + + fn try_from(value: pb::EventOutput) -> Result { + fn inner(value: pb::EventOutput) -> anyhow::Result { + Ok(EventOutput { + note_commitment: value + .note_commitment + .ok_or(anyhow!("missing `note_commitment`"))? + .try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventOutput::NAME)) } } -pub fn outbound_fungible_token_transfer( - value: Value, - sender: &Address, - receiver: String, - meta: FungibleTokenTransferPacketMetadata, -) -> EventOutboundFungibleTokenTransfer { - EventOutboundFungibleTokenTransfer { - value: Some(value.into()), - sender: Some(sender.into()), - receiver, - meta: Some(meta), +impl From for pb::EventOutput { + fn from(value: EventOutput) -> Self { + Self { + note_commitment: Some(value.note_commitment.into()), + } } } +impl DomainType for EventOutput { + type Proto = pb::EventOutput; +} + +#[derive(Clone, Debug)] +pub struct FungibleTokenTransferPacketMetadata { + pub channel: String, + pub sequence: u64, +} + +impl TryFrom for FungibleTokenTransferPacketMetadata { + type Error = anyhow::Error; + + fn try_from(value: pb::FungibleTokenTransferPacketMetadata) -> Result { + fn inner( + value: pb::FungibleTokenTransferPacketMetadata, + ) -> anyhow::Result { + Ok(FungibleTokenTransferPacketMetadata { + channel: value.channel, + sequence: value.sequence, + }) + } + inner(value).context(format!( + "parsing {}", + 
pb::FungibleTokenTransferPacketMetadata::NAME + )) + } +} + +impl From for pb::FungibleTokenTransferPacketMetadata { + fn from(value: FungibleTokenTransferPacketMetadata) -> Self { + Self { + channel: value.channel, + sequence: value.sequence, + } + } +} + +impl DomainType for FungibleTokenTransferPacketMetadata { + type Proto = pb::FungibleTokenTransferPacketMetadata; +} + +#[derive(Clone, Debug)] +pub struct EventOutboundFungibleTokenTransfer { + pub value: Value, + pub sender: Address, + pub receiver: String, + pub meta: FungibleTokenTransferPacketMetadata, +} + +impl TryFrom for EventOutboundFungibleTokenTransfer { + type Error = anyhow::Error; + + fn try_from(value: pb::EventOutboundFungibleTokenTransfer) -> Result { + fn inner( + value: pb::EventOutboundFungibleTokenTransfer, + ) -> anyhow::Result { + Ok(EventOutboundFungibleTokenTransfer { + value: value.value.ok_or(anyhow!("missing `value`"))?.try_into()?, + sender: value + .sender + .ok_or(anyhow!("missing `sender`"))? + .try_into()?, + receiver: value.receiver, + meta: value.meta.ok_or(anyhow!("missing `meta`"))?.try_into()?, + }) + } + inner(value).context(format!( + "parsing {}", + pb::EventOutboundFungibleTokenTransfer::NAME + )) + } +} + +impl From for pb::EventOutboundFungibleTokenTransfer { + fn from(value: EventOutboundFungibleTokenTransfer) -> Self { + Self { + value: Some(value.value.into()), + sender: Some(value.sender.into()), + receiver: value.receiver, + meta: Some(value.meta.into()), + } + } +} + +impl DomainType for EventOutboundFungibleTokenTransfer { + type Proto = pb::EventOutboundFungibleTokenTransfer; +} + #[derive(Clone, Copy, Debug)] +#[repr(i32)] pub enum FungibleTokenRefundReason { - Timeout, - Error, -} - -pub fn outbound_fungible_token_refund( - value: Value, - sender: &Address, - receiver: String, - reason: FungibleTokenRefundReason, - meta: FungibleTokenTransferPacketMetadata, -) -> EventOutboundFungibleTokenRefund { - let reason = match reason { - 
FungibleTokenRefundReason::Timeout => Reason::Timeout, - FungibleTokenRefundReason::Error => Reason::Error, - }; - EventOutboundFungibleTokenRefund { - value: Some(value.into()), - sender: Some(sender.into()), - receiver, - reason: reason as i32, - meta: Some(meta), + Unspecified = 0, + Timeout = 1, + Error = 2, +} + +#[derive(Clone, Debug)] +pub struct EventOutboundFungibleTokenRefund { + pub value: Value, + pub sender: Address, + pub receiver: String, + pub reason: FungibleTokenRefundReason, + pub meta: FungibleTokenTransferPacketMetadata, +} + +impl TryFrom for EventOutboundFungibleTokenRefund { + type Error = anyhow::Error; + + fn try_from(value: pb::EventOutboundFungibleTokenRefund) -> Result { + fn inner( + value: pb::EventOutboundFungibleTokenRefund, + ) -> anyhow::Result { + use pb::event_outbound_fungible_token_refund::Reason; + let reason = match value.reason() { + Reason::Timeout => FungibleTokenRefundReason::Timeout, + Reason::Error => FungibleTokenRefundReason::Error, + Reason::Unspecified => FungibleTokenRefundReason::Unspecified, + }; + Ok(EventOutboundFungibleTokenRefund { + value: value.value.ok_or(anyhow!("missing `value`"))?.try_into()?, + sender: value + .sender + .ok_or(anyhow!("missing `sender`"))? 
+ .try_into()?, + receiver: value.receiver, + reason, + meta: value.meta.ok_or(anyhow!("missing `meta`"))?.try_into()?, + }) + } + inner(value).context(format!( + "parsing {}", + pb::EventOutboundFungibleTokenRefund::NAME + )) } } -pub fn inbound_fungible_token_transfer( - value: Value, - sender: String, - receiver: &Address, - meta: FungibleTokenTransferPacketMetadata, -) -> EventInboundFungibleTokenTransfer { - EventInboundFungibleTokenTransfer { - value: Some(value.into()), - sender, - receiver: Some(receiver.into()), - meta: Some(meta), +impl From for pb::EventOutboundFungibleTokenRefund { + fn from(value: EventOutboundFungibleTokenRefund) -> Self { + Self { + value: Some(value.value.into()), + sender: Some(value.sender.into()), + receiver: value.receiver, + reason: value.reason as i32, + meta: Some(value.meta.into()), + } } } + +impl DomainType for EventOutboundFungibleTokenRefund { + type Proto = pb::EventOutboundFungibleTokenRefund; +} + +#[derive(Clone, Debug)] +pub struct EventInboundFungibleTokenTransfer { + pub value: Value, + pub sender: String, + pub receiver: Address, + pub meta: FungibleTokenTransferPacketMetadata, +} + +impl TryFrom for EventInboundFungibleTokenTransfer { + type Error = anyhow::Error; + + fn try_from(value: pb::EventInboundFungibleTokenTransfer) -> Result { + fn inner( + value: pb::EventInboundFungibleTokenTransfer, + ) -> anyhow::Result { + Ok(EventInboundFungibleTokenTransfer { + value: value.value.ok_or(anyhow!("missing `value`"))?.try_into()?, + sender: value.sender, + receiver: value + .receiver + .ok_or(anyhow!("missing `receiver`"))? 
+ .try_into()?, + meta: value.meta.ok_or(anyhow!("missing `meta`"))?.try_into()?, + }) + } + inner(value).context(format!( + "parsing {}", + pb::EventInboundFungibleTokenTransfer::NAME + )) + } +} + +impl From for pb::EventInboundFungibleTokenTransfer { + fn from(value: EventInboundFungibleTokenTransfer) -> Self { + Self { + value: Some(value.value.into()), + sender: value.sender, + receiver: Some(value.receiver.into()), + meta: Some(value.meta.into()), + } + } +} + +impl DomainType for EventInboundFungibleTokenTransfer { + type Proto = pb::EventInboundFungibleTokenTransfer; +} diff --git a/crates/core/component/stake/src/component/action_handler/delegate.rs b/crates/core/component/stake/src/component/action_handler/delegate.rs index f27231f1d7..5b01bc9963 100644 --- a/crates/core/component/stake/src/component/action_handler/delegate.rs +++ b/crates/core/component/stake/src/component/action_handler/delegate.rs @@ -3,7 +3,7 @@ use async_trait::async_trait; use cnidarium::StateWrite; use cnidarium_component::ActionHandler; use penumbra_num::Amount; -use penumbra_proto::StateWriteProto; +use penumbra_proto::{DomainType, StateWriteProto}; use penumbra_sct::component::clock::EpochRead; use crate::{ @@ -132,7 +132,7 @@ impl ActionHandler for Delegate { // We queue the delegation so it can be processed at the epoch boundary. 
tracing::debug!(?self, "queuing delegation for next epoch"); state.push_delegation(self.clone()); - state.record_proto(event::delegate(self)); + state.record_proto(event::EventDelegate::from(self).to_proto()); Ok(()) } } diff --git a/crates/core/component/stake/src/component/action_handler/undelegate.rs b/crates/core/component/stake/src/component/action_handler/undelegate.rs index 6edfa1eaa8..da3947ee01 100644 --- a/crates/core/component/stake/src/component/action_handler/undelegate.rs +++ b/crates/core/component/stake/src/component/action_handler/undelegate.rs @@ -1,7 +1,7 @@ use anyhow::Result; use async_trait::async_trait; use cnidarium::StateWrite; -use penumbra_proto::StateWriteProto; +use penumbra_proto::{DomainType as _, StateWriteProto}; use penumbra_sct::component::clock::EpochRead; use penumbra_shielded_pool::component::AssetRegistry; @@ -85,7 +85,7 @@ impl ActionHandler for Undelegate { tracing::debug!(?self, "queuing undelegation for next epoch"); state.push_undelegation(self.clone()); - state.record_proto(event::undelegate(self)); + state.record_proto(event::EventUndelegate::from(self).to_proto()); Ok(()) } diff --git a/crates/core/component/stake/src/component/stake.rs b/crates/core/component/stake/src/component/stake.rs index c2984312d2..98b8bf8139 100644 --- a/crates/core/component/stake/src/component/stake.rs +++ b/crates/core/component/stake/src/component/stake.rs @@ -1,6 +1,6 @@ pub mod address; -use crate::event::slashing_penalty_applied; +use crate::event::EventSlashingPenaltyApplied; use crate::params::StakeParameters; use crate::rate::BaseRateData; use crate::validator::{self, Validator}; @@ -15,7 +15,7 @@ use cnidarium::{StateRead, StateWrite}; use cnidarium_component::Component; use futures::{StreamExt, TryStreamExt}; use penumbra_num::Amount; -use penumbra_proto::{StateReadProto, StateWriteProto}; +use penumbra_proto::{DomainType, StateReadProto, StateWriteProto}; use penumbra_sct::component::clock::EpochRead; use std::pin::Pin; use 
std::str::FromStr; @@ -475,11 +475,14 @@ pub(crate) trait RateDataWrite: StateWrite { let new_penalty = current_penalty.compound(slashing_penalty); // Emit an event indicating the validator had a slashing penalty applied. - self.record_proto(slashing_penalty_applied( - *identity_key, - current_epoch_index, - new_penalty, - )); + self.record_proto( + EventSlashingPenaltyApplied { + identity_key: *identity_key, + epoch_index: current_epoch_index, + new_penalty, + } + .to_proto(), + ); self.put( state_key::penalty::for_id_in_epoch(identity_key, current_epoch_index), new_penalty, diff --git a/crates/core/component/stake/src/component/validator_handler/uptime_tracker.rs b/crates/core/component/stake/src/component/validator_handler/uptime_tracker.rs index c22e77abe9..b3b9999165 100644 --- a/crates/core/component/stake/src/component/validator_handler/uptime_tracker.rs +++ b/crates/core/component/stake/src/component/validator_handler/uptime_tracker.rs @@ -17,7 +17,7 @@ use { async_trait::async_trait, cnidarium::StateWrite, futures::StreamExt as _, - penumbra_proto::StateWriteProto, + penumbra_proto::{DomainType, StateWriteProto}, penumbra_sct::component::clock::EpochRead, std::collections::BTreeMap, tap::Tap, @@ -178,7 +178,7 @@ pub trait ValidatorUptimeTracker: StateWrite { if !voted { // If the validator didn't sign, we need to emit a missed block event. 
- self.record_proto(event::validator_missed_block(identity_key)); + self.record_proto(event::EventValidatorMissedBlock { identity_key }.to_proto()); } uptime.mark_height_as_signed(height, voted)?; diff --git a/crates/core/component/stake/src/component/validator_handler/validator_manager.rs b/crates/core/component/stake/src/component/validator_handler/validator_manager.rs index 2fa17fc577..2064291966 100644 --- a/crates/core/component/stake/src/component/validator_handler/validator_manager.rs +++ b/crates/core/component/stake/src/component/validator_handler/validator_manager.rs @@ -24,7 +24,7 @@ use { cnidarium::StateWrite, penumbra_asset::asset, penumbra_num::Amount, - penumbra_proto::StateWriteProto, + penumbra_proto::{DomainType as _, StateWriteProto}, penumbra_sct::component::{ clock::{EpochManager, EpochRead}, StateReadExt as _, @@ -302,7 +302,13 @@ pub trait ValidatorManager: StateWrite { tracing::info!("successful state transition"); self.put(validator_state_path, new_state); - self.record_proto(event::validator_state_change(*identity_key, new_state)); + self.record_proto( + event::EventValidatorStateChange { + identity_key: *identity_key, + state: new_state, + } + .to_proto(), + ); Ok((old_state, new_state)) } @@ -481,7 +487,12 @@ pub trait ValidatorManager: StateWrite { // Track the validator's definition in an event (the rest of the attributes will be tracked // in events emitted by the calls to set_* methods below). - self.record_proto(event::validator_definition_upload(validator.clone())); + self.record_proto( + event::EventValidatorDefinitionUpload { + validator: validator.clone(), + } + .to_proto(), + ); // We initialize the validator's state, power, and bonding state. self.set_initial_validator_state(&validator_identity, initial_state)?; @@ -599,7 +610,7 @@ pub trait ValidatorManager: StateWrite { ); // Track the validator's definition in an event. 
- self.record_proto(event::validator_definition_upload(validator)); + self.record_proto(event::EventValidatorDefinitionUpload { validator }.to_proto()); Ok(()) } @@ -664,11 +675,14 @@ pub trait ValidatorManager: StateWrite { if let (Inactive | Jailed | Active, Tombstoned) = (old_state, new_state) { let current_height = self.get_block_height().await?; - self.record_proto(event::tombstone_validator( - current_height, - validator.identity_key.clone(), - evidence, - )); + self.record_proto( + event::EventTombstoneValidator::from_evidence( + current_height, + validator.identity_key.clone(), + evidence, + ) + .to_proto(), + ); } Ok(()) diff --git a/crates/core/component/stake/src/component/validator_handler/validator_store.rs b/crates/core/component/stake/src/component/validator_handler/validator_store.rs index 01e6e00bb4..b3e7508ce1 100644 --- a/crates/core/component/stake/src/component/validator_handler/validator_store.rs +++ b/crates/core/component/stake/src/component/validator_handler/validator_store.rs @@ -253,7 +253,13 @@ pub(crate) trait ValidatorDataWrite: StateWrite { state_key::validators::pool::bonding_state::by_id(identity_key), state.clone(), ); - self.record_proto(event::validator_bonding_state_change(*identity_key, state)); + self.record_proto( + event::EventValidatorBondingStateChange { + identity_key: *identity_key, + bonding_state: state, + } + .to_proto(), + ); } #[instrument(skip(self))] @@ -270,10 +276,13 @@ pub(crate) trait ValidatorDataWrite: StateWrite { state_key::validators::power::by_id(identity_key), voting_power, ); - self.record_proto(event::validator_voting_power_change( - *identity_key, - voting_power, - )); + self.record_proto( + event::EventValidatorVotingPowerChange { + identity_key: *identity_key, + voting_power, + } + .to_proto(), + ); Ok(()) } @@ -290,7 +299,13 @@ pub(crate) trait ValidatorDataWrite: StateWrite { } self.put(state_key::validators::state::by_id(id), initial_state); - self.record_proto(event::validator_state_change(*id, 
initial_state)); + self.record_proto( + event::EventValidatorStateChange { + identity_key: *id, + state: initial_state, + } + .to_proto(), + ); Ok(()) } @@ -301,7 +316,13 @@ pub(crate) trait ValidatorDataWrite: StateWrite { state_key::validators::rate::current_by_id(identity_key), rate_data.clone(), ); - self.record_proto(event::validator_rate_data_change(*identity_key, rate_data)); + self.record_proto( + event::EventRateDataChange { + identity_key: *identity_key, + rate_data, + } + .to_proto(), + ); } #[instrument(skip(self))] diff --git a/crates/core/component/stake/src/event.rs b/crates/core/component/stake/src/event.rs index 86de0322b2..cb0d184af4 100644 --- a/crates/core/component/stake/src/event.rs +++ b/crates/core/component/stake/src/event.rs @@ -1,100 +1,448 @@ use crate::{ - rate, + rate::RateData, validator::{BondingState, State, Validator}, Delegate, IdentityKey, Penalty, Undelegate, }; +use anyhow::{anyhow, Context as _}; use penumbra_num::Amount; -use penumbra_proto::core::component::stake::v1 as pb; +use penumbra_proto::{core::component::stake::v1 as pb, DomainType, Name as _}; use tendermint::abci::types::Misbehavior; -pub fn validator_state_change( - identity_key: IdentityKey, - state: State, -) -> pb::EventValidatorStateChange { - pb::EventValidatorStateChange { - identity_key: Some(identity_key.into()), - state: Some(state.into()), +#[derive(Clone, Debug)] +pub struct EventValidatorStateChange { + pub identity_key: IdentityKey, + pub state: State, +} + +impl TryFrom for EventValidatorStateChange { + type Error = anyhow::Error; + + fn try_from(value: pb::EventValidatorStateChange) -> Result { + fn inner( + value: pb::EventValidatorStateChange, + ) -> anyhow::Result { + Ok(EventValidatorStateChange { + identity_key: value + .identity_key + .ok_or(anyhow!("missing `identity_key`"))? 
+ .try_into()?, + state: value.state.ok_or(anyhow!("missing `state`"))?.try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventValidatorStateChange::NAME)) } } -pub fn validator_voting_power_change( - identity_key: IdentityKey, - voting_power: Amount, -) -> pb::EventValidatorVotingPowerChange { - pb::EventValidatorVotingPowerChange { - identity_key: Some(identity_key.into()), - voting_power: Some(voting_power.into()), +impl From for pb::EventValidatorStateChange { + fn from(value: EventValidatorStateChange) -> Self { + Self { + identity_key: Some(value.identity_key.into()), + state: Some(value.state.into()), + } } } -pub fn validator_bonding_state_change( - identity_key: IdentityKey, - bonding_state: BondingState, -) -> pb::EventValidatorBondingStateChange { - pb::EventValidatorBondingStateChange { - identity_key: Some(identity_key.into()), - bonding_state: Some(bonding_state.into()), +impl DomainType for EventValidatorStateChange { + type Proto = pb::EventValidatorStateChange; +} + +#[derive(Clone, Debug)] +pub struct EventValidatorVotingPowerChange { + pub identity_key: IdentityKey, + pub voting_power: Amount, +} + +impl TryFrom for EventValidatorVotingPowerChange { + type Error = anyhow::Error; + + fn try_from(value: pb::EventValidatorVotingPowerChange) -> Result { + fn inner( + value: pb::EventValidatorVotingPowerChange, + ) -> anyhow::Result { + Ok(EventValidatorVotingPowerChange { + identity_key: value + .identity_key + .ok_or(anyhow!("missing `identity_key`"))? + .try_into()?, + voting_power: value + .voting_power + .ok_or(anyhow!("missing `voting_power`"))? 
+ .try_into()?, + }) + } + inner(value).context(format!( + "parsing {}", + pb::EventValidatorVotingPowerChange::NAME + )) } } -pub fn validator_rate_data_change( - identity_key: IdentityKey, - rate_data: rate::RateData, -) -> pb::EventRateDataChange { - pb::EventRateDataChange { - identity_key: Some(identity_key.into()), - rate_data: Some(rate_data.into()), +impl From for pb::EventValidatorVotingPowerChange { + fn from(value: EventValidatorVotingPowerChange) -> Self { + Self { + identity_key: Some(value.identity_key.into()), + voting_power: Some(value.voting_power.into()), + } } } -pub fn validator_definition_upload(validator: Validator) -> pb::EventValidatorDefinitionUpload { - pb::EventValidatorDefinitionUpload { - validator: Some(validator.into()), +impl DomainType for EventValidatorVotingPowerChange { + type Proto = pb::EventValidatorVotingPowerChange; +} + +#[derive(Clone, Debug)] +pub struct EventValidatorBondingStateChange { + pub identity_key: IdentityKey, + pub bonding_state: BondingState, +} + +impl TryFrom for EventValidatorBondingStateChange { + type Error = anyhow::Error; + + fn try_from(value: pb::EventValidatorBondingStateChange) -> Result { + fn inner( + value: pb::EventValidatorBondingStateChange, + ) -> anyhow::Result { + Ok(EventValidatorBondingStateChange { + identity_key: value + .identity_key + .ok_or(anyhow!("missing `identity_key`"))? + .try_into()?, + bonding_state: value + .bonding_state + .ok_or(anyhow!("missing `bonding_state`"))? 
+ .try_into()?, + }) + } + inner(value).context(format!( + "parsing {}", + pb::EventValidatorBondingStateChange::NAME + )) } } -pub fn validator_missed_block(identity_key: IdentityKey) -> pb::EventValidatorMissedBlock { - pb::EventValidatorMissedBlock { - identity_key: Some(identity_key.into()), +impl From for pb::EventValidatorBondingStateChange { + fn from(value: EventValidatorBondingStateChange) -> Self { + Self { + identity_key: Some(value.identity_key.into()), + bonding_state: Some(value.bonding_state.into()), + } } } -pub fn delegate(delegate: &Delegate) -> pb::EventDelegate { - pb::EventDelegate { - identity_key: Some(delegate.validator_identity.into()), - amount: Some(delegate.unbonded_amount.into()), +impl DomainType for EventValidatorBondingStateChange { + type Proto = pb::EventValidatorBondingStateChange; +} + +#[derive(Clone, Debug)] +pub struct EventRateDataChange { + pub identity_key: IdentityKey, + pub rate_data: RateData, +} + +impl TryFrom for EventRateDataChange { + type Error = anyhow::Error; + + fn try_from(value: pb::EventRateDataChange) -> Result { + fn inner(value: pb::EventRateDataChange) -> anyhow::Result { + Ok(EventRateDataChange { + identity_key: value + .identity_key + .ok_or(anyhow!("missing `identity_key`"))? + .try_into()?, + rate_data: value + .rate_data + .ok_or(anyhow!("missing `rate_data`"))? 
+ .try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventRateDataChange::NAME)) } } -pub fn undelegate(undelegate: &Undelegate) -> pb::EventUndelegate { - pb::EventUndelegate { - identity_key: Some(undelegate.validator_identity.into()), - amount: Some(undelegate.unbonded_amount.into()), +impl From for pb::EventRateDataChange { + fn from(value: EventRateDataChange) -> Self { + Self { + identity_key: Some(value.identity_key.into()), + rate_data: Some(value.rate_data.into()), + } } } -pub fn tombstone_validator( - current_height: u64, - identity_key: IdentityKey, - evidence: &Misbehavior, -) -> pb::EventTombstoneValidator { - pb::EventTombstoneValidator { - evidence_height: evidence.height.value(), - current_height, - identity_key: Some(identity_key.into()), - address: evidence.validator.address.to_vec(), - voting_power: evidence.validator.power.value(), +impl DomainType for EventRateDataChange { + type Proto = pb::EventRateDataChange; +} + +#[derive(Clone, Debug)] +pub struct EventValidatorDefinitionUpload { + pub validator: Validator, +} + +impl TryFrom for EventValidatorDefinitionUpload { + type Error = anyhow::Error; + + fn try_from(value: pb::EventValidatorDefinitionUpload) -> Result { + fn inner( + value: pb::EventValidatorDefinitionUpload, + ) -> anyhow::Result { + Ok(EventValidatorDefinitionUpload { + validator: value + .validator + .ok_or(anyhow!("missing `validator`"))? 
+ .try_into()?, + }) + } + inner(value).context(format!( + "parsing {}", + pb::EventValidatorDefinitionUpload::NAME + )) } } -pub fn slashing_penalty_applied( - identity_key: IdentityKey, - epoch_index: u64, - new_penalty: Penalty, -) -> pb::EventSlashingPenaltyApplied { - pb::EventSlashingPenaltyApplied { - identity_key: Some(identity_key.into()), - epoch_index, - new_penalty: Some(new_penalty.into()), +impl From for pb::EventValidatorDefinitionUpload { + fn from(value: EventValidatorDefinitionUpload) -> Self { + Self { + validator: Some(value.validator.into()), + } + } +} + +impl DomainType for EventValidatorDefinitionUpload { + type Proto = pb::EventValidatorDefinitionUpload; +} + +#[derive(Clone, Debug)] +pub struct EventValidatorMissedBlock { + pub identity_key: IdentityKey, +} + +impl TryFrom for EventValidatorMissedBlock { + type Error = anyhow::Error; + + fn try_from(value: pb::EventValidatorMissedBlock) -> Result { + fn inner( + value: pb::EventValidatorMissedBlock, + ) -> anyhow::Result { + Ok(EventValidatorMissedBlock { + identity_key: value + .identity_key + .ok_or(anyhow!("missing `identity_key`"))? 
+ .try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventValidatorMissedBlock::NAME)) + } +} + +impl From for pb::EventValidatorMissedBlock { + fn from(value: EventValidatorMissedBlock) -> Self { + Self { + identity_key: Some(value.identity_key.into()), + } + } +} + +impl DomainType for EventValidatorMissedBlock { + type Proto = pb::EventValidatorMissedBlock; +} + +#[derive(Clone, Debug)] +pub struct EventDelegate { + pub identity_key: IdentityKey, + pub amount: Amount, +} + +impl From<&Delegate> for EventDelegate { + fn from(value: &Delegate) -> Self { + Self { + identity_key: value.validator_identity, + amount: value.unbonded_amount, + } + } +} + +impl TryFrom for EventDelegate { + type Error = anyhow::Error; + + fn try_from(value: pb::EventDelegate) -> Result { + fn inner(value: pb::EventDelegate) -> anyhow::Result { + Ok(EventDelegate { + identity_key: value + .identity_key + .ok_or(anyhow!("missing `identity_key`"))? + .try_into()?, + amount: value + .amount + .ok_or(anyhow!("missing `amount`"))? + .try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventDelegate::NAME)) } } + +impl From for pb::EventDelegate { + fn from(value: EventDelegate) -> Self { + Self { + identity_key: Some(value.identity_key.into()), + amount: Some(value.amount.into()), + } + } +} + +impl DomainType for EventDelegate { + type Proto = pb::EventDelegate; +} + +#[derive(Clone, Debug)] +pub struct EventUndelegate { + pub identity_key: IdentityKey, + pub amount: Amount, +} + +impl From<&Undelegate> for EventUndelegate { + fn from(value: &Undelegate) -> Self { + Self { + identity_key: value.validator_identity, + amount: value.unbonded_amount, + } + } +} + +impl TryFrom for EventUndelegate { + type Error = anyhow::Error; + + fn try_from(value: pb::EventUndelegate) -> Result { + fn inner(value: pb::EventUndelegate) -> anyhow::Result { + Ok(EventUndelegate { + identity_key: value + .identity_key + .ok_or(anyhow!("missing `identity_key`"))? 
+ .try_into()?, + amount: value + .amount + .ok_or(anyhow!("missing `amount`"))? + .try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventUndelegate::NAME)) + } +} + +impl From for pb::EventUndelegate { + fn from(value: EventUndelegate) -> Self { + Self { + identity_key: Some(value.identity_key.into()), + amount: Some(value.amount.into()), + } + } +} + +impl DomainType for EventUndelegate { + type Proto = pb::EventUndelegate; +} + +#[derive(Clone, Debug)] +pub struct EventTombstoneValidator { + pub evidence_height: u64, + pub current_height: u64, + pub identity_key: IdentityKey, + pub address: Vec, + pub voting_power: u64, +} + +impl EventTombstoneValidator { + pub fn from_evidence( + current_height: u64, + identity_key: IdentityKey, + evidence: &Misbehavior, + ) -> Self { + Self { + evidence_height: evidence.height.value(), + current_height, + identity_key, + address: evidence.validator.address.to_vec(), + voting_power: evidence.validator.power.value(), + } + } +} + +impl TryFrom for EventTombstoneValidator { + type Error = anyhow::Error; + + fn try_from(value: pb::EventTombstoneValidator) -> Result { + fn inner(value: pb::EventTombstoneValidator) -> anyhow::Result { + Ok(EventTombstoneValidator { + evidence_height: value.evidence_height, + current_height: value.current_height, + identity_key: value + .identity_key + .ok_or(anyhow!("missing `identity_key`"))? 
+ .try_into()?, + address: value.address, + voting_power: value.voting_power, + }) + } + inner(value).context(format!("parsing {}", pb::EventTombstoneValidator::NAME)) + } +} + +impl From for pb::EventTombstoneValidator { + fn from(value: EventTombstoneValidator) -> Self { + Self { + evidence_height: value.evidence_height, + current_height: value.current_height, + identity_key: Some(value.identity_key.into()), + address: value.address, + voting_power: value.voting_power, + } + } +} + +impl DomainType for EventTombstoneValidator { + type Proto = pb::EventTombstoneValidator; +} + +#[derive(Clone, Debug)] +pub struct EventSlashingPenaltyApplied { + pub identity_key: IdentityKey, + pub epoch_index: u64, + pub new_penalty: Penalty, +} + +impl TryFrom for EventSlashingPenaltyApplied { + type Error = anyhow::Error; + + fn try_from(value: pb::EventSlashingPenaltyApplied) -> Result { + fn inner( + value: pb::EventSlashingPenaltyApplied, + ) -> anyhow::Result { + Ok(EventSlashingPenaltyApplied { + identity_key: value + .identity_key + .ok_or(anyhow!("missing `identity_key`"))? + .try_into()?, + epoch_index: value.epoch_index, + new_penalty: value + .new_penalty + .ok_or(anyhow!("missing `new_penalty`"))? 
+ .try_into()?, + }) + } + inner(value).context(format!("parsing {}", pb::EventSlashingPenaltyApplied::NAME)) + } +} + +impl From for pb::EventSlashingPenaltyApplied { + fn from(value: EventSlashingPenaltyApplied) -> Self { + Self { + identity_key: Some(value.identity_key.into()), + epoch_index: value.epoch_index, + new_penalty: Some(value.new_penalty.into()), + } + } +} + +impl DomainType for EventSlashingPenaltyApplied { + type Proto = pb::EventSlashingPenaltyApplied; +} diff --git a/crates/core/component/stake/src/lib.rs b/crates/core/component/stake/src/lib.rs index eb574260df..ccaab1c2be 100644 --- a/crates/core/component/stake/src/lib.rs +++ b/crates/core/component/stake/src/lib.rs @@ -6,7 +6,7 @@ mod changes; mod current_consensus_keys; mod delegation_token; -mod event; +pub mod event; mod governance_key; mod identity_key; mod penalty; From 35f9f2f64a652b6055a39ff227e504636436cb7e Mon Sep 17 00:00:00 2001 From: Conor Schaefer Date: Tue, 22 Oct 2024 13:03:08 -0700 Subject: [PATCH 43/43] chore: release version 0.80.7 --- Cargo.lock | 98 +++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 50 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 62d71d39c3..e15746f79a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1233,7 +1233,7 @@ dependencies = [ [[package]] name = "cnidarium" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "async-trait", @@ -1269,7 +1269,7 @@ dependencies = [ [[package]] name = "cnidarium-component" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "async-trait", @@ -1307,7 +1307,7 @@ dependencies = [ [[package]] name = "cometindex" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "async-trait", @@ -1668,7 +1668,7 @@ dependencies = [ [[package]] name = "decaf377-fmd" -version = "0.80.6" +version = "0.80.7" dependencies = [ "ark-ff", "ark-serialize", @@ -1683,7 +1683,7 @@ dependencies = [ [[package]] name = "decaf377-frost" -version = 
"0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -1698,7 +1698,7 @@ dependencies = [ [[package]] name = "decaf377-ka" -version = "0.80.6" +version = "0.80.7" dependencies = [ "ark-ff", "decaf377", @@ -4213,7 +4213,7 @@ dependencies = [ [[package]] name = "pcli" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -4294,7 +4294,7 @@ dependencies = [ [[package]] name = "pclientd" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "assert_cmd", @@ -4346,7 +4346,7 @@ dependencies = [ [[package]] name = "pd" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -4499,7 +4499,7 @@ dependencies = [ [[package]] name = "penumbra-app" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -4588,7 +4588,7 @@ dependencies = [ [[package]] name = "penumbra-asset" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -4628,7 +4628,7 @@ dependencies = [ [[package]] name = "penumbra-auction" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -4683,7 +4683,7 @@ dependencies = [ [[package]] name = "penumbra-auto-https" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "axum-server", @@ -4695,7 +4695,7 @@ dependencies = [ [[package]] name = "penumbra-bench" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-bls12-377", @@ -4739,7 +4739,7 @@ dependencies = [ [[package]] name = "penumbra-community-pool" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -4771,7 +4771,7 @@ dependencies = [ [[package]] name = "penumbra-compact-block" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -4806,7 +4806,7 @@ dependencies = [ [[package]] name = "penumbra-custody" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "argon2", @@ -4842,7 +4842,7 @@ dependencies = [ [[package]] name = "penumbra-dex" -version = "0.80.6" 
+version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -4904,7 +4904,7 @@ dependencies = [ [[package]] name = "penumbra-distributions" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "async-trait", @@ -4922,7 +4922,7 @@ dependencies = [ [[package]] name = "penumbra-eddy" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -4940,7 +4940,7 @@ dependencies = [ [[package]] name = "penumbra-fee" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -4967,7 +4967,7 @@ dependencies = [ [[package]] name = "penumbra-funding" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "async-trait", @@ -4990,7 +4990,7 @@ dependencies = [ [[package]] name = "penumbra-governance" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -5044,7 +5044,7 @@ dependencies = [ [[package]] name = "penumbra-ibc" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -5081,7 +5081,7 @@ dependencies = [ [[package]] name = "penumbra-keys" -version = "0.80.6" +version = "0.80.7" dependencies = [ "aes", "anyhow", @@ -5128,7 +5128,7 @@ dependencies = [ [[package]] name = "penumbra-measure" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "bytesize", @@ -5146,7 +5146,7 @@ dependencies = [ [[package]] name = "penumbra-mock-client" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "cnidarium", @@ -5163,7 +5163,7 @@ dependencies = [ [[package]] name = "penumbra-mock-consensus" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "bytes", @@ -5183,7 +5183,7 @@ dependencies = [ [[package]] name = "penumbra-mock-tendermint-proxy" -version = "0.80.6" +version = "0.80.7" dependencies = [ "hex", "pbjson-types", @@ -5198,7 +5198,7 @@ dependencies = [ [[package]] name = "penumbra-num" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -5235,7 +5235,7 @@ dependencies = [ [[package]] name = 
"penumbra-proof-params" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ec", @@ -5263,7 +5263,7 @@ dependencies = [ [[package]] name = "penumbra-proof-setup" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ec", @@ -5290,7 +5290,7 @@ dependencies = [ [[package]] name = "penumbra-proto" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "async-trait", @@ -5324,7 +5324,7 @@ dependencies = [ [[package]] name = "penumbra-sct" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -5360,7 +5360,7 @@ dependencies = [ [[package]] name = "penumbra-shielded-pool" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -5414,7 +5414,7 @@ dependencies = [ [[package]] name = "penumbra-stake" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-ff", @@ -5467,7 +5467,7 @@ dependencies = [ [[package]] name = "penumbra-tct" -version = "0.80.6" +version = "0.80.7" dependencies = [ "ark-ed-on-bls12-377", "ark-ff", @@ -5499,7 +5499,7 @@ dependencies = [ [[package]] name = "penumbra-tct-property-test" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "futures", @@ -5511,7 +5511,7 @@ dependencies = [ [[package]] name = "penumbra-tct-visualize" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "axum", @@ -5541,7 +5541,7 @@ dependencies = [ [[package]] name = "penumbra-tendermint-proxy" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "chrono", @@ -5573,7 +5573,7 @@ dependencies = [ [[package]] name = "penumbra-test-subscriber" -version = "0.80.6" +version = "0.80.7" dependencies = [ "tracing", "tracing-subscriber 0.3.18", @@ -5581,7 +5581,7 @@ dependencies = [ [[package]] name = "penumbra-tower-trace" -version = "0.80.6" +version = "0.80.7" dependencies = [ "futures", "hex", @@ -5602,7 +5602,7 @@ dependencies = [ [[package]] name = "penumbra-transaction" -version = "0.80.6" +version = "0.80.7" dependencies 
= [ "anyhow", "ark-ff", @@ -5655,7 +5655,7 @@ dependencies = [ [[package]] name = "penumbra-txhash" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "blake2b_simd 1.0.2", @@ -5668,7 +5668,7 @@ dependencies = [ [[package]] name = "penumbra-view" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-std", @@ -5726,7 +5726,7 @@ dependencies = [ [[package]] name = "penumbra-wallet" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-std", @@ -5812,7 +5812,7 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pindexer" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "chrono", @@ -5907,7 +5907,7 @@ dependencies = [ [[package]] name = "pmonitor" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "assert_cmd", @@ -7706,7 +7706,7 @@ checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" [[package]] name = "summonerd" -version = "0.80.6" +version = "0.80.7" dependencies = [ "anyhow", "ark-groth16", diff --git a/Cargo.toml b/Cargo.toml index 3d2de2f1bc..a2b54d94b0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -104,7 +104,7 @@ push = false [workspace.package] authors = ["Penumbra Labs