diff --git a/common/utils/flags.go b/common/utils/flags.go
index e6e4f64222..ef5919d54a 100644
--- a/common/utils/flags.go
+++ b/common/utils/flags.go
@@ -21,6 +21,7 @@ var (
 	// RollupRelayerFlags contains flags only used in rollup-relayer
 	RollupRelayerFlags = []cli.Flag{
 		&ImportGenesisFlag,
+		&MinCodecVersionFlag,
 	}
 	// ConfigFileFlag load json type config file.
 	ConfigFileFlag = cli.StringFlag{
@@ -90,4 +91,10 @@ var (
 		Usage: "Genesis file of the network",
 		Value: "./conf/genesis.json",
 	}
+	// MinCodecVersionFlag defines the minimum codec version required for the chunk/batch/bundle proposers
+	MinCodecVersionFlag = cli.UintFlag{
+		Name:     "min-codec-version",
+		Usage:    "Minimum required codec version for the chunk/batch/bundle proposers",
+		Required: true,
+	}
 )
diff --git a/common/version/version.go b/common/version/version.go
index 83bff50dea..1e467875ba 100644
--- a/common/version/version.go
+++ b/common/version/version.go
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )
 
-var tag = "v4.4.72"
+var tag = "v4.4.81"
 
 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
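Reviewer note: since `MinCodecVersionFlag` is marked `Required: true`, the rollup-relayer binary now refuses to start without `--min-codec-version`. A minimal, self-contained sketch of the standard urfave/cli v2 flow that consumes such a flag; the app name and action body are illustrative, not the real rollup-relayer entrypoint:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

// minCodecVersionFlag mirrors MinCodecVersionFlag above; because Required is
// true, cli.App.Run fails fast when the operator omits --min-codec-version.
var minCodecVersionFlag = cli.UintFlag{
	Name:     "min-codec-version",
	Usage:    "Minimum required codec version for the chunk/batch/bundle proposers",
	Required: true,
}

func main() {
	app := &cli.App{
		Name:  "rollup-relayer-demo", // illustrative name only
		Flags: []cli.Flag{&minCodecVersionFlag},
		Action: func(ctx *cli.Context) error {
			// ctx.Uint reads the parsed value by flag name.
			fmt.Println("min codec version:", ctx.Uint(minCodecVersionFlag.Name))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```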
\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_l1BlobBaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFeeAndBlobBaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"BlobScalarUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"CommitScalarUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BlobBaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BlobBaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"overhead\",\"type\":\"uint256\"}],\"name\":\"OverheadUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"ScalarUpdated\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"blobScalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"commitScalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"getL1Fee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"getL1GasUsed\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BlobBaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\
"name\":\"_l1BaseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_l1BlobBaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFeeAndBlobBaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", } diff --git a/rollup/abi/bridge_abi_test.go b/rollup/abi/bridge_abi_test.go index 70ab6564ba..e20fb726e1 100644 --- a/rollup/abi/bridge_abi_test.go +++ b/rollup/abi/bridge_abi_test.go @@ -52,14 +52,15 @@ func TestPackImportGenesisBatch(t *testing.T) { assert.NoError(err) } -func TestPackSetL1BaseFee(t *testing.T) { +func TestPackSetL1BaseFeeAndBlobBaseFee(t *testing.T) { assert := assert.New(t) l1GasOracleABI, err := L1GasPriceOracleMetaData.GetAbi() assert.NoError(err) baseFee := big.NewInt(2333) - _, err = l1GasOracleABI.Pack("setL1BaseFee", baseFee) + blobBaseFee := big.NewInt(1) + _, err = l1GasOracleABI.Pack("setL1BaseFeeAndBlobBaseFee", baseFee, blobBaseFee) assert.NoError(err) } diff --git a/rollup/cmd/gas_oracle/app/app.go b/rollup/cmd/gas_oracle/app/app.go index 5ad23819e9..43775b044a 100644 --- a/rollup/cmd/gas_oracle/app/app.go +++ b/rollup/cmd/gas_oracle/app/app.go @@ -22,7 +22,7 @@ import ( "scroll-tech/rollup/internal/config" "scroll-tech/rollup/internal/controller/relayer" "scroll-tech/rollup/internal/controller/watcher" - butils "scroll-tech/rollup/internal/utils" + rutils "scroll-tech/rollup/internal/utils" ) var app *cli.App @@ -78,15 +78,9 @@ func action(ctx *cli.Context) error { log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err) } - genesisPath := ctx.String(utils.Genesis.Name) - genesis, err := utils.ReadGenesis(genesisPath) - if err != nil { - log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err) - } - l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, db, registry) - l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, genesis.Config, relayer.ServiceTypeL1GasOracle, registry) + l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, relayer.ServiceTypeL1GasOracle, registry) if err != nil { log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err) } @@ -98,7 +92,7 @@ func action(ctx *cli.Context) error { go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) { // Fetch the latest block number to decrease the delay when fetching gas prices // Use latest block number - 1 to prevent frequent reorg - number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l1client, rpc.LatestBlockNumber) + number, loopErr := rutils.GetLatestConfirmedBlockNumber(ctx, l1client, rpc.LatestBlockNumber) if loopErr != nil { log.Error("failed to get block number", "err", loopErr) return diff --git a/rollup/cmd/rollup_relayer/app/app.go b/rollup/cmd/rollup_relayer/app/app.go index 939d4b7798..080f27cdf7 100644 --- a/rollup/cmd/rollup_relayer/app/app.go +++ b/rollup/cmd/rollup_relayer/app/app.go @@ -8,6 +8,7 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + "github.com/scroll-tech/da-codec/encoding" "github.com/scroll-tech/go-ethereum/ethclient" "github.com/scroll-tech/go-ethereum/log" "github.com/urfave/cli/v2" @@ -20,7 +21,7 @@ import ( "scroll-tech/rollup/internal/config" "scroll-tech/rollup/internal/controller/relayer" "scroll-tech/rollup/internal/controller/watcher" - butils "scroll-tech/rollup/internal/utils" + rutils "scroll-tech/rollup/internal/utils" ) var app *cli.App @@ -84,15 +85,16 @@ func action(ctx *cli.Context) error { 
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err) } - chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, genesis.Config, db, registry) - batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, genesis.Config, db, registry) - bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, genesis.Config, db, registry) + minCodecVersion := encoding.CodecVersion(ctx.Uint(utils.MinCodecVersionFlag.Name)) + chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, genesis.Config, db, registry) + batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, registry) + bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, minCodecVersion, genesis.Config, db, registry) l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, genesis.Config, db, registry) // Watcher loop to fetch missing blocks go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) { - number, loopErr := butils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations) + number, loopErr := rutils.GetLatestConfirmedBlockNumber(ctx, l2client, cfg.L2Config.Confirmations) if loopErr != nil { log.Error("failed to get block number", "err", loopErr) return @@ -108,8 +110,6 @@ func action(ctx *cli.Context) error { go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches) - go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches) - go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessPendingBundles) // Finish start all rollup relayer functions. diff --git a/rollup/conf/config.json b/rollup/conf/config.json index 50d6b57262..c522c0fcfa 100644 --- a/rollup/conf/config.json +++ b/rollup/conf/config.json @@ -18,11 +18,10 @@ "gas_oracle_config": { "min_gas_price": 0, "gas_price_diff": 50000, - "l1_base_fee_weight": 0.132, - "l1_blob_base_fee_weight": 0.145, "check_committed_batches_window_minutes": 5, "l1_base_fee_default": 15000000000, - "l1_blob_base_fee_default": 1 + "l1_blob_base_fee_default": 1, + "l1_blob_base_fee_threshold": 0 }, "gas_oracle_sender_signer_config": { "signer_type": "PrivateKey", diff --git a/rollup/internal/config/relayer.go b/rollup/internal/config/relayer.go index 1a1d3a1a46..f76010890c 100644 --- a/rollup/internal/config/relayer.go +++ b/rollup/internal/config/relayer.go @@ -85,15 +85,13 @@ type GasOracleConfig struct { // AlternativeGasTokenConfig The configuration for handling token exchange rates when updating the gas price oracle. AlternativeGasTokenConfig *AlternativeGasTokenConfig `json:"alternative_gas_token_config"` - // The following configs are only for updating L1 gas price, used for sender in L2. - // The weight for L1 base fee. - L1BaseFeeWeight float64 `json:"l1_base_fee_weight"` - // The weight for L1 blob base fee. 
diff --git a/rollup/internal/config/relayer.go b/rollup/internal/config/relayer.go
index 1a1d3a1a46..f76010890c 100644
--- a/rollup/internal/config/relayer.go
+++ b/rollup/internal/config/relayer.go
@@ -85,15 +85,13 @@ type GasOracleConfig struct {
 	// AlternativeGasTokenConfig The configuration for handling token exchange rates when updating the gas price oracle.
 	AlternativeGasTokenConfig *AlternativeGasTokenConfig `json:"alternative_gas_token_config"`
 
-	// The following configs are only for updating L1 gas price, used for sender in L2.
-	// The weight for L1 base fee.
-	L1BaseFeeWeight float64 `json:"l1_base_fee_weight"`
-	// The weight for L1 blob base fee.
-	L1BlobBaseFeeWeight float64 `json:"l1_blob_base_fee_weight"`
 	// CheckCommittedBatchesWindowMinutes the time frame to check if we committed batches to decide to update gas oracle or not in minutes
 	CheckCommittedBatchesWindowMinutes int    `json:"check_committed_batches_window_minutes"`
 	L1BaseFeeDefault                   uint64 `json:"l1_base_fee_default"`
 	L1BlobBaseFeeDefault               uint64 `json:"l1_blob_base_fee_default"`
+
+	// L1BlobBaseFeeThreshold the threshold of L1 blob base fee to enter the default gas price mode
+	L1BlobBaseFeeThreshold uint64 `json:"l1_blob_base_fee_threshold"`
 }
 
 // SignerConfig - config of signer, contains type and config corresponding to type
diff --git a/rollup/internal/controller/relayer/l1_relayer.go b/rollup/internal/controller/relayer/l1_relayer.go
index ed2f39eb1f..36aeade98b 100644
--- a/rollup/internal/controller/relayer/l1_relayer.go
+++ b/rollup/internal/controller/relayer/l1_relayer.go
@@ -11,7 +11,6 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/log"
-	"github.com/scroll-tech/go-ethereum/params"
 	"gorm.io/gorm"
 
 	"scroll-tech/common/types"
@@ -30,18 +29,15 @@ import (
 type Layer1Relayer struct {
 	ctx context.Context
 
-	cfg      *config.RelayerConfig
-	chainCfg *params.ChainConfig
+	cfg *config.RelayerConfig
 
 	gasOracleSender *sender.Sender
 	l1GasOracleABI  *abi.ABI
 
-	lastBaseFee         uint64
-	lastBlobBaseFee     uint64
-	minGasPrice         uint64
-	gasPriceDiff        uint64
-	l1BaseFeeWeight     float64
-	l1BlobBaseFeeWeight float64
+	lastBaseFee     uint64
+	lastBlobBaseFee uint64
+	minGasPrice     uint64
+	gasPriceDiff    uint64
 
 	l1BlockOrm *orm.L1Block
 	l2BlockOrm *orm.L2Block
@@ -51,7 +47,7 @@ type Layer1Relayer struct {
 }
 
 // NewLayer1Relayer will return a new instance of Layer1RelayerClient
-func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, serviceType ServiceType, reg prometheus.Registerer) (*Layer1Relayer, error) {
+func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, serviceType ServiceType, reg prometheus.Registerer) (*Layer1Relayer, error) {
 	var gasOracleSender *sender.Sender
 	var err error
 
@@ -82,7 +78,6 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
 
 	l1Relayer := &Layer1Relayer{
 		cfg:        cfg,
-		chainCfg:   chainCfg,
 		ctx:        ctx,
 		l1BlockOrm: orm.NewL1Block(db),
 		l2BlockOrm: orm.NewL2Block(db),
@@ -91,10 +86,8 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
 		gasOracleSender: gasOracleSender,
 		l1GasOracleABI:  bridgeAbi.L1GasPriceOracleABI,
 
-		minGasPrice:         minGasPrice,
-		gasPriceDiff:        gasPriceDiff,
-		l1BaseFeeWeight:     cfg.GasOracleConfig.L1BaseFeeWeight,
-		l1BlobBaseFeeWeight: cfg.GasOracleConfig.L1BlobBaseFeeWeight,
+		minGasPrice:  minGasPrice,
+		gasPriceDiff: gasPriceDiff,
 	}
 
 	l1Relayer.metrics = initL1RelayerMetrics(reg)
@@ -132,25 +125,13 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
 	block := blocks[0]
 
 	if types.GasOracleStatus(block.GasOracleStatus) == types.GasOraclePending {
-		latestL2Height, err := r.l2BlockOrm.GetL2BlocksLatestHeight(r.ctx)
-		if err != nil {
-			log.Warn("Failed to fetch latest L2 block height from db", "err", err)
+		if block.BaseFee == 0 || block.BlobBaseFee == 0 {
+			log.Error("Invalid base fee or blob base fee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", block.BaseFee, "block.BlobBaseFee", block.BlobBaseFee)
 			return
 		}
 
-		var isBernoulli = block.BlobBaseFee > 0 && r.chainCfg.IsBernoulli(new(big.Int).SetUint64(latestL2Height))
-		var isCurie = block.BlobBaseFee > 0 && r.chainCfg.IsCurie(new(big.Int).SetUint64(latestL2Height))
-
-		var baseFee uint64
-		var blobBaseFee uint64
-		if isCurie {
-			baseFee = block.BaseFee
-			blobBaseFee = block.BlobBaseFee
-		} else if isBernoulli {
-			baseFee = uint64(math.Ceil(r.l1BaseFeeWeight*float64(block.BaseFee) + r.l1BlobBaseFeeWeight*float64(block.BlobBaseFee)))
-		} else {
-			baseFee = block.BaseFee
-		}
+		baseFee := block.BaseFee
+		blobBaseFee := block.BlobBaseFee
 
 		// include the token exchange rate in the fee data if alternative gas token enabled
 		if r.cfg.GasOracleConfig.AlternativeGasTokenConfig != nil && r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Enabled {
@@ -177,12 +158,12 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
 			blobBaseFee = uint64(math.Ceil(float64(blobBaseFee) / exchangeRate))
 		}
 
-		if r.shouldUpdateGasOracle(baseFee, blobBaseFee, isCurie) {
+		if r.shouldUpdateGasOracle(baseFee, blobBaseFee) {
 			// It indicates the committing batch has been stuck for a long time, it's likely that the L1 gas fee spiked.
 			// If we are not committing batches due to high fees then we shouldn't update fees to prevent users from paying high l1_data_fee
 			// Also, set fees to some default value, because we have already updated fees to some high values, probably
 			var reachTimeout bool
-			if reachTimeout, err = r.commitBatchReachTimeout(); reachTimeout && err == nil {
+			if reachTimeout, err = r.commitBatchReachTimeout(); reachTimeout && block.BlobBaseFee > r.cfg.GasOracleConfig.L1BlobBaseFeeThreshold && err == nil {
 				if r.lastBaseFee == r.cfg.GasOracleConfig.L1BaseFeeDefault && r.lastBlobBaseFee == r.cfg.GasOracleConfig.L1BlobBaseFeeDefault {
 					return
 				}
@@ -191,24 +172,15 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
 			} else if err != nil {
 				return
 			}
-			var data []byte
-			if isCurie {
-				data, err = r.l1GasOracleABI.Pack("setL1BaseFeeAndBlobBaseFee", new(big.Int).SetUint64(baseFee), new(big.Int).SetUint64(blobBaseFee))
-				if err != nil {
-					log.Error("Failed to pack setL1BaseFeeAndBlobBaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "isBernoulli", isBernoulli, "isCurie", isCurie, "err", err)
-					return
-				}
-			} else {
-				data, err = r.l1GasOracleABI.Pack("setL1BaseFee", new(big.Int).SetUint64(baseFee))
-				if err != nil {
-					log.Error("Failed to pack setL1BaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "isBernoulli", isBernoulli, "isCurie", isCurie, "err", err)
-					return
-				}
+			data, err := r.l1GasOracleABI.Pack("setL1BaseFeeAndBlobBaseFee", new(big.Int).SetUint64(baseFee), new(big.Int).SetUint64(blobBaseFee))
+			if err != nil {
+				log.Error("Failed to pack setL1BaseFeeAndBlobBaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "err", err)
+				return
 			}
 
 			hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, data, nil, 0)
 			if err != nil {
-				log.Error("Failed to send gas oracle update tx to layer2", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "isBernoulli", isBernoulli, "isCurie", isCurie, "err", err)
+				log.Error("Failed to send gas oracle update tx to layer2", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "err", err)
 				return
 			}
 
@@ -222,7 +194,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
 			r.lastBlobBaseFee = blobBaseFee
 			r.metrics.rollupL1RelayerLatestBaseFee.Set(float64(r.lastBaseFee))
 			r.metrics.rollupL1RelayerLatestBlobBaseFee.Set(float64(r.lastBlobBaseFee))
-			log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee, "blobBaseFee", blobBaseFee, "isBernoulli", isBernoulli, "isCurie", isCurie)
+			log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee, "blobBaseFee", blobBaseFee)
 		}
 	}
 }
@@ -271,9 +243,10 @@ func (r *Layer1Relayer) StopSenders() {
 	}
 }
 
-func (r *Layer1Relayer) shouldUpdateGasOracle(baseFee uint64, blobBaseFee uint64, isCurie bool) bool {
+func (r *Layer1Relayer) shouldUpdateGasOracle(baseFee uint64, blobBaseFee uint64) bool {
 	// Right after restarting.
 	if r.lastBaseFee == 0 {
+		log.Info("First time to update gas oracle after restarting", "baseFee", baseFee, "blobBaseFee", blobBaseFee)
 		return true
 	}
 
@@ -282,16 +255,6 @@ func (r *Layer1Relayer) shouldUpdateGasOracle(baseFee uint64, blobBaseFee uint64
 		return true
 	}
 
-	// Omitting blob base fee checks before Curie.
-	if !isCurie {
-		return false
-	}
-
-	// Right after enabling Curie.
-	if r.lastBlobBaseFee == 0 {
-		return true
-	}
-
 	expectedBlobBaseFeeDelta := r.lastBlobBaseFee * r.gasPriceDiff / gasPriceDiffPrecision
 	// Plus a minimum of 0.01 gwei, since the blob base fee is usually low, preventing short-time flunctuation.
 	expectedBlobBaseFeeDelta += 10000000
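Reviewer note: with the pre-Curie branches gone, the update rule reduces to a pure deviation check. A self-contained sketch of that rule; the final comparison and the value of `gasPriceDiffPrecision` are not visible in this hunk, so the `precision` argument (assumed parts-per-million here) and the comparison form are assumptions:

```go
package main

import "fmt"

// shouldUpdateBlobBaseFee mirrors the post-change logic: update when the new
// blob base fee deviates from the last pushed value by more than
// gasPriceDiff/precision, with a 0.01 gwei floor on the required delta.
func shouldUpdateBlobBaseFee(last, current, gasPriceDiff, precision uint64) bool {
	expectedDelta := last*gasPriceDiff/precision + 10000000 // +0.01 gwei floor, as in the relayer
	if current > last {
		return current-last >= expectedDelta
	}
	return last-current >= expectedDelta
}

func main() {
	// 50000/1000000 = 5% threshold, matching the sample config's gas_price_diff.
	fmt.Println(shouldUpdateBlobBaseFee(1_000_000_000, 1_060_000_000, 50000, 1_000_000)) // true: 6% move
	fmt.Println(shouldUpdateBlobBaseFee(1_000_000_000, 1_020_000_000, 50000, 1_000_000)) // false: 2% move
}
```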
diff --git a/rollup/internal/controller/relayer/l1_relayer_test.go b/rollup/internal/controller/relayer/l1_relayer_test.go
index 6ac67fdc5b..129378eff4 100644
--- a/rollup/internal/controller/relayer/l1_relayer_test.go
+++ b/rollup/internal/controller/relayer/l1_relayer_test.go
@@ -8,7 +8,6 @@ import (
 	"github.com/agiledragon/gomonkey/v2"
 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
-	"github.com/scroll-tech/go-ethereum/params"
 	"github.com/smartystreets/goconvey/convey"
 	"github.com/stretchr/testify/assert"
 	"gorm.io/gorm"
@@ -36,7 +35,7 @@ func setupL1RelayerDB(t *testing.T) *gorm.DB {
 func testCreateNewL1Relayer(t *testing.T) {
 	db := setupL1RelayerDB(t)
 	defer database.CloseDB(db)
-	relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, ServiceTypeL1GasOracle, nil)
+	relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig, ServiceTypeL1GasOracle, nil)
 	assert.NoError(t, err)
 	assert.NotNil(t, relayer)
 	defer relayer.StopSenders()
@@ -58,7 +57,7 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
 	l1Cfg := cfg.L1Config
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, &params.ChainConfig{}, ServiceTypeL1GasOracle, nil)
+	l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, ServiceTypeL1GasOracle, nil)
 	assert.NoError(t, err)
 	defer l1Relayer.StopSenders()
 
@@ -91,7 +90,7 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
 	l1Cfg := cfg.L1Config
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, &params.ChainConfig{}, ServiceTypeL1GasOracle, nil)
+	l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, ServiceTypeL1GasOracle, nil)
 	assert.NoError(t, err)
 	assert.NotNil(t, l1Relayer)
 	defer l1Relayer.StopSenders()
@@ -141,14 +140,6 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
 		return tmpInfo, nil
 	})
 
-	convey.Convey("setL1BaseFee failure", t, func() {
-		targetErr := errors.New("pack setL1BaseFee error")
-		patchGuard.ApplyMethodFunc(l1Relayer.l1GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
-			return nil, targetErr
-		})
-		l1Relayer.ProcessGasPriceOracle()
-	})
-
 	patchGuard.ApplyMethodFunc(l1Relayer.l1GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
 		return []byte("for test"), nil
 	})
diff --git a/rollup/internal/controller/relayer/l2_relayer.go b/rollup/internal/controller/relayer/l2_relayer.go
index b90e70f9e0..33f6921a9b 100644
--- a/rollup/internal/controller/relayer/l2_relayer.go
+++ b/rollup/internal/controller/relayer/l2_relayer.go
@@ -71,28 +71,9 @@ type Layer2Relayer struct {
 
 // NewLayer2Relayer will return a new instance of Layer2RelayerClient
 func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, initGenesis bool, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
-
 	var gasOracleSender, commitSender, finalizeSender *sender.Sender
 	var err error
 
-	// check that all 3 signer addresses are different, because there will be a problem in managing nonce for different senders
-	gasOracleSenderAddr, err := addrFromSignerConfig(cfg.GasOracleSenderSignerConfig)
-	if err != nil {
-		return nil, fmt.Errorf("failed to parse addr from gas oracle signer config, err: %v", err)
-	}
-	commitSenderAddr, err := addrFromSignerConfig(cfg.CommitSenderSignerConfig)
-	if err != nil {
-		return nil, fmt.Errorf("failed to parse addr from commit sender config, err: %v", err)
-	}
-	finalizeSenderAddr, err := addrFromSignerConfig(cfg.FinalizeSenderSignerConfig)
-	if err != nil {
-		return nil, fmt.Errorf("failed to parse addr from finalize sender config, err: %v", err)
-	}
-	if gasOracleSenderAddr == commitSenderAddr || gasOracleSenderAddr == finalizeSenderAddr || commitSenderAddr == finalizeSenderAddr {
-		return nil, fmt.Errorf("gas oracle, commit, and finalize sender addresses must be different. Got: Gas Oracle=%s, Commit=%s, Finalize=%s",
-			gasOracleSenderAddr.Hex(), commitSenderAddr.Hex(), finalizeSenderAddr.Hex())
-	}
-
 	switch serviceType {
 	case ServiceTypeL2GasOracle:
 		gasOracleSender, err = sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderSignerConfig, "l2_relayer", "gas_oracle_sender", types.SenderTypeL2GasOracle, db, reg)
@@ -106,6 +87,18 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
 		}
 
 	case ServiceTypeL2RollupRelayer:
+		commitSenderAddr, err := addrFromSignerConfig(cfg.CommitSenderSignerConfig)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse addr from commit sender config, err: %v", err)
+		}
+		finalizeSenderAddr, err := addrFromSignerConfig(cfg.FinalizeSenderSignerConfig)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse addr from finalize sender config, err: %v", err)
+		}
+		if commitSenderAddr == finalizeSenderAddr {
+			return nil, fmt.Errorf("commit and finalize sender addresses must be different. Got: Commit=%s, Finalize=%s", commitSenderAddr.Hex(), finalizeSenderAddr.Hex())
+		}
+
 		commitSender, err = sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderSignerConfig, "l2_relayer", "commit_sender", types.SenderTypeCommitBatch, db, reg)
 		if err != nil {
 			return nil, fmt.Errorf("new commit sender failed, err: %w", err)
@@ -161,7 +154,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
 	}
 
 	// chain_monitor client
-	if cfg.ChainMonitor.Enabled {
+	if serviceType == ServiceTypeL2RollupRelayer && cfg.ChainMonitor.Enabled {
 		layer2Relayer.chainMonitorClient = resty.New()
 		layer2Relayer.chainMonitorClient.SetRetryCount(cfg.ChainMonitor.TryTimes)
 		layer2Relayer.chainMonitorClient.SetTimeout(time.Duration(cfg.ChainMonitor.TimeOut) * time.Second)
@@ -394,6 +387,14 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
 		return
 	}
 
+	// check codec version
+	for _, dbChunk := range dbChunks {
+		if dbBatch.CodecVersion != dbChunk.CodecVersion {
+			log.Error("batch codec version is different from chunk codec version", "batch index", dbBatch.Index, "chunk index", dbChunk.Index, "batch codec version", dbBatch.CodecVersion, "chunk codec version", dbChunk.CodecVersion)
+			return
+		}
+	}
+
 	chunks := make([]*encoding.Chunk, len(dbChunks))
 	for i, c := range dbChunks {
 		blocks, getErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
@@ -415,20 +416,19 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
 		return
 	}
 
+	if dbParentBatch.CodecVersion > dbBatch.CodecVersion {
+		log.Error("parent batch codec version is greater than current batch codec version", "index", dbBatch.Index, "hash", dbBatch.Hash, "parent codec version", dbParentBatch.CodecVersion, "current codec version", dbBatch.CodecVersion)
+		return
+	}
+
 	var calldata []byte
 	var blob *kzg4844.Blob
 	codecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
 	switch codecVersion {
-	case encoding.CodecV0, encoding.CodecV1, encoding.CodecV2:
-		calldata, blob, err = r.constructCommitBatchPayloadCodecV0AndV1AndV2(dbBatch, dbParentBatch, dbChunks, chunks)
+	case encoding.CodecV4:
+		calldata, blob, err = r.constructCommitBatchPayloadCodecV4(dbBatch, dbParentBatch, dbChunks, chunks)
 		if err != nil {
-			log.Error("failed to construct commitBatch payload for V0/V1/V2", "codecVersion", codecVersion, "index", dbBatch.Index, "err", err)
-			return
-		}
-	case encoding.CodecV3, encoding.CodecV4:
-		calldata, blob, err = r.constructCommitBatchPayloadCodecV3AndV4(dbBatch, dbParentBatch, dbChunks, chunks)
-		if err != nil {
-			log.Error("failed to construct commitBatchWithBlobProof payload for V3/V4", "codecVersion", codecVersion, "index", dbBatch.Index, "err", err)
+			log.Error("failed to construct commitBatchWithBlobProof payload for V4", "codecVersion", codecVersion, "index", dbBatch.Index, "err", err)
 			return
 		}
 	default:
@@ -488,69 +488,6 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
 	}
 }
 
-// ProcessCommittedBatches submit proof to layer 1 rollup contract
-func (r *Layer2Relayer) ProcessCommittedBatches() {
-	// retrieves the earliest batch whose rollup status is 'committed'
-	fields := map[string]interface{}{
-		"rollup_status": types.RollupCommitted,
-	}
-	orderByList := []string{"index ASC"}
-	limit := 1
-	batches, err := r.batchOrm.GetBatches(r.ctx, fields, orderByList, limit)
-	if err != nil {
-		log.Error("Failed to fetch committed L2 batches", "err", err)
-		return
-	}
-	if len(batches) != 1 {
-		log.Warn("Unexpected result for GetBlockBatches", "number of batches", len(batches))
-		return
-	}
-
-	r.metrics.rollupL2RelayerProcessCommittedBatchesTotal.Inc()
-
-	batch := batches[0]
-	status := types.ProvingStatus(batch.ProvingStatus)
-	switch status {
-	case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
-		if batch.CommittedAt == nil {
-			log.Error("batch.CommittedAt is nil", "index", batch.Index, "hash", batch.Hash)
-			return
-		}
-
-		if r.cfg.EnableTestEnvBypassFeatures && utils.NowUTC().Sub(*batch.CommittedAt) > time.Duration(r.cfg.FinalizeBatchWithoutProofTimeoutSec)*time.Second {
-			if err := r.finalizeBatch(batch, false); err != nil {
-				log.Error("Failed to finalize timeout batch without proof", "index", batch.Index, "hash", batch.Hash, "err", err)
-			}
-		}
-
-	case types.ProvingTaskVerified:
-		r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
-		if err := r.finalizeBatch(batch, true); err != nil {
-			log.Error("Failed to finalize batch with proof", "index", batch.Index, "hash", batch.Hash, "err", err)
-		}
-
-	case types.ProvingTaskFailed:
-		// We were unable to prove this batch. There are two possibilities:
-		// (a) Prover bug. In this case, we should fix and redeploy the prover.
-		//     In the meantime, we continue to commit batches to L1 as well as
-		//     proposing and proving chunks and batches.
-		// (b) Unprovable batch, e.g. proof overflow. In this case we need to
-		//     stop the ledger, fix the limit, revert all the violating blocks,
-		//     chunks and batches and all subsequent ones, and resume, i.e. this
-		//     case requires manual resolution.
-		log.Error(
-			"batch proving failed",
-			"Index", batch.Index,
-			"Hash", batch.Hash,
-			"ProvedAt", batch.ProvedAt,
-			"ProofTimeSec", batch.ProofTimeSec,
-		)
-
-	default:
-		log.Error("encounter unreachable case in ProcessCommittedBatches", "proving status", status)
-	}
-}
-
 // ProcessPendingBundles submits proof to layer 1 rollup contract
 func (r *Layer2Relayer) ProcessPendingBundles() {
 	r.metrics.rollupL2RelayerProcessPendingBundlesTotal.Inc()
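Reviewer note: the new pre-commit guards require every chunk to share its batch's codec version, and forbid a batch from regressing below its parent's codec. A compact sketch of the first invariant outside the relayer; the record types here are simplified stand-ins for the `orm.Batch` and `orm.Chunk` models:

```go
package main

import "fmt"

// batchRecord and chunkRecord are simplified stand-ins for the ORM models;
// only the fields the invariant needs are included.
type batchRecord struct {
	Index        uint64
	CodecVersion int16
}

type chunkRecord struct {
	Index        uint64
	CodecVersion int16
}

// checkBatchChunkCodec enforces the invariant added in ProcessPendingBatches:
// a batch is never committed with chunks encoded under a different codec.
func checkBatchChunkCodec(batch batchRecord, chunks []chunkRecord) error {
	for _, c := range chunks {
		if c.CodecVersion != batch.CodecVersion {
			return fmt.Errorf("chunk %d has codec %d, batch %d has codec %d",
				c.Index, c.CodecVersion, batch.Index, batch.CodecVersion)
		}
	}
	return nil
}

func main() {
	batch := batchRecord{Index: 7, CodecVersion: 4}
	chunks := []chunkRecord{{Index: 20, CodecVersion: 4}, {Index: 21, CodecVersion: 3}}
	fmt.Println(checkBatchChunkCodec(batch, chunks)) // reports the mismatching chunk
}
```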
@@ -568,8 +505,26 @@ func (r *Layer2Relayer) ProcessPendingBundles() {
 	switch status {
 	case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
 		if r.cfg.EnableTestEnvBypassFeatures && utils.NowUTC().Sub(bundle.CreatedAt) > time.Duration(r.cfg.FinalizeBundleWithoutProofTimeoutSec)*time.Second {
+			// check if last batch is finalized, because in fake finalize bundle mode, the contract does not verify if the previous bundle or batch is finalized.
+			if bundle.StartBatchIndex == 0 {
+				log.Error("invalid args: start batch index of bundle is 0", "bundle index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex)
+				return
+			}
+
+			lastBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, bundle.StartBatchIndex-1)
+			if err != nil {
+				log.Error("failed to get last batch", "batch index", bundle.StartBatchIndex-1, "err", err)
+				return
+			}
+
+			if types.RollupStatus(lastBatch.RollupStatus) != types.RollupFinalized {
+				log.Error("previous bundle or batch is not finalized", "batch index", lastBatch.Index, "batch hash", lastBatch.Hash, "rollup status", types.RollupStatus(lastBatch.RollupStatus))
+				return
+			}
+
 			if err := r.finalizeBundle(bundle, false); err != nil {
-				log.Error("Failed to finalize timeout bundle without proof", "index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err)
+				log.Error("failed to finalize timeout bundle without proof", "bundle index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err)
+				return
 			}
 		}
 
@@ -577,7 +532,8 @@ func (r *Layer2Relayer) ProcessPendingBundles() {
 		log.Info("Start to roll up zk proof", "bundle hash", bundle.Hash)
 		r.metrics.rollupL2RelayerProcessPendingBundlesFinalizedTotal.Inc()
 		if err := r.finalizeBundle(bundle, true); err != nil {
-			log.Error("Failed to finalize bundle with proof", "index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err)
+			log.Error("failed to finalize bundle with proof", "bundle index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err)
+			return
 		}
 
 	case types.ProvingTaskFailed:
@@ -589,142 +545,49 @@ func (r *Layer2Relayer) ProcessPendingBundles() {
 		//     stop the ledger, fix the limit, revert all the violating blocks,
 		//     chunks, batches, bundles and all subsequent ones, and resume,
 		//     i.e. this case requires manual resolution.
-		log.Error("bundle proving failed", "index", bundle.Index, "hash", bundle.Hash, "proved at", bundle.ProvedAt, "proof time sec", bundle.ProofTimeSec)
+		log.Error("bundle proving failed", "bundle index", bundle.Index, "bundle hash", bundle.Hash, "proved at", bundle.ProvedAt, "proof time sec", bundle.ProofTimeSec)
 
 	default:
 		log.Error("encounter unreachable case in ProcessPendingBundles", "proving status", status)
 	}
 }
 
-func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error {
-	// Check batch status before sending `finalizeBatch` tx.
-	if r.cfg.ChainMonitor.Enabled {
-		var batchStatus bool
-		batchStatus, err := r.getBatchStatusByIndex(dbBatch)
+func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error {
+	// Check if current bundle codec version is not less than the preceding one
+	if bundle.StartBatchIndex > 0 {
+		prevBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, bundle.StartBatchIndex-1)
 		if err != nil {
-			r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc()
-			log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", dbBatch.Index, "err", err)
+			log.Error("failed to get previous batch",
+				"current bundle index", bundle.Index,
+				"start batch index", bundle.StartBatchIndex,
+				"error", err)
 			return err
 		}
-		if !batchStatus {
-			r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc()
-			log.Error("the batch status is false, stop finalize batch and check the reason", "batch_index", dbBatch.Index)
-			return errors.New("the batch status is false")
+		if bundle.CodecVersion < prevBatch.CodecVersion {
+			log.Error("current bundle codec version is less than the preceding batch",
+				"current bundle index", bundle.Index,
+				"current codec version", bundle.CodecVersion,
+				"prev batch index", prevBatch.Index,
+				"prev codec version", prevBatch.CodecVersion)
+			return errors.New("current bundle codec version cannot be less than the preceding batch")
 		}
 	}
 
-	if dbBatch.Index == 0 {
-		return errors.New("invalid args: batch index is 0, should only happen in finalizing genesis batch")
-	}
-
-	dbParentBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, dbBatch.Index-1)
-	if getErr != nil {
-		return fmt.Errorf("failed to get batch, index: %d, err: %w", dbBatch.Index-1, getErr)
-	}
-
-	dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
-	if err != nil {
-		return fmt.Errorf("failed to fetch chunks: %w", err)
-	}
-
-	var aggProof *message.BatchProof
-	if withProof {
-		aggProof, getErr = r.batchOrm.GetVerifiedProofByHash(r.ctx, dbBatch.Hash)
+	// Check batch status before sending `finalizeBundle` tx.
+	for batchIndex := bundle.StartBatchIndex; batchIndex <= bundle.EndBatchIndex; batchIndex++ {
+		tmpBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, batchIndex)
 		if getErr != nil {
-			return fmt.Errorf("failed to get verified proof by hash, index: %d, err: %w", dbBatch.Index, getErr)
-		}
-
-		if err = aggProof.SanityCheck(); err != nil {
-			return fmt.Errorf("failed to check agg_proof sanity, index: %d, err: %w", dbBatch.Index, err)
-		}
-	}
-
-	var calldata []byte
-	codecVersion := encoding.GetCodecVersion(r.chainCfg, dbChunks[0].StartBlockNumber, dbChunks[0].StartBlockTime)
-	switch codecVersion {
-	case encoding.CodecV0:
-		log.Info("Start to roll up zk proof", "batch hash", dbBatch.Hash)
-		calldata, err = r.constructFinalizeBatchPayloadCodecV0(dbBatch, dbParentBatch, aggProof)
-		if err != nil {
-			return fmt.Errorf("failed to construct finalizeBatch payload codecv0, index: %v, err: %w", dbBatch.Index, err)
-		}
-
-	case encoding.CodecV1, encoding.CodecV2:
-		log.Info("Start to roll up zk proof", "batch hash", dbBatch.Hash)
-		chunks := make([]*encoding.Chunk, len(dbChunks))
-		for i, c := range dbChunks {
-			blocks, dbErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
-			if dbErr != nil {
-				return fmt.Errorf("failed to fetch blocks: %w", dbErr)
-			}
-			chunks[i] = &encoding.Chunk{Blocks: blocks}
-		}
-		calldata, err = r.constructFinalizeBatchPayloadCodecV1AndV2(dbBatch, dbParentBatch, dbChunks, chunks, aggProof)
-		if err != nil {
-			return fmt.Errorf("failed to construct finalizeBatch payload codecv1, index: %v, err: %w", dbBatch.Index, err)
+			log.Error("failed to get batch by index", "batch index", batchIndex, "error", getErr)
+			return getErr
 		}
 
-	case encoding.CodecV3, encoding.CodecV4:
-		log.Debug("using finalizeBundle instead", "index", dbBatch.Index, "codec version", codecVersion)
-		return nil
-
-	default:
-		return fmt.Errorf("unsupported codec version: %v", codecVersion)
-	}
-
-	txHash, err := r.finalizeSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, nil, 0)
-	if err != nil {
-		log.Error(
-			"finalizeBatch in layer1 failed",
-			"with proof", withProof,
-			"index", dbBatch.Index,
-			"hash", dbBatch.Hash,
-			"RollupContractAddress", r.cfg.RollupContractAddress,
-			"err", err,
-			"calldata", common.Bytes2Hex(calldata),
-		)
-		return err
-	}
-
-	log.Info("finalizeBatch in layer1", "with proof", withProof, "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String())
-
-	// Updating rollup status in database.
-	if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, dbBatch.Hash, txHash.String(), types.RollupFinalizing); err != nil {
-		log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String(), "err", err)
-		return err
-	}
-
-	// Updating the proving status when finalizing without proof, thus the coordinator could omit this task.
-	// it isn't a necessary step, so don't put in a transaction with UpdateFinalizeTxHashAndRollupStatus
-	if !withProof {
-		txErr := r.db.Transaction(func(dbTX *gorm.DB) error {
-			if updateErr := r.batchOrm.UpdateProvingStatus(r.ctx, dbBatch.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil {
-				return updateErr
-			}
-			if updateErr := r.chunkOrm.UpdateProvingStatusByBatchHash(r.ctx, dbBatch.Hash, types.ProvingTaskVerified, dbTX); updateErr != nil {
-				return updateErr
-			}
-			return nil
-		})
-		if txErr != nil {
-			log.Error("Updating chunk and batch proving status when finalizing without proof failure", "batchHash", dbBatch.Hash, "err", txErr)
+		// check codec version
+		if tmpBatch.CodecVersion != bundle.CodecVersion {
+			log.Error("bundle codec version is different from batch codec version", "bundle index", bundle.Index, "batch index", tmpBatch.Index, "bundle codec version", bundle.CodecVersion, "batch codec version", tmpBatch.CodecVersion)
+			return errors.New("bundle codec version is different from batch codec version")
 		}
-	}
-
-	r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
-	return nil
-}
 
-func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error {
-	// Check batch status before sending `finalizeBundle` tx.
-	if r.cfg.ChainMonitor.Enabled {
-		for batchIndex := bundle.StartBatchIndex; batchIndex <= bundle.EndBatchIndex; batchIndex++ {
-			tmpBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, batchIndex)
-			if getErr != nil {
-				log.Error("failed to get batch by index", "batch index", batchIndex, "error", getErr)
-				return getErr
-			}
+		if r.cfg.ChainMonitor.Enabled {
 			batchStatus, getErr := r.getBatchStatusByIndex(tmpBatch)
 			if getErr != nil {
 				r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc()
@@ -757,7 +620,7 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
 		}
 	}
 
-	calldata, err := r.constructFinalizeBundlePayloadCodecV3AndV4(dbBatch, aggProof)
+	calldata, err := r.constructFinalizeBundlePayloadCodecV4(dbBatch, aggProof)
 	if err != nil {
 		return fmt.Errorf("failed to construct finalizeBundle payload codecv3, index: %v, err: %w", dbBatch.Index, err)
 	}
@@ -963,45 +826,7 @@ func (r *Layer2Relayer) handleL2RollupRelayerConfirmLoop(ctx context.Context) {
 	}
 }
 
-func (r *Layer2Relayer) constructCommitBatchPayloadCodecV0AndV1AndV2(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, *kzg4844.Blob, error) {
-	codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
-	if err != nil {
-		return nil, nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
-	}
-
-	batch := &encoding.Batch{
-		Index:                      dbBatch.Index,
-		TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
-		ParentBatchHash:            common.HexToHash(dbParentBatch.Hash),
-		Chunks:                     chunks,
-	}
-
-	daBatch, createErr := codec.NewDABatch(batch)
-	if createErr != nil {
-		return nil, nil, fmt.Errorf("failed to create DA batch: %w", createErr)
-	}
-
-	encodedChunks := make([][]byte, len(dbChunks))
-	for i, c := range dbChunks {
-		daChunk, createErr := codec.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
-		if createErr != nil {
-			return nil, nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
-		}
-		daChunkBytes, encodeErr := daChunk.Encode()
-		if encodeErr != nil {
-			return nil, nil, fmt.Errorf("failed to encode DA chunk: %w", encodeErr)
-		}
-		encodedChunks[i] = daChunkBytes
-	}
-
-	calldata, packErr := r.l1RollupABI.Pack("commitBatch", daBatch.Version(), dbParentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap())
-	if packErr != nil {
-		return nil, nil, fmt.Errorf("failed to pack commitBatch: %w", packErr)
-	}
-	return calldata, daBatch.Blob(), nil
-}
-
-func (r *Layer2Relayer) constructCommitBatchPayloadCodecV3AndV4(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, *kzg4844.Blob, error) {
+func (r *Layer2Relayer) constructCommitBatchPayloadCodecV4(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, *kzg4844.Blob, error) {
 	batch := &encoding.Batch{
 		Index:                      dbBatch.Index,
 		TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
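Reviewer note: the fake-finalize path (`EnableTestEnvBypassFeatures`) now refuses to run ahead of an unfinalized ancestor, because in that mode the contract performs no such check itself. A distilled sketch of the guard; `rollupStatus` is a stand-in for the ORM lookup, while the status constants come from this patch:

```go
package main

import (
	"errors"
	"fmt"

	"scroll-tech/common/types"
)

// canFakeFinalize distills the new guard in ProcessPendingBundles: before a
// bundle is finalized without proof, the batch immediately preceding it must
// already be finalized on L1.
func canFakeFinalize(startBatchIndex uint64, rollupStatus func(batchIndex uint64) (types.RollupStatus, error)) error {
	if startBatchIndex == 0 {
		return errors.New("invalid args: start batch index of bundle is 0")
	}
	status, err := rollupStatus(startBatchIndex - 1)
	if err != nil {
		return fmt.Errorf("failed to get last batch %d: %w", startBatchIndex-1, err)
	}
	if status != types.RollupFinalized {
		return errors.New("previous bundle or batch is not finalized")
	}
	return nil
}
```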
@@ -1043,91 +868,7 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV3AndV4(dbBat
 	return calldata, daBatch.Blob(), nil
 }
 
-func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV0(dbBatch *orm.Batch, dbParentBatch *orm.Batch, aggProof *message.BatchProof) ([]byte, error) {
-	if aggProof != nil { // finalizeBatch with proof.
-		calldata, packErr := r.l1RollupABI.Pack(
-			"finalizeBatchWithProof",
-			dbBatch.BatchHeader,
-			common.HexToHash(dbParentBatch.StateRoot),
-			common.HexToHash(dbBatch.StateRoot),
-			common.HexToHash(dbBatch.WithdrawRoot),
-			aggProof.Proof,
-		)
-		if packErr != nil {
-			return nil, fmt.Errorf("failed to pack finalizeBatchWithProof: %w", packErr)
-		}
-		return calldata, nil
-	}
-
-	// finalizeBatch without proof.
-	calldata, packErr := r.l1RollupABI.Pack(
-		"finalizeBatch",
-		dbBatch.BatchHeader,
-		common.HexToHash(dbParentBatch.StateRoot),
-		common.HexToHash(dbBatch.StateRoot),
-		common.HexToHash(dbBatch.WithdrawRoot),
-	)
-	if packErr != nil {
-		return nil, fmt.Errorf("failed to pack finalizeBatch: %w", packErr)
-	}
-	return calldata, nil
-}
-
-func (r *Layer2Relayer) constructFinalizeBatchPayloadCodecV1AndV2(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk, aggProof *message.BatchProof) ([]byte, error) {
-	batch := &encoding.Batch{
-		Index:                      dbBatch.Index,
-		TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
-		ParentBatchHash:            common.HexToHash(dbParentBatch.Hash),
-		Chunks:                     chunks,
-	}
-
-	codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
-	if err != nil {
-		return nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
-	}
-
-	daBatch, createErr := codec.NewDABatch(batch)
-	if createErr != nil {
-		return nil, fmt.Errorf("failed to create DA batch: %w", createErr)
-	}
-
-	blobDataProof, getErr := daBatch.BlobDataProofForPointEvaluation()
-	if getErr != nil {
-		return nil, fmt.Errorf("failed to get blob data proof: %w", getErr)
-	}
-
-	if aggProof != nil { // finalizeBatch4844 with proof.
-		calldata, packErr := r.l1RollupABI.Pack(
-			"finalizeBatchWithProof4844",
-			dbBatch.BatchHeader,
-			common.HexToHash(dbParentBatch.StateRoot),
-			common.HexToHash(dbBatch.StateRoot),
-			common.HexToHash(dbBatch.WithdrawRoot),
-			blobDataProof,
-			aggProof.Proof,
-		)
-		if packErr != nil {
-			return nil, fmt.Errorf("failed to pack finalizeBatchWithProof4844: %w", packErr)
-		}
-		return calldata, nil
-	}
-
-	// finalizeBatch4844 without proof.
-	calldata, packErr := r.l1RollupABI.Pack(
-		"finalizeBatch4844",
-		dbBatch.BatchHeader,
-		common.HexToHash(dbParentBatch.StateRoot),
-		common.HexToHash(dbBatch.StateRoot),
-		common.HexToHash(dbBatch.WithdrawRoot),
-		blobDataProof,
-	)
-	if packErr != nil {
-		return nil, fmt.Errorf("failed to pack finalizeBatch4844: %w", packErr)
-	}
-	return calldata, nil
-}
-
-func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV3AndV4(dbBatch *orm.Batch, aggProof *message.BundleProof) ([]byte, error) {
+func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV4(dbBatch *orm.Batch, aggProof *message.BundleProof) ([]byte, error) {
 	if aggProof != nil { // finalizeBundle with proof.
 		calldata, packErr := r.l1RollupABI.Pack(
 			"finalizeBundleWithProof",
diff --git a/rollup/internal/controller/relayer/l2_relayer_test.go b/rollup/internal/controller/relayer/l2_relayer_test.go
index c84c10e66a..a40513318d 100644
--- a/rollup/internal/controller/relayer/l2_relayer_test.go
+++ b/rollup/internal/controller/relayer/l2_relayer_test.go
@@ -51,21 +51,17 @@ func testCreateNewRelayer(t *testing.T) {
 }
 
 func testL2RelayerProcessPendingBatches(t *testing.T) {
-	codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3}
+	codecVersions := []encoding.CodecVersion{encoding.CodecV4}
 	for _, codecVersion := range codecVersions {
 		db := setupL2RelayerDB(t)
 		defer database.CloseDB(db)
 
 		l2Cfg := cfg.L2Config
 		var chainConfig *params.ChainConfig
-		if codecVersion == encoding.CodecV0 {
-			chainConfig = &params.ChainConfig{}
-		} else if codecVersion == encoding.CodecV1 {
-			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
-		} else if codecVersion == encoding.CodecV2 {
-			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
+		if codecVersion == encoding.CodecV4 {
+			chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
 		} else {
-			chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
+			assert.Fail(t, "unsupported codec version, expected CodecV4")
 		}
 
 		relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
@@ -106,85 +102,16 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
 	}
 }
 
-func testL2RelayerProcessCommittedBatches(t *testing.T) {
-	codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2}
-	for _, codecVersion := range codecVersions {
-		db := setupL2RelayerDB(t)
-		defer database.CloseDB(db)
-
-		l2Cfg := cfg.L2Config
-		var chainConfig *params.ChainConfig
-		if codecVersion == encoding.CodecV0 {
-			chainConfig = &params.ChainConfig{}
-		} else if codecVersion == encoding.CodecV1 {
-			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
-		} else {
-			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
-		}
-		relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
-		assert.NoError(t, err)
-
-		l2BlockOrm := orm.NewL2Block(db)
-		err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
-		assert.NoError(t, err)
-		chunkOrm := orm.NewChunk(db)
-		_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, rutils.ChunkMetrics{})
-		assert.NoError(t, err)
-		_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, rutils.ChunkMetrics{})
-		assert.NoError(t, err)
-
-		batch := &encoding.Batch{
-			Index:                      1,
-			TotalL1MessagePoppedBefore: 0,
-			ParentBatchHash:            common.Hash{},
-			Chunks:                     []*encoding.Chunk{chunk1, chunk2},
-		}
-
-		batchOrm := orm.NewBatch(db)
-		dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{})
-		assert.NoError(t, err)
-
-		err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
-		assert.NoError(t, err)
-
-		err = batchOrm.UpdateProvingStatus(context.Background(), dbBatch.Hash, types.ProvingTaskVerified)
-		assert.NoError(t, err)
-
-		relayer.ProcessCommittedBatches()
-
-		statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
-		assert.NoError(t, err)
-		assert.Equal(t, 1, len(statuses))
-		// no valid proof, rollup status remains the same
-		assert.Equal(t, types.RollupCommitted, statuses[0])
-
-		proof := &message.BatchProof{
-			Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-			Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-			Vk:        []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-		}
-		err = batchOrm.UpdateProofByHash(context.Background(), dbBatch.Hash, proof, 100)
-		assert.NoError(t, err)
-
-		relayer.ProcessCommittedBatches()
-		statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
-		assert.NoError(t, err)
-		assert.Equal(t, 1, len(statuses))
-		assert.Equal(t, types.RollupFinalizing, statuses[0])
-		relayer.StopSenders()
-	}
-}
-
 func testL2RelayerProcessPendingBundles(t *testing.T) {
-	codecVersions := []encoding.CodecVersion{encoding.CodecV3}
+	codecVersions := []encoding.CodecVersion{encoding.CodecV4}
 	for _, codecVersion := range codecVersions {
 		db := setupL2RelayerDB(t)
 		defer database.CloseDB(db)
 
 		l2Cfg := cfg.L2Config
 		var chainConfig *params.ChainConfig
-		if codecVersion == encoding.CodecV3 {
-			chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
+		if codecVersion == encoding.CodecV4 {
+			chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
 		}
 		relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
 		assert.NoError(t, err)
@@ -235,79 +162,8 @@ func testL2RelayerProcessPendingBundles(t *testing.T) {
 	}
 }
 
-func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
-	codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2}
-	for _, codecVersion := range codecVersions {
-		db := setupL2RelayerDB(t)
-		defer database.CloseDB(db)
-
-		l2Cfg := cfg.L2Config
-		l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
-		l2Cfg.RelayerConfig.FinalizeBatchWithoutProofTimeoutSec = 0
-		var chainConfig *params.ChainConfig
-		if codecVersion == encoding.CodecV0 {
-			chainConfig = &params.ChainConfig{}
-		} else if codecVersion == encoding.CodecV1 {
-			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
-		} else {
-			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
-		}
-		relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
-		assert.NoError(t, err)
-
-		l2BlockOrm := orm.NewL2Block(db)
-		err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
-		assert.NoError(t, err)
-		chunkOrm := orm.NewChunk(db)
-		chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, rutils.ChunkMetrics{})
-		assert.NoError(t, err)
-		chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, rutils.ChunkMetrics{})
-		assert.NoError(t, err)
-
-		batch := &encoding.Batch{
-			Index:                      1,
-			TotalL1MessagePoppedBefore: 0,
-			ParentBatchHash:            common.Hash{},
-			Chunks:                     []*encoding.Chunk{chunk1, chunk2},
-		}
-
-		batchOrm := orm.NewBatch(db)
-		dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{})
-		assert.NoError(t, err)
-
-		err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
-		assert.NoError(t, err)
-
-		err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil)
-		assert.NoError(t, err)
-
-		assert.Eventually(t, func() bool {
-			relayer.ProcessCommittedBatches()
-
-			batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0)
-			if batchErr != nil {
-				return false
-			}
-
-			batchStatus := len(batchInDB) == 1 && types.RollupStatus(batchInDB[0].RollupStatus) == types.RollupFinalizing &&
-				types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified
-
-			chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash)
-			if chunkErr != nil {
-				return false
-			}
-
-			chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified &&
-				types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified
-
-			return batchStatus && chunkStatus
-		}, 5*time.Second, 100*time.Millisecond, "Batch or Chunk status did not update as expected")
-		relayer.StopSenders()
-	}
-}
-
 func testL2RelayerFinalizeTimeoutBundles(t *testing.T) {
-	codecVersions := []encoding.CodecVersion{encoding.CodecV3}
+	codecVersions := []encoding.CodecVersion{encoding.CodecV4}
 	for _, codecVersion := range codecVersions {
 		db := setupL2RelayerDB(t)
 		defer database.CloseDB(db)
@@ -316,8 +172,8 @@ func testL2RelayerFinalizeTimeoutBundles(t *testing.T) {
 		l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
 		l2Cfg.RelayerConfig.FinalizeBundleWithoutProofTimeoutSec = 0
 		var chainConfig *params.ChainConfig
-		if codecVersion == encoding.CodecV3 {
-			chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
+		if codecVersion == encoding.CodecV4 {
+			chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
 		}
 		relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
 		assert.NoError(t, err)
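Reviewer note: grounded in the updated fixtures above, CodecV4 requires every fork through Darwin V2 to be active, so the tests now build a chain config with all markers at genesis. Extracted as a helper for clarity (the helper name is ours; the field values are exactly what the tests construct):

```go
package relayer

import (
	"math/big"

	"github.com/scroll-tech/go-ethereum/params"
)

// chainConfigForCodecV4 reproduces the fixture used by the updated tests:
// CodecV4 batches require Darwin V2 (and everything before it) to be active,
// so every fork marker is set to genesis.
func chainConfigForCodecV4() *params.ChainConfig {
	return &params.ChainConfig{
		LondonBlock:    big.NewInt(0),
		BernoulliBlock: big.NewInt(0),
		CurieBlock:     big.NewInt(0),
		DarwinTime:     new(uint64), // zero timestamp: active from genesis
		DarwinV2Time:   new(uint64),
	}
}
```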
- l2Cfg := cfg.L2Config - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil) - assert.NoError(t, err) - defer l2Relayer.StopSenders() - - // Simulate message confirmations. - isSuccessful := []bool{true, false} - batchOrm := orm.NewBatch(db) - batchHashes := make([]string, len(isSuccessful)) - for i := range batchHashes { - batch := &encoding.Batch{ - Index: uint64(i + 1), - TotalL1MessagePoppedBefore: 0, - ParentBatchHash: common.Hash{}, - Chunks: []*encoding.Chunk{chunk1, chunk2}, - } - - dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, rutils.BatchMetrics{}) - assert.NoError(t, err) - batchHashes[i] = dbBatch.Hash - } - - for i, batchHash := range batchHashes { - l2Relayer.finalizeSender.SendConfirmation(&sender.Confirmation{ - ContextID: batchHash, - IsSuccessful: isSuccessful[i], - TxHash: common.HexToHash("0x123456789abcdef"), - SenderType: types.SenderTypeFinalizeBatch, - }) - } - - // Check the database for the updated status using TryTimes. - ok := utils.TryTimes(5, func() bool { - expectedStatuses := []types.RollupStatus{ - types.RollupFinalized, - types.RollupFinalizeFailed, - } - - for i, batchHash := range batchHashes { - batchInDB, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": batchHash}, nil, 0) - if err != nil || len(batchInDB) != 1 || types.RollupStatus(batchInDB[0].RollupStatus) != expectedStatuses[i] { - return false - } - } - return true - }) - assert.True(t, ok) -} - func testL2RelayerFinalizeBundleConfirm(t *testing.T) { db := setupL2RelayerDB(t) defer database.CloseDB(db) @@ -529,7 +329,7 @@ func testL2RelayerFinalizeBundleConfirm(t *testing.T) { assert.NoError(t, err) batchHashes[i] = dbBatch.Hash - bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, encoding.CodecV3) + bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, encoding.CodecV4) assert.NoError(t, err) bundleHashes[i] = bundle.Hash diff --git a/rollup/internal/controller/relayer/relayer_test.go b/rollup/internal/controller/relayer/relayer_test.go index c9d33fef16..bf78c9518d 100644 --- a/rollup/internal/controller/relayer/relayer_test.go +++ b/rollup/internal/controller/relayer/relayer_test.go @@ -124,12 +124,9 @@ func TestFunctions(t *testing.T) { // Run l2 relayer test cases.
t.Run("TestCreateNewRelayer", testCreateNewRelayer) t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches) - t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches) t.Run("TestL2RelayerProcessPendingBundles", testL2RelayerProcessPendingBundles) - t.Run("TestL2RelayerFinalizeTimeoutBatches", testL2RelayerFinalizeTimeoutBatches) t.Run("TestL2RelayerFinalizeTimeoutBundles", testL2RelayerFinalizeTimeoutBundles) t.Run("TestL2RelayerCommitConfirm", testL2RelayerCommitConfirm) - t.Run("TestL2RelayerFinalizeBatchConfirm", testL2RelayerFinalizeBatchConfirm) t.Run("TestL2RelayerFinalizeBundleConfirm", testL2RelayerFinalizeBundleConfirm) t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm) t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle) diff --git a/rollup/internal/controller/sender/sender.go b/rollup/internal/controller/sender/sender.go index 051c01b79d..a36be19123 100644 --- a/rollup/internal/controller/sender/sender.go +++ b/rollup/internal/controller/sender/sender.go @@ -175,7 +175,6 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc() var ( feeData *FeeData - tx *gethTypes.Transaction sidecar *gethTypes.BlobTxSidecar err error ) @@ -217,20 +216,41 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err) } - if tx, err = s.createAndSendTx(feeData, target, data, sidecar, nil); err != nil { + signedTx, err := s.createTx(feeData, target, data, sidecar, nil) + if err != nil { s.metrics.sendTransactionFailureSendTx.WithLabelValues(s.service, s.name).Inc() - log.Error("failed to create and send tx (non-resubmit case)", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err) - return common.Hash{}, fmt.Errorf("failed to create and send transaction, err: %w", err) + log.Error("failed to create signed tx (non-resubmit case)", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err) + return common.Hash{}, fmt.Errorf("failed to create signed transaction, err: %w", err) } - if err = s.pendingTransactionOrm.InsertPendingTransaction(s.ctx, contextID, s.getSenderMeta(), tx, blockNumber); err != nil { + // Insert the transaction into the pending transaction table. + // A corner case is that the transaction is inserted into the table but not sent to the chain, because the server is stopped in the middle. + // This case will be handled by the checkPendingTransaction function. + if err = s.pendingTransactionOrm.InsertPendingTransaction(s.ctx, contextID, s.getSenderMeta(), signedTx, blockNumber); err != nil { log.Error("failed to insert transaction", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err) return common.Hash{}, fmt.Errorf("failed to insert transaction, err: %w", err) } - return tx.Hash(), nil + + if err := s.client.SendTransaction(s.ctx, signedTx); err != nil { + // Delete the transaction from the pending transaction table if it fails to send. 
+ if updateErr := s.pendingTransactionOrm.DeleteTransactionByTxHash(s.ctx, signedTx.Hash()); updateErr != nil { + log.Error("failed to delete transaction", "tx hash", signedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", signedTx.Nonce(), "err", updateErr) + return common.Hash{}, fmt.Errorf("failed to delete transaction, err: %w", updateErr) + } + + log.Error("failed to send tx", "tx hash", signedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", signedTx.Nonce(), "err", err) + // Check if the error contains "nonce too low" and reset the nonce if so. + // Only reset the nonce when the tx is not a resubmission; this send path never resubmits. + if strings.Contains(err.Error(), "nonce too low") { + s.resetNonce(context.Background()) + } + return common.Hash{}, fmt.Errorf("failed to send transaction, err: %w", err) + } + + return signedTx.Hash(), nil } -func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, overrideNonce *uint64) (*gethTypes.Transaction, error) { +func (s *Sender) createTx(feeData *FeeData, target *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, overrideNonce *uint64) (*gethTypes.Transaction, error) { var ( nonce = s.transactionSigner.GetNonce() txData gethTypes.TxData @@ -292,14 +312,9 @@ func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, data return nil, err } - if err = s.client.SendTransaction(s.ctx, signedTx); err != nil { - log.Error("failed to send tx", "tx hash", signedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", signedTx.Nonce(), "err", err) - // Check if contain nonce, and reset nonce - // only reset nonce when it is not from resubmit - if strings.Contains(err.Error(), "nonce too low") && overrideNonce == nil { - s.resetNonce(context.Background()) - } - return nil, err + // Update the cached nonce when the tx is not from a resubmission. + if overrideNonce == nil { + s.transactionSigner.SetNonce(nonce + 1) } if feeData.gasTipCap != nil { @@ -320,10 +335,6 @@ func (s *Sender) createAndSendTx(feeData *FeeData, target *common.Address, data s.metrics.currentGasLimit.WithLabelValues(s.service, s.name).Set(float64(feeData.gasLimit)) - // update nonce when it is not from resubmit - if overrideNonce == nil { - s.transactionSigner.SetNonce(nonce + 1) - } return signedTx, nil } @@ -337,7 +348,7 @@ func (s *Sender) resetNonce(ctx context.Context) { s.transactionSigner.SetNonce(nonce) } -func (s *Sender) resubmitTransaction(tx *gethTypes.Transaction, baseFee, blobBaseFee uint64) (*gethTypes.Transaction, error) { +func (s *Sender) createReplacingTransaction(tx *gethTypes.Transaction, baseFee, blobBaseFee uint64) (*gethTypes.Transaction, error) { escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum) escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen) maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice) @@ -357,6 +368,10 @@ func (s *Sender) resubmitTransaction(tx *gethTypes.Transaction, baseFee, blobBas originalGasPrice := tx.GasPrice() gasPrice := new(big.Int).Mul(originalGasPrice, escalateMultipleNum) gasPrice = new(big.Int).Div(gasPrice, escalateMultipleDen) + baseFeeInt := new(big.Int).SetUint64(baseFee) + if gasPrice.Cmp(baseFeeInt) < 0 { + gasPrice = baseFeeInt + } if gasPrice.Cmp(maxGasPrice) > 0 { gasPrice = maxGasPrice } @@ -449,6 +464,15 @@ func (s *Sender) resubmitTransaction(tx *gethTypes.Transaction, baseFee, blobBas blobGasFeeCap = maxBlobGasPrice } + // Check if any bumped fee cap is less than double the original; replacement fees must be at least doubled. + doubledTipCap :=
new(big.Int).Mul(originalGasTipCap, big.NewInt(2)) + doubledFeeCap := new(big.Int).Mul(originalGasFeeCap, big.NewInt(2)) + doubledBlobFeeCap := new(big.Int).Mul(originalBlobGasFeeCap, big.NewInt(2)) + if gasTipCap.Cmp(doubledTipCap) < 0 || gasFeeCap.Cmp(doubledFeeCap) < 0 || blobGasFeeCap.Cmp(doubledBlobFeeCap) < 0 { + log.Error("gas fees must be at least double", "originalTipCap", originalGasTipCap, "currentTipCap", gasTipCap, "requiredTipCap", doubledTipCap, "originalFeeCap", originalGasFeeCap, "currentFeeCap", gasFeeCap, "requiredFeeCap", doubledFeeCap, "originalBlobFeeCap", originalBlobGasFeeCap, "currentBlobFeeCap", blobGasFeeCap, "requiredBlobFeeCap", doubledBlobFeeCap) + return nil, errors.New("gas fees must be at least double") + } + feeData.gasFeeCap = gasFeeCap feeData.gasTipCap = gasTipCap feeData.blobGasFeeCap = blobGasFeeCap @@ -468,12 +492,12 @@ func (s *Sender) resubmitTransaction(tx *gethTypes.Transaction, baseFee, blobBas nonce := tx.Nonce() s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc() - tx, err := s.createAndSendTx(&feeData, tx.To(), tx.Data(), tx.BlobTxSidecar(), &nonce) + signedTx, err := s.createTx(&feeData, tx.To(), tx.Data(), tx.BlobTxSidecar(), &nonce) if err != nil { - log.Error("failed to create and send tx (resubmit case)", "from", s.transactionSigner.GetAddr().String(), "nonce", nonce, "err", err) + log.Error("failed to create signed tx (resubmit case)", "from", s.transactionSigner.GetAddr().String(), "nonce", nonce, "err", err) return nil, err } - return tx, nil + return signedTx, nil } // checkPendingTransaction checks the confirmation status of pending transactions against the latest confirmed block number. @@ -500,30 +524,29 @@ func (s *Sender) checkPendingTransaction() { } for _, txnToCheck := range transactionsToCheck { - tx := new(gethTypes.Transaction) - if err := tx.DecodeRLP(rlp.NewStream(bytes.NewReader(txnToCheck.RLPEncoding), 0)); err != nil { + originalTx := new(gethTypes.Transaction) + if err := originalTx.DecodeRLP(rlp.NewStream(bytes.NewReader(txnToCheck.RLPEncoding), 0)); err != nil { log.Error("failed to decode RLP", "context ID", txnToCheck.ContextID, "sender meta", s.getSenderMeta(), "err", err) continue } - receipt, err := s.client.TransactionReceipt(s.ctx, tx.Hash()) + receipt, err := s.client.TransactionReceipt(s.ctx, originalTx.Hash()) if err == nil { // tx confirmed. if receipt.BlockNumber.Uint64() <= confirmed { - err := s.db.Transaction(func(dbTX *gorm.DB) error { + if dbTxErr := s.db.Transaction(func(dbTX *gorm.DB) error { // Update the status of the transaction to TxStatusConfirmed. - if err := s.pendingTransactionOrm.UpdatePendingTransactionStatusByTxHash(s.ctx, tx.Hash(), types.TxStatusConfirmed, dbTX); err != nil { - log.Error("failed to update transaction status by tx hash", "hash", tx.Hash().String(), "sender meta", s.getSenderMeta(), "from", s.transactionSigner.GetAddr().String(), "nonce", tx.Nonce(), "err", err) - return err + if updateErr := s.pendingTransactionOrm.UpdateTransactionStatusByTxHash(s.ctx, originalTx.Hash(), types.TxStatusConfirmed, dbTX); updateErr != nil { + log.Error("failed to update transaction status by tx hash", "hash", originalTx.Hash().String(), "sender meta", s.getSenderMeta(), "from", s.transactionSigner.GetAddr().String(), "nonce", originalTx.Nonce(), "err", updateErr) + return updateErr } // Update other transactions with the same nonce and sender address as failed. 
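+ // Only one transaction per (sender, nonce) can ever be mined, so once this receipt is
+ // confirmed, every other pending candidate with the same nonce is permanently invalid.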
- if err := s.pendingTransactionOrm.UpdateOtherTransactionsAsFailedByNonce(s.ctx, txnToCheck.SenderAddress, tx.Nonce(), tx.Hash(), dbTX); err != nil { - log.Error("failed to update other transactions as failed by nonce", "senderAddress", txnToCheck.SenderAddress, "nonce", tx.Nonce(), "excludedTxHash", tx.Hash(), "err", err) - return err + if updateErr := s.pendingTransactionOrm.UpdateOtherTransactionsAsFailedByNonce(s.ctx, txnToCheck.SenderAddress, originalTx.Nonce(), originalTx.Hash(), dbTX); updateErr != nil { + log.Error("failed to update other transactions as failed by nonce", "senderAddress", txnToCheck.SenderAddress, "nonce", originalTx.Nonce(), "excludedTxHash", originalTx.Hash(), "err", updateErr) + return updateErr } return nil - }) - if err != nil { - log.Error("db transaction failed after receiving confirmation", "err", err) + }); dbTxErr != nil { + log.Error("db transaction failed after receiving confirmation", "err", dbTxErr) return } @@ -531,7 +554,7 @@ func (s *Sender) checkPendingTransaction() { s.confirmCh <- &Confirmation{ ContextID: txnToCheck.ContextID, IsSuccessful: receipt.Status == gethTypes.ReceiptStatusSuccessful, - TxHash: tx.Hash(), + TxHash: originalTx.Hash(), SenderType: s.senderType, } } @@ -548,52 +571,77 @@ func (s *Sender) checkPendingTransaction() { // early return if the previous transaction has not been confirmed yet. // currentNonce is already the confirmed nonce + 1. - if tx.Nonce() > currentNonce { - log.Debug("previous transaction not yet confirmed, skip bumping gas price", "address", txnToCheck.SenderAddress, "currentNonce", currentNonce, "txNonce", tx.Nonce()) + if originalTx.Nonce() > currentNonce { + log.Debug("previous transaction not yet confirmed, skip bumping gas price", "address", txnToCheck.SenderAddress, "currentNonce", currentNonce, "txNonce", originalTx.Nonce()) continue } // It's possible that the pending transaction was marked as failed earlier in this loop (e.g., if one of its replacements has already been confirmed). // Therefore, we fetch the current transaction status again for accuracy before proceeding. 
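+ // Without this re-check, the loop could bump and re-broadcast a transaction whose nonce
+ // was already consumed by a confirmed sibling, wasting a futile resubmission.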
- status, err := s.pendingTransactionOrm.GetTxStatusByTxHash(s.ctx, tx.Hash()) + status, err := s.pendingTransactionOrm.GetTxStatusByTxHash(s.ctx, originalTx.Hash()) if err != nil { - log.Error("failed to get transaction status by tx hash", "hash", tx.Hash().String(), "err", err) + log.Error("failed to get transaction status by tx hash", "hash", originalTx.Hash().String(), "err", err) return } if status == types.TxStatusConfirmedFailed { - log.Warn("transaction already marked as failed, skipping resubmission", "hash", tx.Hash().String()) + log.Warn("transaction already marked as failed, skipping resubmission", "hash", originalTx.Hash().String()) continue } log.Info("resubmit transaction", "service", s.service, "name", s.name, - "hash", tx.Hash().String(), + "hash", originalTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), - "nonce", tx.Nonce(), + "nonce", originalTx.Nonce(), "submitBlockNumber", txnToCheck.SubmitBlockNumber, "currentBlockNumber", blockNumber, "escalateBlocks", s.config.EscalateBlocks) - if newTx, err := s.resubmitTransaction(tx, baseFee, blobBaseFee); err != nil { + newSignedTx, err := s.createReplacingTransaction(originalTx, baseFee, blobBaseFee) + if err != nil { s.metrics.resubmitTransactionFailedTotal.WithLabelValues(s.service, s.name).Inc() - log.Error("failed to resubmit transaction", "context ID", txnToCheck.ContextID, "sender meta", s.getSenderMeta(), "from", s.transactionSigner.GetAddr().String(), "nonce", tx.Nonce(), "err", err) - } else { - err := s.db.Transaction(func(dbTX *gorm.DB) error { - // Update the status of the original transaction as replaced, while still checking its confirmation status. - if err := s.pendingTransactionOrm.UpdatePendingTransactionStatusByTxHash(s.ctx, tx.Hash(), types.TxStatusReplaced, dbTX); err != nil { - return fmt.Errorf("failed to update status of transaction with hash %s to TxStatusReplaced, err: %w", tx.Hash().String(), err) + log.Error("failed to resubmit transaction", "context ID", txnToCheck.ContextID, "sender meta", s.getSenderMeta(), "from", s.transactionSigner.GetAddr().String(), "nonce", originalTx.Nonce(), "err", err) + return + } + + // Update the status of the original transaction as replaced, while still checking its confirmation status. + // Insert the new transaction that has replaced the original one, and set the status as pending. + // A corner case is that the transaction is inserted into the table but not sent to the chain, because the server is stopped in the middle. + // This case will be handled by the checkPendingTransaction function. 
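+ // If the subsequent SendTransaction fails, these two writes are rolled back below: the
+ // original row is restored to pending and the replacement row is deleted.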
+ if dbTxErr := s.db.Transaction(func(dbTX *gorm.DB) error { + if updateErr := s.pendingTransactionOrm.UpdateTransactionStatusByTxHash(s.ctx, originalTx.Hash(), types.TxStatusReplaced, dbTX); updateErr != nil { + return fmt.Errorf("failed to update status of transaction with hash %s to TxStatusReplaced, err: %w", originalTx.Hash().String(), updateErr) + } + if updateErr := s.pendingTransactionOrm.InsertPendingTransaction(s.ctx, txnToCheck.ContextID, s.getSenderMeta(), newSignedTx, blockNumber, dbTX); updateErr != nil { + return fmt.Errorf("failed to insert new pending transaction with context ID: %s, nonce: %d, hash: %v, previous block number: %v, current block number: %v, err: %w", txnToCheck.ContextID, newSignedTx.Nonce(), newSignedTx.Hash().String(), txnToCheck.SubmitBlockNumber, blockNumber, updateErr) + } + return nil + }); dbTxErr != nil { + log.Error("db transaction failed after resubmitting", "err", dbTxErr) + return + } + + if err := s.client.SendTransaction(s.ctx, newSignedTx); err != nil { + // SendTransaction failed, need to rollback the previous database changes + if rollbackErr := s.db.Transaction(func(tx *gorm.DB) error { + // Restore original transaction status back to pending + if updateErr := s.pendingTransactionOrm.UpdateTransactionStatusByTxHash(s.ctx, originalTx.Hash(), types.TxStatusPending, tx); updateErr != nil { + return fmt.Errorf("failed to rollback status of original transaction, err: %w", updateErr) } - // Record the new transaction that has replaced the original one. - if err := s.pendingTransactionOrm.InsertPendingTransaction(s.ctx, txnToCheck.ContextID, s.getSenderMeta(), newTx, blockNumber, dbTX); err != nil { - return fmt.Errorf("failed to insert new pending transaction with context ID: %s, nonce: %d, hash: %v, previous block number: %v, current block number: %v, err: %w", txnToCheck.ContextID, newTx.Nonce(), newTx.Hash().String(), txnToCheck.SubmitBlockNumber, blockNumber, err) + // Delete the new transaction that was inserted + if updateErr := s.pendingTransactionOrm.DeleteTransactionByTxHash(s.ctx, newSignedTx.Hash(), tx); updateErr != nil { + return fmt.Errorf("failed to delete new transaction, err: %w", updateErr) } return nil - }) - if err != nil { - log.Error("db transaction failed after resubmitting", "err", err) + }); rollbackErr != nil { + // Both SendTransaction and rollback failed + log.Error("failed to rollback database after SendTransaction failed", "tx hash", newSignedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", newSignedTx.Nonce(), "sendTxErr", err, "rollbackErr", rollbackErr) return } + + log.Error("failed to send replacing tx", "tx hash", newSignedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", newSignedTx.Nonce(), "err", err) + return } } } diff --git a/rollup/internal/controller/sender/sender_test.go b/rollup/internal/controller/sender/sender_test.go index 6f2af46c27..83fb045376 100644 --- a/rollup/internal/controller/sender/sender_test.go +++ b/rollup/internal/controller/sender/sender_test.go @@ -282,13 +282,17 @@ func testResubmitZeroGasPriceTransaction(t *testing.T) { gasFeeCap: big.NewInt(0), gasLimit: 50000, } - tx, err := s.createAndSendTx(feeData, &common.Address{}, nil, nil, nil) + tx, err := s.createTx(feeData, &common.Address{}, nil, nil, nil) assert.NoError(t, err) assert.NotNil(t, tx) + err = s.client.SendTransaction(s.ctx, tx) + assert.NoError(t, err) // Increase at least 1 wei in gas price, gas tip cap and gas fee cap.
// Bumping the fees enough times to let the transaction be included in a block. for i := 0; i < 30; i++ { - tx, err = s.resubmitTransaction(tx, 0, 0) + tx, err = s.createReplacingTransaction(tx, 0, 0) + assert.NoError(t, err) + err = s.client.SendTransaction(s.ctx, tx) assert.NoError(t, err) } @@ -369,10 +373,14 @@ func testResubmitNonZeroGasPriceTransaction(t *testing.T) { sidecar, err = makeSidecar(txBlob[i]) assert.NoError(t, err) } - tx, err := s.createAndSendTx(feeData, &common.Address{}, nil, sidecar, nil) + tx, err := s.createTx(feeData, &common.Address{}, nil, sidecar, nil) assert.NoError(t, err) assert.NotNil(t, tx) - resubmittedTx, err := s.resubmitTransaction(tx, 0, 0) + err = s.client.SendTransaction(s.ctx, tx) + assert.NoError(t, err) + resubmittedTx, err := s.createReplacingTransaction(tx, 0, 0) + assert.NoError(t, err) + err = s.client.SendTransaction(s.ctx, resubmittedTx) assert.NoError(t, err) assert.Eventually(t, func() bool { @@ -412,10 +420,14 @@ func testResubmitUnderpricedTransaction(t *testing.T) { gasFeeCap: big.NewInt(1000000000), gasLimit: 50000, } - tx, err := s.createAndSendTx(feeData, &common.Address{}, nil, nil, nil) + tx, err := s.createTx(feeData, &common.Address{}, nil, nil, nil) assert.NoError(t, err) assert.NotNil(t, tx) - _, err = s.resubmitTransaction(tx, 0, 0) + err = s.client.SendTransaction(s.ctx, tx) + assert.NoError(t, err) + resubmittedTx, err := s.createReplacingTransaction(tx, 0, 0) + assert.NoError(t, err) + err = s.client.SendTransaction(s.ctx, resubmittedTx) assert.Error(t, err, "replacement transaction underpriced") assert.Eventually(t, func() bool { @@ -462,7 +474,9 @@ func testResubmitDynamicFeeTransactionWithRisingBaseFee(t *testing.T) { // bump the basefee by 10x baseFeePerGas *= 10 // resubmit and check that the gas fee has been adjusted accordingly - newTx, err := s.resubmitTransaction(tx, baseFeePerGas, 0) + resubmittedTx, err := s.createReplacingTransaction(tx, baseFeePerGas, 0) + assert.NoError(t, err) + err = s.client.SendTransaction(s.ctx, resubmittedTx) assert.NoError(t, err) maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice) @@ -471,7 +485,7 @@ func testResubmitDynamicFeeTransactionWithRisingBaseFee(t *testing.T) { expectedGasFeeCap = maxGasPrice } - assert.Equal(t, expectedGasFeeCap.Uint64(), newTx.GasFeeCap().Uint64()) + assert.Equal(t, expectedGasFeeCap.Uint64(), resubmittedTx.GasFeeCap().Uint64()) s.Stop() } @@ -511,7 +525,9 @@ func testResubmitBlobTransactionWithRisingBaseFeeAndBlobBaseFee(t *testing.T) { baseFeePerGas *= 10 blobBaseFeePerGas *= 10 // resubmit and check that the gas fee has been adjusted accordingly - newTx, err := s.resubmitTransaction(tx, baseFeePerGas, blobBaseFeePerGas) + resubmittedTx, err := s.createReplacingTransaction(tx, baseFeePerGas, blobBaseFeePerGas) + assert.NoError(t, err) + err = s.client.SendTransaction(s.ctx, resubmittedTx) assert.NoError(t, err) maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice) @@ -526,8 +542,8 @@ func testResubmitBlobTransactionWithRisingBaseFeeAndBlobBaseFee(t *testing.T) { expectedBlobGasFeeCap = maxBlobGasPrice } - assert.Equal(t, expectedGasFeeCap.Uint64(), newTx.GasFeeCap().Uint64()) - assert.Equal(t, expectedBlobGasFeeCap.Uint64(), newTx.BlobGasFeeCap().Uint64()) + assert.Equal(t, expectedGasFeeCap.Uint64(), resubmittedTx.GasFeeCap().Uint64()) + assert.Equal(t, expectedBlobGasFeeCap.Uint64(), resubmittedTx.BlobGasFeeCap().Uint64()) s.Stop() } diff --git a/rollup/internal/controller/watcher/batch_proposer.go 
b/rollup/internal/controller/watcher/batch_proposer.go index ec25c8f247..8b69cd9c25 100644 --- a/rollup/internal/controller/watcher/batch_proposer.go +++ b/rollup/internal/controller/watcher/batch_proposer.go @@ -34,7 +34,8 @@ type BatchProposer struct { gasCostIncreaseMultiplier float64 maxUncompressedBatchBytesSize uint64 - chainCfg *params.ChainConfig + minCodecVersion encoding.CodecVersion + chainCfg *params.ChainConfig batchProposerCircleTotal prometheus.Counter proposeBatchFailureTotal prometheus.Counter @@ -58,7 +59,7 @@ type BatchProposer struct { } // NewBatchProposer creates a new BatchProposer instance. -func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer { +func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer { log.Info("new batch proposer", "maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch, "maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch, @@ -78,6 +79,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai batchTimeoutSec: cfg.BatchTimeoutSec, gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier, maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize, + minCodecVersion: minCodecVersion, chainCfg: chainCfg, batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ @@ -247,6 +249,11 @@ func (p *BatchProposer) proposeBatch() error { if codec == nil { return fmt.Errorf("failed to retrieve codec for block number %v and time %v", firstUnbatchedChunk.StartBlockNumber, firstUnbatchedChunk.StartBlockTime) } + + if codec.Version() < p.minCodecVersion { + return fmt.Errorf("unsupported codec version: %v, expected at least %v", codec.Version(), p.minCodecVersion) + } + maxChunksThisBatch := codec.MaxNumChunksPerBatch() // select at most maxChunkNumPerBatch chunks diff --git a/rollup/internal/controller/watcher/batch_proposer_test.go b/rollup/internal/controller/watcher/batch_proposer_test.go index 428f7226f7..7c426d5b10 100644 --- a/rollup/internal/controller/watcher/batch_proposer_test.go +++ b/rollup/internal/controller/watcher/batch_proposer_test.go @@ -20,7 +20,7 @@ import ( "scroll-tech/rollup/internal/utils" ) -func testBatchProposerCodecv0Limits(t *testing.T) { +func testBatchProposerLimitsCodecV4(t *testing.T) { tests := []struct { name string maxL1CommitGas uint64 @@ -60,785 +60,118 @@ func testBatchProposerCodecv0Limits(t *testing.T) { }, { name: "MaxL1CommitGasPerBatchIsFirstChunk", - maxL1CommitGas: 200000, - maxL1CommitCalldataSize: 1000000, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 1, - expectedChunksInFirstBatch: 1, - }, - { - name: "MaxL1CommitCalldataSizePerBatchIsFirstChunk", - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 298, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 1, - expectedChunksInFirstBatch: 1, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - db := setupDB(t) - defer database.CloseDB(db) - - // Add genesis batch. 
- block := &encoding.Block{ - Header: &gethTypes.Header{ - Number: big.NewInt(0), - }, - RowConsumption: &gethTypes.RowConsumption{}, - } - chunk := &encoding.Chunk{ - Blocks: []*encoding.Block{block}, - } - chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{}) - assert.NoError(t, err) - batch := &encoding.Batch{ - Index: 0, - TotalL1MessagePoppedBefore: 0, - ParentBatchHash: common.Hash{}, - Chunks: []*encoding.Chunk{chunk}, - } - batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{}) - assert.NoError(t, err) - - l2BlockOrm := orm.NewL2Block(db) - err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) - assert.NoError(t, err) - - cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ - MaxBlockNumPerChunk: 1, - MaxTxNumPerChunk: 10000, - MaxL1CommitGasPerChunk: 50000000000, - MaxL1CommitCalldataSizePerChunk: 1000000, - MaxRowConsumptionPerChunk: 1000000, - ChunkTimeoutSec: 300, - GasCostIncreaseMultiplier: 1.2, - }, &params.ChainConfig{}, db, nil) - cp.TryProposeChunk() // chunk1 contains block1 - cp.TryProposeChunk() // chunk2 contains block2 - - chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2) - assert.NoError(t, err) - assert.Equal(t, uint64(5082), chunks[0].TotalL1CommitGas) - assert.Equal(t, uint64(298), chunks[0].TotalL1CommitCalldataSize) - assert.Equal(t, uint64(93658), chunks[1].TotalL1CommitGas) - assert.Equal(t, uint64(5737), chunks[1].TotalL1CommitCalldataSize) - - bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ - MaxL1CommitGasPerBatch: tt.maxL1CommitGas, - MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize, - BatchTimeoutSec: tt.batchTimeoutSec, - GasCostIncreaseMultiplier: 1.2, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, &params.ChainConfig{}, db, nil) - bp.TryProposeBatch() - - batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0) - assert.NoError(t, err) - assert.Len(t, batches, tt.expectedBatchesLen+1) - batches = batches[1:] - if tt.expectedBatchesLen > 0 { - assert.Equal(t, uint64(1), batches[0].StartChunkIndex) - assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex) - assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus)) - assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus)) - - dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch) - assert.NoError(t, err) - assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch)) - for _, chunk := range dbChunks { - assert.Equal(t, batches[0].Hash, chunk.BatchHash) - assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus)) - } - } - }) - } -} - -func testBatchProposerCodecv1Limits(t *testing.T) { - tests := []struct { - name string - maxL1CommitGas uint64 - maxL1CommitCalldataSize uint64 - batchTimeoutSec uint64 - expectedBatchesLen int - expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0 - }{ - { - name: "NoLimitReached", - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 1000000, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 0, - }, - { - name: "Timeout", - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 1000000, - batchTimeoutSec: 0, - expectedBatchesLen: 1, - expectedChunksInFirstBatch: 2, - }, - { - name:
"MaxL1CommitGasPerBatchIs0", - maxL1CommitGas: 0, - maxL1CommitCalldataSize: 1000000, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 0, - }, - { - name: "MaxL1CommitCalldataSizePerBatchIs0", - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 0, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 0, - }, - { - name: "MaxL1CommitGasPerBatchIsFirstChunk", - maxL1CommitGas: 190330, - maxL1CommitCalldataSize: 1000000, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 1, - expectedChunksInFirstBatch: 1, - }, - { - name: "MaxL1CommitCalldataSizePerBatchIsFirstChunk", - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 60, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 1, - expectedChunksInFirstBatch: 1, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - db := setupDB(t) - defer database.CloseDB(db) - - // Add genesis batch. - block := &encoding.Block{ - Header: &gethTypes.Header{ - Number: big.NewInt(0), - }, - RowConsumption: &gethTypes.RowConsumption{}, - } - chunk := &encoding.Chunk{ - Blocks: []*encoding.Block{block}, - } - chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{}) - assert.NoError(t, err) - batch := &encoding.Batch{ - Index: 0, - TotalL1MessagePoppedBefore: 0, - ParentBatchHash: common.Hash{}, - Chunks: []*encoding.Chunk{chunk}, - } - batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{}) - assert.NoError(t, err) - - l2BlockOrm := orm.NewL2Block(db) - err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) - assert.NoError(t, err) - - cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ - MaxBlockNumPerChunk: 1, - MaxTxNumPerChunk: 10000, - MaxL1CommitGasPerChunk: 50000000000, - MaxL1CommitCalldataSizePerChunk: 1000000, - MaxRowConsumptionPerChunk: 1000000, - ChunkTimeoutSec: 300, - GasCostIncreaseMultiplier: 1.2, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, ¶ms.ChainConfig{ - BernoulliBlock: big.NewInt(0), - }, db, nil) - cp.TryProposeChunk() // chunk1 contains block1 - cp.TryProposeChunk() // chunk2 contains block2 - - chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2) - assert.NoError(t, err) - assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas) - assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize) - assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas) - assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize) - - bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ - MaxL1CommitGasPerBatch: tt.maxL1CommitGas, - MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize, - BatchTimeoutSec: tt.batchTimeoutSec, - GasCostIncreaseMultiplier: 1.2, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, ¶ms.ChainConfig{ - BernoulliBlock: big.NewInt(0), - }, db, nil) - bp.TryProposeBatch() - - batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0) - assert.NoError(t, err) - assert.Len(t, batches, tt.expectedBatchesLen+1) - batches = batches[1:] - if tt.expectedBatchesLen > 0 { - assert.Equal(t, uint64(1), batches[0].StartChunkIndex) - assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex) - assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus)) - assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus)) - - 
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch) - assert.NoError(t, err) - assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch)) - for _, chunk := range dbChunks { - assert.Equal(t, batches[0].Hash, chunk.BatchHash) - assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus)) - } - } - }) - } -} - -func testBatchProposerCodecv2Limits(t *testing.T) { - tests := []struct { - name string - maxL1CommitGas uint64 - maxL1CommitCalldataSize uint64 - batchTimeoutSec uint64 - expectedBatchesLen int - expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0 - }{ - { - name: "NoLimitReached", - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 1000000, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 0, - }, - { - name: "Timeout", - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 1000000, - batchTimeoutSec: 0, - expectedBatchesLen: 1, - expectedChunksInFirstBatch: 2, - }, - { - name: "MaxL1CommitGasPerBatchIs0", - maxL1CommitGas: 0, - maxL1CommitCalldataSize: 1000000, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 0, - }, - { - name: "MaxL1CommitCalldataSizePerBatchIs0", - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 0, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 0, - }, - { - name: "MaxL1CommitGasPerBatchIsFirstChunk", - maxL1CommitGas: 189179, - maxL1CommitCalldataSize: 1000000, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 1, - expectedChunksInFirstBatch: 1, - }, - { - name: "MaxL1CommitCalldataSizePerBatchIsFirstChunk", - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 60, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 1, - expectedChunksInFirstBatch: 1, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - db := setupDB(t) - defer database.CloseDB(db) - - // Add genesis batch. 
- block := &encoding.Block{ - Header: &gethTypes.Header{ - Number: big.NewInt(0), - }, - RowConsumption: &gethTypes.RowConsumption{}, - } - chunk := &encoding.Chunk{ - Blocks: []*encoding.Block{block}, - } - chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{}) - assert.NoError(t, err) - batch := &encoding.Batch{ - Index: 0, - TotalL1MessagePoppedBefore: 0, - ParentBatchHash: common.Hash{}, - Chunks: []*encoding.Chunk{chunk}, - } - batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{}) - assert.NoError(t, err) - - l2BlockOrm := orm.NewL2Block(db) - err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) - assert.NoError(t, err) - - cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ - MaxBlockNumPerChunk: 1, - MaxTxNumPerChunk: 10000, - MaxL1CommitGasPerChunk: 50000000000, - MaxL1CommitCalldataSizePerChunk: 1000000, - MaxRowConsumptionPerChunk: 1000000, - ChunkTimeoutSec: 300, - GasCostIncreaseMultiplier: 1.2, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, &params.ChainConfig{ - BernoulliBlock: big.NewInt(0), - CurieBlock: big.NewInt(0), - }, db, nil) - cp.TryProposeChunk() // chunk1 contains block1 - cp.TryProposeChunk() // chunk2 contains block2 - - chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2) - assert.NoError(t, err) - assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas) - assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize) - assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas) - assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize) - - bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ - MaxL1CommitGasPerBatch: tt.maxL1CommitGas, - MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize, - BatchTimeoutSec: tt.batchTimeoutSec, - GasCostIncreaseMultiplier: 1.2, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, &params.ChainConfig{ - BernoulliBlock: big.NewInt(0), - CurieBlock: big.NewInt(0), - }, db, nil) - bp.TryProposeBatch() - - batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0) - assert.NoError(t, err) - assert.Len(t, batches, tt.expectedBatchesLen+1) - batches = batches[1:] - if tt.expectedBatchesLen > 0 { - assert.Equal(t, uint64(1), batches[0].StartChunkIndex) - assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex) - assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus)) - assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus)) - - dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch) - assert.NoError(t, err) - assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch)) - for _, chunk := range dbChunks { - assert.Equal(t, batches[0].Hash, chunk.BatchHash) - assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus)) - } - } - }) - } -} - -func testBatchProposerCodecv3Limits(t *testing.T) { - tests := []struct { - name string - maxL1CommitGas uint64 - maxL1CommitCalldataSize uint64 - batchTimeoutSec uint64 - expectedBatchesLen int - expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0 - }{ - { - name: "NoLimitReached", - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 1000000, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 0, - }, - { - name:
"Timeout", - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 1000000, - batchTimeoutSec: 0, - expectedBatchesLen: 1, - expectedChunksInFirstBatch: 2, - }, - { - name: "MaxL1CommitGasPerBatchIs0", - maxL1CommitGas: 0, - maxL1CommitCalldataSize: 1000000, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 0, - }, - { - name: "MaxL1CommitCalldataSizePerBatchIs0", - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 0, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 0, - }, - { - name: "MaxL1CommitGasPerBatchIsFirstChunk", - maxL1CommitGas: 249179, - maxL1CommitCalldataSize: 1000000, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 1, - expectedChunksInFirstBatch: 1, - }, - { - name: "MaxL1CommitCalldataSizePerBatchIsFirstChunk", - maxL1CommitGas: 50000000000, - maxL1CommitCalldataSize: 60, - batchTimeoutSec: 1000000000000, - expectedBatchesLen: 1, - expectedChunksInFirstBatch: 1, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - db := setupDB(t) - defer database.CloseDB(db) - - // Add genesis batch. - block := &encoding.Block{ - Header: &gethTypes.Header{ - Number: big.NewInt(0), - }, - RowConsumption: &gethTypes.RowConsumption{}, - } - chunk := &encoding.Chunk{ - Blocks: []*encoding.Block{block}, - } - chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{}) - assert.NoError(t, err) - batch := &encoding.Batch{ - Index: 0, - TotalL1MessagePoppedBefore: 0, - ParentBatchHash: common.Hash{}, - Chunks: []*encoding.Chunk{chunk}, - } - batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{}) - assert.NoError(t, err) - - l2BlockOrm := orm.NewL2Block(db) - err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) - assert.NoError(t, err) - - cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ - MaxBlockNumPerChunk: 1, - MaxTxNumPerChunk: 10000, - MaxL1CommitGasPerChunk: 50000000000, - MaxL1CommitCalldataSizePerChunk: 1000000, - MaxRowConsumptionPerChunk: 1000000, - ChunkTimeoutSec: 300, - GasCostIncreaseMultiplier: 1.2, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, ¶ms.ChainConfig{ - LondonBlock: big.NewInt(0), - BernoulliBlock: big.NewInt(0), - CurieBlock: big.NewInt(0), - DarwinTime: new(uint64), - }, db, nil) - cp.TryProposeChunk() // chunk1 contains block1 - cp.TryProposeChunk() // chunk2 contains block2 - - chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2) - assert.NoError(t, err) - assert.Equal(t, uint64(51124), chunks[0].TotalL1CommitGas) - assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize) - assert.Equal(t, uint64(51124), chunks[1].TotalL1CommitGas) - assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize) - - bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ - MaxL1CommitGasPerBatch: tt.maxL1CommitGas, - MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize, - BatchTimeoutSec: tt.batchTimeoutSec, - GasCostIncreaseMultiplier: 1.2, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, ¶ms.ChainConfig{ - LondonBlock: big.NewInt(0), - BernoulliBlock: big.NewInt(0), - CurieBlock: big.NewInt(0), - DarwinTime: new(uint64), - }, db, nil) - bp.TryProposeBatch() - - batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0) - assert.NoError(t, err) - assert.Len(t, batches, tt.expectedBatchesLen+1) - batches = batches[1:] - if 
tt.expectedBatchesLen > 0 { - assert.Equal(t, uint64(1), batches[0].StartChunkIndex) - assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex) - assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus)) - assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus)) - - dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch) - assert.NoError(t, err) - assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch)) - for _, chunk := range dbChunks { - assert.Equal(t, batches[0].Hash, chunk.BatchHash) - assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus)) - } - } - }) - } -} - -func testBatchCommitGasAndCalldataSizeCodecv0Estimation(t *testing.T) { - db := setupDB(t) - defer database.CloseDB(db) - - // Add genesis batch. - block := &encoding.Block{ - Header: &gethTypes.Header{ - Number: big.NewInt(0), - }, - RowConsumption: &gethTypes.RowConsumption{}, - } - chunk := &encoding.Chunk{ - Blocks: []*encoding.Block{block}, - } - chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{}) - assert.NoError(t, err) - batch := &encoding.Batch{ - Index: 0, - TotalL1MessagePoppedBefore: 0, - ParentBatchHash: common.Hash{}, - Chunks: []*encoding.Chunk{chunk}, - } - batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{}) - assert.NoError(t, err) - - l2BlockOrm := orm.NewL2Block(db) - err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) - assert.NoError(t, err) - - cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ - MaxBlockNumPerChunk: 1, - MaxTxNumPerChunk: 10000, - MaxL1CommitGasPerChunk: 50000000000, - MaxL1CommitCalldataSizePerChunk: 1000000, - MaxRowConsumptionPerChunk: 1000000, - ChunkTimeoutSec: 300, - GasCostIncreaseMultiplier: 1.2, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, &params.ChainConfig{}, db, nil) - cp.TryProposeChunk() // chunk1 contains block1 - cp.TryProposeChunk() // chunk2 contains block2 - - chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2) - assert.NoError(t, err) - assert.Equal(t, uint64(5082), chunks[0].TotalL1CommitGas) - assert.Equal(t, uint64(298), chunks[0].TotalL1CommitCalldataSize) - assert.Equal(t, uint64(93658), chunks[1].TotalL1CommitGas) - assert.Equal(t, uint64(5737), chunks[1].TotalL1CommitCalldataSize) - - bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ - MaxL1CommitGasPerBatch: 50000000000, - MaxL1CommitCalldataSizePerBatch: 1000000, - BatchTimeoutSec: 0, - GasCostIncreaseMultiplier: 1.2, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, &params.ChainConfig{}, db, nil) - bp.TryProposeBatch() - - batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0) - assert.NoError(t, err) - assert.Len(t, batches, 2) - batches = batches[1:] - assert.Equal(t, uint64(1), batches[0].StartChunkIndex) - assert.Equal(t, uint64(2), batches[0].EndChunkIndex) - assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus)) - assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus)) - - dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2) - assert.NoError(t, err) - assert.Len(t, dbChunks, 2) - for _, chunk := range dbChunks { - assert.Equal(t, batches[0].Hash, chunk.BatchHash) -
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus)) - } - - assert.Equal(t, uint64(256463), batches[0].TotalL1CommitGas) - assert.Equal(t, uint64(6035), batches[0].TotalL1CommitCalldataSize) -} - -func testBatchCommitGasAndCalldataSizeCodecv1Estimation(t *testing.T) { - db := setupDB(t) - defer database.CloseDB(db) - - // Add genesis batch. - block := &encoding.Block{ - Header: &gethTypes.Header{ - Number: big.NewInt(0), - }, - RowConsumption: &gethTypes.RowConsumption{}, - } - chunk := &encoding.Chunk{ - Blocks: []*encoding.Block{block}, - } - chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{}) - assert.NoError(t, err) - batch := &encoding.Batch{ - Index: 0, - TotalL1MessagePoppedBefore: 0, - ParentBatchHash: common.Hash{}, - Chunks: []*encoding.Chunk{chunk}, - } - batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{}) - assert.NoError(t, err) - - l2BlockOrm := orm.NewL2Block(db) - err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) - assert.NoError(t, err) - - cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ - MaxBlockNumPerChunk: 1, - MaxTxNumPerChunk: 10000, - MaxL1CommitGasPerChunk: 50000000000, - MaxL1CommitCalldataSizePerChunk: 1000000, - MaxRowConsumptionPerChunk: 1000000, - ChunkTimeoutSec: 300, - GasCostIncreaseMultiplier: 1.2, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, &params.ChainConfig{BernoulliBlock: big.NewInt(0)}, db, nil) - cp.TryProposeChunk() // chunk1 contains block1 - cp.TryProposeChunk() // chunk2 contains block2 - - chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2) - assert.NoError(t, err) - assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas) - assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize) - assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas) - assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize) - - bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ - MaxL1CommitGasPerBatch: 50000000000, - MaxL1CommitCalldataSizePerBatch: 1000000, - BatchTimeoutSec: 0, - GasCostIncreaseMultiplier: 1.2, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, &params.ChainConfig{BernoulliBlock: big.NewInt(0)}, db, nil) - bp.TryProposeBatch() - - batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0) - assert.NoError(t, err) - assert.Len(t, batches, 2) - batches = batches[1:] - assert.Equal(t, uint64(1), batches[0].StartChunkIndex) - assert.Equal(t, uint64(2), batches[0].EndChunkIndex) - assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus)) - assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus)) - - dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2) - assert.NoError(t, err) - assert.Len(t, dbChunks, 2) - for _, chunk := range dbChunks { - assert.Equal(t, batches[0].Hash, chunk.BatchHash) - assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus) + maxL1CommitGas: 249179, + maxL1CommitCalldataSize: 1000000, + batchTimeoutSec: 1000000000000, + expectedBatchesLen: 1, + expectedChunksInFirstBatch: 1, + }, + { + name: "MaxL1CommitCalldataSizePerBatchIsFirstChunk", + maxL1CommitGas: 50000000000, + maxL1CommitCalldataSize: 60, + batchTimeoutSec: 1000000000000, + expectedBatchesLen: 1, +
expectedChunksInFirstBatch: 1, + }, + } - assert.Equal(t, uint64(159350), batches[0].TotalL1CommitGas) - assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize) -} - -func testBatchCommitGasAndCalldataSizeCodecv2Estimation(t *testing.T) { - db := setupDB(t) - defer database.CloseDB(db) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db := setupDB(t) + defer database.CloseDB(db) - // Add genesis batch. - block := &encoding.Block{ - Header: &gethTypes.Header{ - Number: big.NewInt(0), - }, - RowConsumption: &gethTypes.RowConsumption{}, - } - chunk := &encoding.Chunk{ - Blocks: []*encoding.Block{block}, - } - chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{}) - assert.NoError(t, err) - batch := &encoding.Batch{ - Index: 0, - TotalL1MessagePoppedBefore: 0, - ParentBatchHash: common.Hash{}, - Chunks: []*encoding.Chunk{chunk}, - } - batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{}) - assert.NoError(t, err) + // Add genesis batch. + block := &encoding.Block{ + Header: &gethTypes.Header{ + Number: big.NewInt(0), + }, + RowConsumption: &gethTypes.RowConsumption{}, + } + chunk := &encoding.Chunk{ + Blocks: []*encoding.Block{block}, + } + chunkOrm := orm.NewChunk(db) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{}) + assert.NoError(t, err) + batch := &encoding.Batch{ + Index: 0, + TotalL1MessagePoppedBefore: 0, + ParentBatchHash: common.Hash{}, + Chunks: []*encoding.Chunk{chunk}, + } + batchOrm := orm.NewBatch(db) + _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{}) + assert.NoError(t, err) - l2BlockOrm := orm.NewL2Block(db) - err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) - assert.NoError(t, err) + l2BlockOrm := orm.NewL2Block(db) + err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) + assert.NoError(t, err) - cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ - MaxBlockNumPerChunk: 1, - MaxTxNumPerChunk: 10000, - MaxL1CommitGasPerChunk: 50000000000, - MaxL1CommitCalldataSizePerChunk: 1000000, - MaxRowConsumptionPerChunk: 1000000, - ChunkTimeoutSec: 300, - GasCostIncreaseMultiplier: 1.2, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, db, nil) - cp.TryProposeChunk() // chunk1 contains block1 - cp.TryProposeChunk() // chunk2 contains block2 + cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ + MaxBlockNumPerChunk: 1, + MaxTxNumPerChunk: 10000, + MaxL1CommitGasPerChunk: 50000000000, + MaxL1CommitCalldataSizePerChunk: 1000000, + MaxRowConsumptionPerChunk: 1000000, + ChunkTimeoutSec: 300, + GasCostIncreaseMultiplier: 1.2, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, encoding.CodecV4, &params.ChainConfig{ + LondonBlock: big.NewInt(0), + BernoulliBlock: big.NewInt(0), + CurieBlock: big.NewInt(0), + DarwinTime: new(uint64), + DarwinV2Time: new(uint64), + }, db, nil) + cp.TryProposeChunk() // chunk1 contains block1 + cp.TryProposeChunk() // chunk2 contains block2 - chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2) - assert.NoError(t, err) - assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas) - assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize) - assert.Equal(t, uint64(1124),
chunks[1].TotalL1CommitGas) - assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize) + chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2) + assert.NoError(t, err) + assert.Equal(t, uint64(51124), chunks[0].TotalL1CommitGas) + assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize) + assert.Equal(t, uint64(51124), chunks[1].TotalL1CommitGas) + assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize) - bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ - MaxL1CommitGasPerBatch: 50000000000, - MaxL1CommitCalldataSizePerBatch: 1000000, - BatchTimeoutSec: 0, - GasCostIncreaseMultiplier: 1.2, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, db, nil) - bp.TryProposeBatch() + bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{ + MaxL1CommitGasPerBatch: tt.maxL1CommitGas, + MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize, + BatchTimeoutSec: tt.batchTimeoutSec, + GasCostIncreaseMultiplier: 1.2, + MaxUncompressedBatchBytesSize: math.MaxUint64, + }, encoding.CodecV4, &params.ChainConfig{ + LondonBlock: big.NewInt(0), + BernoulliBlock: big.NewInt(0), + CurieBlock: big.NewInt(0), + DarwinTime: new(uint64), + DarwinV2Time: new(uint64), + }, db, nil) + bp.TryProposeBatch() - batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0) - assert.NoError(t, err) - assert.Len(t, batches, 2) - batches = batches[1:] - assert.Equal(t, uint64(1), batches[0].StartChunkIndex) - assert.Equal(t, uint64(2), batches[0].EndChunkIndex) - assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus)) - assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus)) + batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0) + assert.NoError(t, err) + assert.Len(t, batches, tt.expectedBatchesLen+1) + batches = batches[1:] + if tt.expectedBatchesLen > 0 { + assert.Equal(t, uint64(1), batches[0].StartChunkIndex) + assert.Equal(t, tt.expectedChunksInFirstBatch, batches[0].EndChunkIndex) + assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus)) + assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus)) - dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2) - assert.NoError(t, err) - assert.Len(t, dbChunks, 2) - for _, chunk := range dbChunks { - assert.Equal(t, batches[0].Hash, chunk.BatchHash) - assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus) + dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, tt.expectedChunksInFirstBatch) + assert.NoError(t, err) + assert.Len(t, dbChunks, int(tt.expectedChunksInFirstBatch)) + for _, chunk := range dbChunks { + assert.Equal(t, batches[0].Hash, chunk.BatchHash) + assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus)) + } + } + }) } - assert.Equal(t, uint64(159350), batches[0].TotalL1CommitGas) - assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize) } -func testBatchCommitGasAndCalldataSizeCodecv3Estimation(t *testing.T) { +func testBatchCommitGasAndCalldataSizeEstimationCodecV4(t *testing.T) { db := setupDB(t) defer database.CloseDB(db) @@ -878,7 +211,7 @@ func testBatchCommitGasAndCalldataSizeCodecv3Estimation(t *testing.T) { ChunkTimeoutSec: 300, GasCostIncreaseMultiplier: 1.2, MaxUncompressedBatchBytesSize:
math.MaxUint64, - }, ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}, db, nil) + }, encoding.CodecV4, ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}, db, nil) cp.TryProposeChunk() // chunk1 contains block1 cp.TryProposeChunk() // chunk2 contains block2 @@ -895,7 +228,7 @@ func testBatchCommitGasAndCalldataSizeCodecv3Estimation(t *testing.T) { BatchTimeoutSec: 0, GasCostIncreaseMultiplier: 1.2, MaxUncompressedBatchBytesSize: math.MaxUint64, - }, ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}, db, nil) + }, encoding.CodecV4, ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}, db, nil) bp.TryProposeBatch() batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0) @@ -919,8 +252,8 @@ func testBatchCommitGasAndCalldataSizeCodecv3Estimation(t *testing.T) { assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize) } -func testBatchProposerBlobSizeLimit(t *testing.T) { - codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3} +func testBatchProposerBlobSizeLimitCodecV4(t *testing.T) { + codecVersions := []encoding.CodecVersion{encoding.CodecV4} for _, codecVersion := range codecVersions { db := setupDB(t) @@ -948,14 +281,10 @@ func testBatchProposerBlobSizeLimit(t *testing.T) { assert.NoError(t, err) var chainConfig *params.ChainConfig - if codecVersion == encoding.CodecV0 { // will never hit blob size limit - chainConfig = ¶ms.ChainConfig{} - } else if codecVersion == encoding.CodecV1 { - chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0)} - } else if codecVersion == encoding.CodecV2 { - chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} + if codecVersion == encoding.CodecV4 { + chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)} } else { - chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} + assert.Fail(t, "unsupported codec version, expected CodecV4") } cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ @@ -967,7 +296,7 @@ func testBatchProposerBlobSizeLimit(t *testing.T) { ChunkTimeoutSec: 0, GasCostIncreaseMultiplier: 1, MaxUncompressedBatchBytesSize: math.MaxUint64, - }, chainConfig, db, nil) + }, encoding.CodecV4, chainConfig, db, nil) blockHeight := int64(0) block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json") @@ -988,7 +317,7 @@ func testBatchProposerBlobSizeLimit(t *testing.T) { BatchTimeoutSec: math.MaxUint32, GasCostIncreaseMultiplier: 1, MaxUncompressedBatchBytesSize: math.MaxUint64, - }, chainConfig, db, nil) + }, encoding.CodecV4, chainConfig, db, nil) for i := 0; i < 2; i++ { bp.TryProposeBatch() @@ -1000,18 +329,11 @@ func testBatchProposerBlobSizeLimit(t *testing.T) { var expectedNumBatches int var numChunksMultiplier uint64 - if codecVersion == encoding.CodecV0 { - expectedNumBatches = 2 - numChunksMultiplier = 15 - } else if codecVersion == encoding.CodecV1 { - expectedNumBatches = 2 - numChunksMultiplier = 1 - } else if 
codecVersion == encoding.CodecV2 { + if codecVersion == encoding.CodecV4 { expectedNumBatches = 2 numChunksMultiplier = 45 } else { - expectedNumBatches = 2 - numChunksMultiplier = 45 + assert.Fail(t, "unsupported codec version, expected CodecV4") } assert.Len(t, batches, expectedNumBatches) @@ -1022,8 +344,8 @@ func testBatchProposerBlobSizeLimit(t *testing.T) { } } -func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) { - codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3} +func testBatchProposerMaxChunkNumPerBatchLimitCodecV4(t *testing.T) { + codecVersions := []encoding.CodecVersion{encoding.CodecV4} for _, codecVersion := range codecVersions { db := setupDB(t) @@ -1052,18 +374,11 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) { var expectedChunkNum uint64 var chainConfig *params.ChainConfig - if codecVersion == encoding.CodecV0 { - chainConfig = ¶ms.ChainConfig{} - expectedChunkNum = 15 - } else if codecVersion == encoding.CodecV1 { - chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0)} - expectedChunkNum = 15 - } else if codecVersion == encoding.CodecV2 { - chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} + if codecVersion == encoding.CodecV4 { + chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)} expectedChunkNum = 45 } else { - chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} - expectedChunkNum = 45 + assert.Fail(t, "unsupported codec version, expected CodecV4") } cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ @@ -1075,7 +390,7 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) { ChunkTimeoutSec: 0, GasCostIncreaseMultiplier: 1, MaxUncompressedBatchBytesSize: math.MaxUint64, - }, chainConfig, db, nil) + }, encoding.CodecV4, chainConfig, db, nil) block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json") for blockHeight := int64(1); blockHeight <= 60; blockHeight++ { @@ -1091,7 +406,7 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) { BatchTimeoutSec: math.MaxUint32, GasCostIncreaseMultiplier: 1, MaxUncompressedBatchBytesSize: math.MaxUint64, - }, chainConfig, db, nil) + }, encoding.CodecV4, chainConfig, db, nil) bp.TryProposeBatch() batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0) @@ -1104,86 +419,3 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) { database.CloseDB(db) } } - -func testBatchProposerRespectHardforks(t *testing.T) { - db := setupDB(t) - defer database.CloseDB(db) - - chainConfig := ¶ms.ChainConfig{ - LondonBlock: big.NewInt(0), - BernoulliBlock: big.NewInt(1), - CurieBlock: big.NewInt(2), - DarwinTime: func() *uint64 { t := uint64(4); return &t }(), - } - - // Add genesis batch. 
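Note: the CodecV4 chain config literal (London/Bernoulli/Curie at block 0, Darwin and Darwin V2 at timestamp 0) recurs in nearly every test this PR touches. A hypothetical test helper, illustrative only and not part of this diff, could collapse that repetition:

    // codecV4ChainConfig returns a chain config with every fork active from
    // genesis, matching the literal repeated across the CodecV4 tests above.
    // The helper name is illustrative; the diff inlines the literal instead.
    func codecV4ChainConfig() *params.ChainConfig {
        return &params.ChainConfig{
            LondonBlock:    big.NewInt(0),
            BernoulliBlock: big.NewInt(0),
            CurieBlock:     big.NewInt(0),
            DarwinTime:     new(uint64), // pointer to 0: active from timestamp 0
            DarwinV2Time:   new(uint64),
        }
    }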
diff --git a/rollup/internal/controller/watcher/bundle_proposer.go b/rollup/internal/controller/watcher/bundle_proposer.go
index 686ad580c0..1a37a6265a 100644
--- a/rollup/internal/controller/watcher/bundle_proposer.go
+++ b/rollup/internal/controller/watcher/bundle_proposer.go
@@ -3,6 +3,7 @@ package watcher
 import (
 	"context"
 	"errors"
+	"fmt"
 	"time"

 	"github.com/prometheus/client_golang/prometheus"
@@ -28,7 +29,8 @@ type BundleProposer struct {
 	maxBatchNumPerBundle uint64
 	bundleTimeoutSec     uint64

-	chainCfg *params.ChainConfig
+	minCodecVersion encoding.CodecVersion
+	chainCfg        *params.ChainConfig

 	bundleProposerCircleTotal prometheus.Counter
 	proposeBundleFailureTotal prometheus.Counter
@@ -40,7 +42,7 @@
 }

 // NewBundleProposer creates a new BundleProposer instance.
-func NewBundleProposer(ctx context.Context, cfg *config.BundleProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BundleProposer {
+func NewBundleProposer(ctx context.Context, cfg *config.BundleProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BundleProposer {
 	log.Info("new bundle proposer", "bundleBatchesNum", cfg.MaxBatchNumPerBundle, "bundleTimeoutSec", cfg.BundleTimeoutSec)

 	p := &BundleProposer{
@@ -51,6 +53,7 @@ func NewBundleProposer(ctx context.Context, cfg *config.BundleProposerConfig, ch
 		bundleOrm:            orm.NewBundle(db),
 		maxBatchNumPerBundle: cfg.MaxBatchNumPerBundle,
 		bundleTimeoutSec:     cfg.BundleTimeoutSec,
+		minCodecVersion:      minCodecVersion,
 		chainCfg:             chainCfg,

 		bundleProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
@@ -130,7 +133,7 @@ func (p *BundleProposer) proposeBundle() error {
 	// select at most maxBatchesThisBundle batches
 	maxBatchesThisBundle := p.maxBatchNumPerBundle
-	batches, err := p.batchOrm.GetBatchesGEIndexGECodecVersion(p.ctx, firstUnbundledBatchIndex, encoding.CodecV3, int(maxBatchesThisBundle))
+	batches, err := p.batchOrm.GetBatchesGEIndexGECodecVersion(p.ctx, firstUnbundledBatchIndex, p.minCodecVersion, int(maxBatchesThisBundle))
 	if err != nil {
 		return err
 	}
@@ -153,6 +156,11 @@
 	hardforkName := encoding.GetHardforkName(p.chainCfg, firstChunk.StartBlockNumber, firstChunk.StartBlockTime)
 	codecVersion := encoding.CodecVersion(batches[0].CodecVersion)
+
+	if codecVersion < p.minCodecVersion {
+		return fmt.Errorf("unsupported codec version: %v, expected at least %v", codecVersion, p.minCodecVersion)
+	}
+
 	for i := 1; i < len(batches); i++ {
 		chunk, err := p.chunkOrm.GetChunkByIndex(p.ctx, batches[i].StartChunkIndex)
 		if err != nil {
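The bundle proposer and the chunk proposer (below) add the same guard, so both share one failure mode: work whose codec predates the configured minimum is rejected with an error rather than silently proposed. A distilled sketch of the shared pattern, a standalone illustration rather than a helper introduced by this diff (the encoding import path is assumed from the repo's other files):

    // ensureMinCodecVersion mirrors the checks added to proposeChunk and
    // proposeBundle: refuse to propose when the codec selected for the
    // pending work is older than the operator-configured minimum.
    func ensureMinCodecVersion(selected, min encoding.CodecVersion) error {
        if selected < min {
            return fmt.Errorf("unsupported codec version: %v, expected at least %v", selected, min)
        }
        return nil
    }

Because the error aborts the whole propose attempt instead of skipping the offending batch, a misconfigured minimum should surface repeatedly (presumably via the existing proposeBundleFailureTotal counter) rather than produce undersized bundles.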
diff --git a/rollup/internal/controller/watcher/bundle_proposer_test.go b/rollup/internal/controller/watcher/bundle_proposer_test.go
index c866f5f321..4a0a4219b0 100644
--- a/rollup/internal/controller/watcher/bundle_proposer_test.go
+++ b/rollup/internal/controller/watcher/bundle_proposer_test.go
@@ -20,7 +20,7 @@ import (
 	"scroll-tech/rollup/internal/utils"
 )

-func testBundleProposerLimits(t *testing.T) {
+func testBundleProposerLimitsCodecV4(t *testing.T) {
 	tests := []struct {
 		name                 string
 		maxBatchNumPerBundle uint64
@@ -88,7 +88,7 @@
 			err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
 			assert.NoError(t, err)

-			chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
+			chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}

 			cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
 				MaxBlockNumPerChunk:             1,
@@ -99,7 +99,7 @@
 				ChunkTimeoutSec:                 math.MaxUint32,
 				GasCostIncreaseMultiplier:       1,
 				MaxUncompressedBatchBytesSize:   math.MaxUint64,
-			}, chainConfig, db, nil)
+			}, encoding.CodecV4, chainConfig, db, nil)

 			bap := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
 				MaxL1CommitGasPerBatch:          math.MaxUint64,
@@ -107,7 +107,7 @@
 				BatchTimeoutSec:                 0,
 				GasCostIncreaseMultiplier:       1,
 				MaxUncompressedBatchBytesSize:   math.MaxUint64,
-			}, chainConfig, db, nil)
+			}, encoding.CodecV4, chainConfig, db, nil)

 			cp.TryProposeChunk()  // chunk1 contains block1
 			bap.TryProposeBatch() // batch1 contains chunk1
@@ -117,7 +117,7 @@
 			bup := NewBundleProposer(context.Background(), &config.BundleProposerConfig{
 				MaxBatchNumPerBundle: tt.maxBatchNumPerBundle,
 				BundleTimeoutSec:     tt.bundleTimeoutSec,
-			}, chainConfig, db, nil)
+			}, encoding.CodecV4, chainConfig, db, nil)

 			bup.TryProposeBundle()

@@ -134,94 +134,3 @@
 		})
 	}
 }
-
-func testBundleProposerRespectHardforks(t *testing.T) {
-	db := setupDB(t)
-	defer database.CloseDB(db)
-
-	chainConfig := &params.ChainConfig{
-		LondonBlock:    big.NewInt(0),
-		BernoulliBlock: big.NewInt(1),
-		CurieBlock:     big.NewInt(2),
-		DarwinTime:     func() *uint64 { t := uint64(4); return &t }(),
-	}
-
-	// Add genesis batch.
-	block := &encoding.Block{
-		Header: &gethTypes.Header{
-			Number: big.NewInt(0),
-		},
-		RowConsumption: &gethTypes.RowConsumption{},
-	}
-	chunk := &encoding.Chunk{
-		Blocks: []*encoding.Block{block},
-	}
-	chunkOrm := orm.NewChunk(db)
-	_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
-	assert.NoError(t, err)
-	batch := &encoding.Batch{
-		Index:                      0,
-		TotalL1MessagePoppedBefore: 0,
-		ParentBatchHash:            common.Hash{},
-		Chunks:                     []*encoding.Chunk{chunk},
-	}
-	batchOrm := orm.NewBatch(db)
-	_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
-	assert.NoError(t, err)
-
-	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-		MaxBlockNumPerChunk:             math.MaxUint64,
-		MaxTxNumPerChunk:                math.MaxUint64,
-		MaxL1CommitGasPerChunk:          math.MaxUint64,
-		MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
-		MaxRowConsumptionPerChunk:       math.MaxUint64,
-		ChunkTimeoutSec:                 0,
-		GasCostIncreaseMultiplier:       1,
-		MaxUncompressedBatchBytesSize:   math.MaxUint64,
-	}, chainConfig, db, nil)
-
-	block = readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
-	for i := int64(1); i <= 60; i++ {
-		block.Header.Number = big.NewInt(i)
-		block.Header.Time = uint64(i)
-		err = orm.NewL2Block(db).InsertL2Blocks(context.Background(), []*encoding.Block{block})
-		assert.NoError(t, err)
-	}
-
-	for i := 0; i < 5; i++ {
-		cp.TryProposeChunk()
-	}
-
-	bap := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
-		MaxL1CommitGasPerBatch:          math.MaxUint64,
-		MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
-		BatchTimeoutSec:                 0,
-		GasCostIncreaseMultiplier:       1,
-		MaxUncompressedBatchBytesSize:   math.MaxUint64,
-	}, chainConfig, db, nil)
-
-	for i := 0; i < 5; i++ {
-		bap.TryProposeBatch()
-	}
-
-	bup := NewBundleProposer(context.Background(), &config.BundleProposerConfig{
-		MaxBatchNumPerBundle: math.MaxUint64,
-		BundleTimeoutSec:     0,
-	}, chainConfig, db, nil)
-
-	for i := 0; i < 5; i++ {
-		bup.TryProposeBundle()
-	}
-
-	bundleOrm := orm.NewBundle(db)
-	bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, []string{}, 0)
-	assert.NoError(t, err)
-	assert.Len(t, bundles, 1)
-
-	expectedStartBatchIndices := []uint64{3}
-	expectedEndChunkIndices := []uint64{3}
-	for i, bundle := range bundles {
-		assert.Equal(t, expectedStartBatchIndices[i], bundle.StartBatchIndex)
-		assert.Equal(t, expectedEndChunkIndices[i], bundle.EndBatchIndex)
-	}
-}
diff --git a/rollup/internal/controller/watcher/chunk_proposer.go b/rollup/internal/controller/watcher/chunk_proposer.go
index 99eaca9d44..87056e6464 100644
--- a/rollup/internal/controller/watcher/chunk_proposer.go
+++ b/rollup/internal/controller/watcher/chunk_proposer.go
@@ -34,7 +34,8 @@ type ChunkProposer struct {
 	gasCostIncreaseMultiplier     float64
 	maxUncompressedBatchBytesSize uint64

-	chainCfg *params.ChainConfig
+	minCodecVersion encoding.CodecVersion
+	chainCfg        *params.ChainConfig

 	chunkProposerCircleTotal prometheus.Counter
 	proposeChunkFailureTotal prometheus.Counter
@@ -60,7 +61,7 @@
 }

 // NewChunkProposer creates a new ChunkProposer instance.
-func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
+func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
 	log.Info("new chunk proposer",
 		"maxBlockNumPerChunk", cfg.MaxBlockNumPerChunk,
 		"maxTxNumPerChunk", cfg.MaxTxNumPerChunk,
@@ -85,6 +86,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
 		chunkTimeoutSec:               cfg.ChunkTimeoutSec,
 		gasCostIncreaseMultiplier:     cfg.GasCostIncreaseMultiplier,
 		maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
+		minCodecVersion:               minCodecVersion,
 		chainCfg:                      chainCfg,

 		chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
@@ -277,6 +279,10 @@ func (p *ChunkProposer) proposeChunk() error {

 	codecVersion := encoding.GetCodecVersion(p.chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time)

+	if codecVersion < p.minCodecVersion {
+		return fmt.Errorf("unsupported codec version: %v, expected at least %v", codecVersion, p.minCodecVersion)
+	}
+
 	// Including Curie block in a sole chunk.
 	if p.chainCfg.CurieBlock != nil && blocks[0].Header.Number.Cmp(p.chainCfg.CurieBlock) == 0 {
 		chunk := encoding.Chunk{Blocks: blocks[:1]}
diff --git a/rollup/internal/controller/watcher/chunk_proposer_test.go b/rollup/internal/controller/watcher/chunk_proposer_test.go
index 31d4c1ddf4..4886fbebe0 100644
--- a/rollup/internal/controller/watcher/chunk_proposer_test.go
+++ b/rollup/internal/controller/watcher/chunk_proposer_test.go
@@ -16,517 +16,7 @@ import (
 	"scroll-tech/rollup/internal/orm"
 )

-func testChunkProposerCodecv0Limits(t *testing.T) {
-	tests := []struct {
-		name                       string
-		maxBlockNum                uint64
-		maxTxNum                   uint64
-		maxL1CommitGas             uint64
-		maxL1CommitCalldataSize    uint64
-		maxRowConsumption          uint64
-		chunkTimeoutSec            uint64
-		expectedChunksLen          int
-		expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
-	}{
-		{
-			name:                    "NoLimitReached",
-			maxBlockNum:             100,
-			maxTxNum:                10000,
-			maxL1CommitGas:          50000000000,
-			maxL1CommitCalldataSize: 1000000,
-			maxRowConsumption:       1000000,
-			chunkTimeoutSec:         1000000000000,
-			expectedChunksLen:       0,
-		},
-		{
-			name:                       "Timeout",
-			maxBlockNum:                100,
-			maxTxNum:                   10000,
-			maxL1CommitGas:             50000000000,
-			maxL1CommitCalldataSize:    1000000,
-			maxRowConsumption:          1000000,
-			chunkTimeoutSec:            0,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 2,
-		},
-		{
-			name:                    "MaxTxNumPerChunkIs0",
-			maxBlockNum:             10,
-			maxTxNum:                0,
-			maxL1CommitGas:          50000000000,
-			maxL1CommitCalldataSize: 1000000,
-			maxRowConsumption:       1000000,
-			chunkTimeoutSec:         1000000000000,
-			expectedChunksLen:       0,
-		},
-		{
-			name:                    "MaxL1CommitGasPerChunkIs0",
-			maxBlockNum:             10,
-			maxTxNum:                10000,
-			maxL1CommitGas:          0,
-			maxL1CommitCalldataSize: 1000000,
-			maxRowConsumption:       1000000,
-			chunkTimeoutSec:         1000000000000,
-			expectedChunksLen:       0,
-		},
-		{
-			name:                    "MaxL1CommitCalldataSizePerChunkIs0",
-			maxBlockNum:             10,
-			maxTxNum:                10000,
-			maxL1CommitGas:          50000000000,
-			maxL1CommitCalldataSize: 0,
-			maxRowConsumption:       1000000,
-			chunkTimeoutSec:         1000000000000,
-			expectedChunksLen:       0,
-		},
-		{
-			name:                    "MaxRowConsumptionPerChunkIs0",
-			maxBlockNum:             100,
-			maxTxNum:                10000,
-			maxL1CommitGas:          50000000000,
-			maxL1CommitCalldataSize: 1000000,
-			maxRowConsumption:       0,
-			chunkTimeoutSec:         1000000000000,
-			expectedChunksLen:       0,
-		},
-		{
-			name:                       "MaxBlockNumPerChunkIs1",
-			maxBlockNum:                1,
-			maxTxNum:                   10000,
-			maxL1CommitGas:             50000000000,
-			maxL1CommitCalldataSize:    1000000,
-			maxRowConsumption:          1000000,
-			chunkTimeoutSec:            1000000000000,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 1,
-		},
-		{
-			name:                       "MaxTxNumPerChunkIsFirstBlock",
-			maxBlockNum:                10,
-			maxTxNum:                   2,
-			maxL1CommitGas:             50000000000,
-			maxL1CommitCalldataSize:    1000000,
-			maxRowConsumption:          1000000,
-			chunkTimeoutSec:            1000000000000,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 1,
-		},
-		{
-			name:                       "MaxL1CommitGasPerChunkIsFirstBlock",
-			maxBlockNum:                10,
-			maxTxNum:                   10000,
-			maxL1CommitGas:             7250,
-			maxL1CommitCalldataSize:    1000000,
-			maxRowConsumption:          1000000,
-			chunkTimeoutSec:            1000000000000,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 1,
-		},
-		{
-			name:                       "MaxL1CommitCalldataSizePerChunkIsFirstBlock",
-			maxBlockNum:                10,
-			maxTxNum:                   10000,
-			maxL1CommitGas:             50000000000,
-			maxL1CommitCalldataSize:    298,
-			maxRowConsumption:          1000000,
-			chunkTimeoutSec:            1000000000000,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 1,
-		},
-		{
-			name:                       "MaxRowConsumptionPerChunkIs1",
-			maxBlockNum:                10,
-			maxTxNum:                   10000,
-			maxL1CommitGas:             50000000000,
-			maxL1CommitCalldataSize:    1000000,
-			maxRowConsumption:          1,
-			chunkTimeoutSec:            1000000000000,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 1,
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			db := setupDB(t)
-			defer database.CloseDB(db)
-
-			l2BlockOrm := orm.NewL2Block(db)
-			err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
-			assert.NoError(t, err)
-
-			cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-				MaxBlockNumPerChunk:             tt.maxBlockNum,
-				MaxTxNumPerChunk:                tt.maxTxNum,
-				MaxL1CommitGasPerChunk:          tt.maxL1CommitGas,
-				MaxL1CommitCalldataSizePerChunk: tt.maxL1CommitCalldataSize,
-				MaxRowConsumptionPerChunk:       tt.maxRowConsumption,
-				ChunkTimeoutSec:                 tt.chunkTimeoutSec,
-				GasCostIncreaseMultiplier:       1.2,
-				MaxUncompressedBatchBytesSize:   math.MaxUint64,
-			}, &params.ChainConfig{}, db, nil)
-			cp.TryProposeChunk()
-
-			chunkOrm := orm.NewChunk(db)
-			chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
-			assert.NoError(t, err)
-			assert.Len(t, chunks, tt.expectedChunksLen)
-
-			if len(chunks) > 0 {
-				blockOrm := orm.NewL2Block(db)
-				chunkHashes, err := blockOrm.GetChunkHashes(context.Background(), tt.expectedBlocksInFirstChunk)
-				assert.NoError(t, err)
-				assert.Len(t, chunkHashes, tt.expectedBlocksInFirstChunk)
-				firstChunkHash := chunks[0].Hash
-				for _, chunkHash := range chunkHashes {
-					assert.Equal(t, firstChunkHash, chunkHash)
-				}
-			}
-		})
-	}
-}
-
-func testChunkProposerCodecv1Limits(t *testing.T) {
-	tests := []struct {
-		name                       string
-		maxBlockNum                uint64
-		maxTxNum                   uint64
-		maxL1CommitGas             uint64
-		maxL1CommitCalldataSize    uint64
-		maxRowConsumption          uint64
-		chunkTimeoutSec            uint64
-		expectedChunksLen          int
-		expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
-	}{
-		{
-			name:                    "NoLimitReached",
-			maxBlockNum:             100,
-			maxTxNum:                10000,
-			maxL1CommitGas:          50000000000,
-			maxL1CommitCalldataSize: 1000000,
-			maxRowConsumption:       1000000,
-			chunkTimeoutSec:         1000000000000,
-			expectedChunksLen:       0,
-		},
-		{
-			name:                       "Timeout",
-			maxBlockNum:                100,
-			maxTxNum:                   10000,
-			maxL1CommitGas:             50000000000,
-			maxL1CommitCalldataSize:    1000000,
-			maxRowConsumption:          1000000,
-			chunkTimeoutSec:            0,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 2,
-		},
-		{
-			name:                    "MaxTxNumPerChunkIs0",
-			maxBlockNum:             10,
-			maxTxNum:                0,
-			maxL1CommitGas:          50000000000,
-			maxL1CommitCalldataSize: 1000000,
-			maxRowConsumption:       1000000,
-			chunkTimeoutSec:         1000000000000,
-			expectedChunksLen:       0,
-		},
-		{
-			name:                    "MaxL1CommitGasPerChunkIs0",
-			maxBlockNum:             10,
-			maxTxNum:                10000,
-			maxL1CommitGas:          0,
-			maxL1CommitCalldataSize: 1000000,
-			maxRowConsumption:       1000000,
-			chunkTimeoutSec:         1000000000000,
-			expectedChunksLen:       0,
-		},
-		{
-			name:                    "MaxL1CommitCalldataSizePerChunkIs0",
-			maxBlockNum:             10,
-			maxTxNum:                10000,
-			maxL1CommitGas:          50000000000,
-			maxL1CommitCalldataSize: 0,
-			maxRowConsumption:       1000000,
-			chunkTimeoutSec:         1000000000000,
-			expectedChunksLen:       0,
-		},
-		{
-			name:                    "MaxRowConsumptionPerChunkIs0",
-			maxBlockNum:             100,
-			maxTxNum:                10000,
-			maxL1CommitGas:          50000000000,
-			maxL1CommitCalldataSize: 1000000,
-			maxRowConsumption:       0,
-			chunkTimeoutSec:         1000000000000,
-			expectedChunksLen:       0,
-		},
-		{
-			name:                       "MaxBlockNumPerChunkIs1",
-			maxBlockNum:                1,
-			maxTxNum:                   10000,
-			maxL1CommitGas:             50000000000,
-			maxL1CommitCalldataSize:    1000000,
-			maxRowConsumption:          1000000,
-			chunkTimeoutSec:            1000000000000,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 1,
-		},
-		{
-			name:                       "MaxTxNumPerChunkIsFirstBlock",
-			maxBlockNum:                10,
-			maxTxNum:                   2,
-			maxL1CommitGas:             50000000000,
-			maxL1CommitCalldataSize:    1000000,
-			maxRowConsumption:          1000000,
-			chunkTimeoutSec:            1000000000000,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 1,
-		},
-		{
-			name:                       "MaxL1CommitGasPerChunkIsFirstBlock",
-			maxBlockNum:                10,
-			maxTxNum:                   10000,
-			maxL1CommitGas:             2500,
-			maxL1CommitCalldataSize:    1000000,
-			maxRowConsumption:          1000000,
-			chunkTimeoutSec:            1000000000000,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 1,
-		},
-		{
-			name:                       "MaxL1CommitCalldataSizePerChunkIsFirstBlock",
-			maxBlockNum:                10,
-			maxTxNum:                   10000,
-			maxL1CommitGas:             50000000000,
-			maxL1CommitCalldataSize:    60,
-			maxRowConsumption:          1000000,
-			chunkTimeoutSec:            1000000000000,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 1,
-		},
-		{
-			name:                       "MaxRowConsumptionPerChunkIs1",
-			maxBlockNum:                10,
-			maxTxNum:                   10000,
-			maxL1CommitGas:             50000000000,
-			maxL1CommitCalldataSize:    1000000,
-			maxRowConsumption:          1,
-			chunkTimeoutSec:            1000000000000,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 1,
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			db := setupDB(t)
-			defer database.CloseDB(db)
-
-			l2BlockOrm := orm.NewL2Block(db)
-			err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
-			assert.NoError(t, err)
-
-			cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-				MaxBlockNumPerChunk:             tt.maxBlockNum,
-				MaxTxNumPerChunk:                tt.maxTxNum,
-				MaxL1CommitGasPerChunk:          tt.maxL1CommitGas,
-				MaxL1CommitCalldataSizePerChunk: tt.maxL1CommitCalldataSize,
-				MaxRowConsumptionPerChunk:       tt.maxRowConsumption,
-				ChunkTimeoutSec:                 tt.chunkTimeoutSec,
-				GasCostIncreaseMultiplier:       1.2,
-				MaxUncompressedBatchBytesSize:   math.MaxUint64,
-			}, &params.ChainConfig{BernoulliBlock: big.NewInt(0)}, db, nil)
-			cp.TryProposeChunk()
-
-			chunkOrm := orm.NewChunk(db)
-			chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
-			assert.NoError(t, err)
-			assert.Len(t, chunks, tt.expectedChunksLen)
-
-			if len(chunks) > 0 {
-				blockOrm := orm.NewL2Block(db)
-				chunkHashes, err := blockOrm.GetChunkHashes(context.Background(), tt.expectedBlocksInFirstChunk)
-				assert.NoError(t, err)
-				assert.Len(t, chunkHashes, tt.expectedBlocksInFirstChunk)
-				firstChunkHash := chunks[0].Hash
-				for _, chunkHash := range chunkHashes {
-					assert.Equal(t, firstChunkHash, chunkHash)
-				}
-			}
-		})
-	}
-}
-
-func testChunkProposerCodecv2Limits(t *testing.T) {
-	tests := []struct {
-		name                       string
-		maxBlockNum                uint64
-		maxTxNum                   uint64
-		maxL1CommitGas             uint64
-		maxL1CommitCalldataSize    uint64
-		maxRowConsumption          uint64
-		chunkTimeoutSec            uint64
-		expectedChunksLen          int
-		expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
-	}{
-		{
-			name:                    "NoLimitReached",
-			maxBlockNum:             100,
-			maxTxNum:                10000,
-			maxL1CommitGas:          50000000000,
-			maxL1CommitCalldataSize: 1000000,
-			maxRowConsumption:       1000000,
-			chunkTimeoutSec:         1000000000000,
-			expectedChunksLen:       0,
-		},
-		{
-			name:                       "Timeout",
-			maxBlockNum:                100,
-			maxTxNum:                   10000,
-			maxL1CommitGas:             50000000000,
-			maxL1CommitCalldataSize:    1000000,
-			maxRowConsumption:          1000000,
-			chunkTimeoutSec:            0,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 2,
-		},
-		{
-			name:                    "MaxTxNumPerChunkIs0",
-			maxBlockNum:             10,
-			maxTxNum:                0,
-			maxL1CommitGas:          50000000000,
-			maxL1CommitCalldataSize: 1000000,
-			maxRowConsumption:       1000000,
-			chunkTimeoutSec:         1000000000000,
-			expectedChunksLen:       0,
-		},
-		{
-			name:                    "MaxL1CommitGasPerChunkIs0",
-			maxBlockNum:             10,
-			maxTxNum:                10000,
-			maxL1CommitGas:          0,
-			maxL1CommitCalldataSize: 1000000,
-			maxRowConsumption:       1000000,
-			chunkTimeoutSec:         1000000000000,
-			expectedChunksLen:       0,
-		},
-		{
-			name:                    "MaxL1CommitCalldataSizePerChunkIs0",
-			maxBlockNum:             10,
-			maxTxNum:                10000,
-			maxL1CommitGas:          50000000000,
-			maxL1CommitCalldataSize: 0,
-			maxRowConsumption:       1000000,
-			chunkTimeoutSec:         1000000000000,
-			expectedChunksLen:       0,
-		},
-		{
-			name:                    "MaxRowConsumptionPerChunkIs0",
-			maxBlockNum:             100,
-			maxTxNum:                10000,
-			maxL1CommitGas:          50000000000,
-			maxL1CommitCalldataSize: 1000000,
-			maxRowConsumption:       0,
-			chunkTimeoutSec:         1000000000000,
-			expectedChunksLen:       0,
-		},
-		{
-			name:                       "MaxBlockNumPerChunkIs1",
-			maxBlockNum:                1,
-			maxTxNum:                   10000,
-			maxL1CommitGas:             50000000000,
-			maxL1CommitCalldataSize:    1000000,
-			maxRowConsumption:          1000000,
-			chunkTimeoutSec:            1000000000000,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 1,
-		},
-		{
-			name:                       "MaxTxNumPerChunkIsFirstBlock",
-			maxBlockNum:                10,
-			maxTxNum:                   2,
-			maxL1CommitGas:             50000000000,
-			maxL1CommitCalldataSize:    1000000,
-			maxRowConsumption:          1000000,
-			chunkTimeoutSec:            1000000000000,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 1,
-		},
-		{
-			name:                       "MaxL1CommitGasPerChunkIsFirstBlock",
-			maxBlockNum:                10,
-			maxTxNum:                   10000,
-			maxL1CommitGas:             2500,
-			maxL1CommitCalldataSize:    1000000,
-			maxRowConsumption:          1000000,
-			chunkTimeoutSec:            1000000000000,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 1,
-		},
-		{
-			name:                       "MaxL1CommitCalldataSizePerChunkIsFirstBlock",
-			maxBlockNum:                10,
-			maxTxNum:                   10000,
-			maxL1CommitGas:             50000000000,
-			maxL1CommitCalldataSize:    60,
-			maxRowConsumption:          1000000,
-			chunkTimeoutSec:            1000000000000,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 1,
-		},
-		{
-			name:                       "MaxRowConsumptionPerChunkIs1",
-			maxBlockNum:                10,
-			maxTxNum:                   10000,
-			maxL1CommitGas:             50000000000,
-			maxL1CommitCalldataSize:    1000000,
-			maxRowConsumption:          1,
-			chunkTimeoutSec:            1000000000000,
-			expectedChunksLen:          1,
-			expectedBlocksInFirstChunk: 1,
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			db := setupDB(t)
-			defer database.CloseDB(db)
-
-			l2BlockOrm := orm.NewL2Block(db)
-			err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
-			assert.NoError(t, err)
-
-			cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-				MaxBlockNumPerChunk:             tt.maxBlockNum,
-				MaxTxNumPerChunk:                tt.maxTxNum,
-				MaxL1CommitGasPerChunk:          tt.maxL1CommitGas,
-				MaxL1CommitCalldataSizePerChunk: tt.maxL1CommitCalldataSize,
-				MaxRowConsumptionPerChunk:       tt.maxRowConsumption,
-				ChunkTimeoutSec:                 tt.chunkTimeoutSec,
-				GasCostIncreaseMultiplier:       1.2,
-				MaxUncompressedBatchBytesSize:   math.MaxUint64,
-			}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, db, nil)
-			cp.TryProposeChunk()
-
-			chunkOrm := orm.NewChunk(db)
-			chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
-			assert.NoError(t, err)
-			assert.Len(t, chunks, tt.expectedChunksLen)
-
-			if len(chunks) > 0 {
-				blockOrm := orm.NewL2Block(db)
-				chunkHashes, err := blockOrm.GetChunkHashes(context.Background(), tt.expectedBlocksInFirstChunk)
-				assert.NoError(t, err)
-				assert.Len(t, chunkHashes, tt.expectedBlocksInFirstChunk)
-				firstChunkHash := chunks[0].Hash
-				for _, chunkHash := range chunkHashes {
-					assert.Equal(t, firstChunkHash, chunkHash)
-				}
-			}
-		})
-	}
-}
-
-func testChunkProposerCodecv3Limits(t *testing.T) {
+func testChunkProposerLimitsCodecV4(t *testing.T) {
 	tests := []struct {
 		name        string
 		maxBlockNum uint64
@@ -674,7 +164,7 @@
 				ChunkTimeoutSec:                 tt.chunkTimeoutSec,
 				GasCostIncreaseMultiplier:       1.2,
 				MaxUncompressedBatchBytesSize:   math.MaxUint64,
-			}, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}, db, nil)
+			}, encoding.CodecV4, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}, db, nil)
 			cp.TryProposeChunk()

 			chunkOrm := orm.NewChunk(db)
@@ -696,8 +186,8 @@
 	}
 }

-func testChunkProposerBlobSizeLimit(t *testing.T) {
-	codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3}
+func testChunkProposerBlobSizeLimitCodecV4(t *testing.T) {
+	codecVersions := []encoding.CodecVersion{encoding.CodecV4}
 	for _, codecVersion := range codecVersions {
 		db := setupDB(t)
 		block := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
@@ -709,14 +199,10 @@
 		}

 		var chainConfig *params.ChainConfig
-		if codecVersion == encoding.CodecV0 { // will never hit blob size limit
-			chainConfig = &params.ChainConfig{}
-		} else if codecVersion == encoding.CodecV1 {
-			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0)}
-		} else if codecVersion == encoding.CodecV2 {
-			chainConfig = &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}
+		if codecVersion == encoding.CodecV4 {
+			chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
 		} else {
-			chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)}
+			assert.Fail(t, "unsupported codec version, expected CodecV4")
 		}

 		cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
@@ -728,7 +214,7 @@
 			ChunkTimeoutSec:                 math.MaxUint32,
 			GasCostIncreaseMultiplier:       1,
 			MaxUncompressedBatchBytesSize:   math.MaxUint64,
-		}, chainConfig, db, nil)
+		}, encoding.CodecV4, chainConfig, db, nil)

 		for i := 0; i < 2; i++ {
 			cp.TryProposeChunk()
@@ -740,14 +226,10 @@

 		var expectedNumChunks int = 2
 		var numBlocksMultiplier uint64
-		if codecVersion == encoding.CodecV0 {
-			numBlocksMultiplier = 255
-		} else if codecVersion == encoding.CodecV1 {
-			numBlocksMultiplier = 22
-		} else if codecVersion == encoding.CodecV2 {
+		if codecVersion == encoding.CodecV4 {
 			numBlocksMultiplier = 255
 		} else {
-			numBlocksMultiplier = 255
+			assert.Fail(t, "unsupported codec version, expected CodecV4")
 		}
 		assert.Len(t, chunks, expectedNumChunks)
@@ -761,47 +243,3 @@
 		database.CloseDB(db)
 	}
 }
-
-func testChunkProposerRespectHardforks(t *testing.T) {
-	db := setupDB(t)
-	defer database.CloseDB(db)
-
-	block := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
-	for i := int64(1); i <= 20; i++ {
-		l2BlockOrm := orm.NewL2Block(db)
-		block.Header.Number = big.NewInt(i)
-		block.Header.Time = uint64(i)
-		err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
-		assert.NoError(t, err)
-	}
-
-	cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-		MaxBlockNumPerChunk:             math.MaxUint64,
-		MaxTxNumPerChunk:                math.MaxUint64,
-		MaxL1CommitGasPerChunk:          math.MaxUint64,
-		MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
-		MaxRowConsumptionPerChunk:       math.MaxUint64,
-		ChunkTimeoutSec:                 0,
-		GasCostIncreaseMultiplier:       1,
-		MaxUncompressedBatchBytesSize:   math.MaxUint64,
-	}, &params.ChainConfig{
-		LondonBlock:    big.NewInt(0),
-		BernoulliBlock: big.NewInt(1),
-		CurieBlock:     big.NewInt(2),
-		DarwinTime:     func() *uint64 { t := uint64(4); return &t }(),
-	}, db, nil)
-
-	for i := 0; i < 5; i++ {
-		cp.TryProposeChunk()
-	}
-
-	chunkOrm := orm.NewChunk(db)
-	chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
-	assert.NoError(t, err)
-
-	assert.Len(t, chunks, 4)
-	expectedEndBlockNumbers := []uint64{1, 2, 3, 20}
-	for i, chunk := range chunks {
-		assert.Equal(t, expectedEndBlockNumbers[i], chunk.EndBlockNumber)
-	}
-}
expected CodecV4") } cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ @@ -728,7 +214,7 @@ func testChunkProposerBlobSizeLimit(t *testing.T) { ChunkTimeoutSec: math.MaxUint32, GasCostIncreaseMultiplier: 1, MaxUncompressedBatchBytesSize: math.MaxUint64, - }, chainConfig, db, nil) + }, encoding.CodecV4, chainConfig, db, nil) for i := 0; i < 2; i++ { cp.TryProposeChunk() @@ -740,14 +226,10 @@ func testChunkProposerBlobSizeLimit(t *testing.T) { var expectedNumChunks int = 2 var numBlocksMultiplier uint64 - if codecVersion == encoding.CodecV0 { - numBlocksMultiplier = 255 - } else if codecVersion == encoding.CodecV1 { - numBlocksMultiplier = 22 - } else if codecVersion == encoding.CodecV2 { + if codecVersion == encoding.CodecV4 { numBlocksMultiplier = 255 } else { - numBlocksMultiplier = 255 + assert.Fail(t, "unsupported codec version, expected CodecV4") } assert.Len(t, chunks, expectedNumChunks) @@ -761,47 +243,3 @@ func testChunkProposerBlobSizeLimit(t *testing.T) { database.CloseDB(db) } } - -func testChunkProposerRespectHardforks(t *testing.T) { - db := setupDB(t) - defer database.CloseDB(db) - - block := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json") - for i := int64(1); i <= 20; i++ { - l2BlockOrm := orm.NewL2Block(db) - block.Header.Number = big.NewInt(i) - block.Header.Time = uint64(i) - err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block}) - assert.NoError(t, err) - } - - cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ - MaxBlockNumPerChunk: math.MaxUint64, - MaxTxNumPerChunk: math.MaxUint64, - MaxL1CommitGasPerChunk: math.MaxUint64, - MaxL1CommitCalldataSizePerChunk: math.MaxUint64, - MaxRowConsumptionPerChunk: math.MaxUint64, - ChunkTimeoutSec: 0, - GasCostIncreaseMultiplier: 1, - MaxUncompressedBatchBytesSize: math.MaxUint64, - }, ¶ms.ChainConfig{ - LondonBlock: big.NewInt(0), - BernoulliBlock: big.NewInt(1), - CurieBlock: big.NewInt(2), - DarwinTime: func() *uint64 { t := uint64(4); return &t }(), - }, db, nil) - - for i := 0; i < 5; i++ { - cp.TryProposeChunk() - } - - chunkOrm := orm.NewChunk(db) - chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0) - assert.NoError(t, err) - - assert.Len(t, chunks, 4) - expectedEndBlockNumbers := []uint64{1, 2, 3, 20} - for i, chunk := range chunks { - assert.Equal(t, expectedEndBlockNumbers[i], chunk.EndBlockNumber) - } -} diff --git a/rollup/internal/controller/watcher/watcher_test.go b/rollup/internal/controller/watcher/watcher_test.go index 03498b3c30..465b873e33 100644 --- a/rollup/internal/controller/watcher/watcher_test.go +++ b/rollup/internal/controller/watcher/watcher_test.go @@ -100,29 +100,17 @@ func TestFunction(t *testing.T) { t.Run("TestFetchRunningMissingBlocks", testFetchRunningMissingBlocks) // Run chunk proposer test cases. - t.Run("TestChunkProposerCodecv0Limits", testChunkProposerCodecv0Limits) - t.Run("TestChunkProposerCodecv1Limits", testChunkProposerCodecv1Limits) - t.Run("TestChunkProposerCodecv2Limits", testChunkProposerCodecv2Limits) - t.Run("TestChunkProposerCodecv3Limits", testChunkProposerCodecv3Limits) - t.Run("TestChunkProposerBlobSizeLimit", testChunkProposerBlobSizeLimit) - t.Run("TestChunkProposerRespectHardforks", testChunkProposerRespectHardforks) + t.Run("TestChunkProposerLimitsCodecV4", testChunkProposerLimitsCodecV4) + t.Run("TestChunkProposerBlobSizeLimitCodecV4", testChunkProposerBlobSizeLimitCodecV4) // Run batch proposer test cases. 
- t.Run("TestBatchProposerCodecv0Limits", testBatchProposerCodecv0Limits) - t.Run("TestBatchProposerCodecv1Limits", testBatchProposerCodecv1Limits) - t.Run("TestBatchProposerCodecv2Limits", testBatchProposerCodecv2Limits) - t.Run("TestBatchProposerCodecv3Limits", testBatchProposerCodecv3Limits) - t.Run("TestBatchCommitGasAndCalldataSizeCodecv0Estimation", testBatchCommitGasAndCalldataSizeCodecv0Estimation) - t.Run("TestBatchCommitGasAndCalldataSizeCodecv1Estimation", testBatchCommitGasAndCalldataSizeCodecv1Estimation) - t.Run("TestBatchCommitGasAndCalldataSizeCodecv2Estimation", testBatchCommitGasAndCalldataSizeCodecv2Estimation) - t.Run("TestBatchCommitGasAndCalldataSizeCodecv3Estimation", testBatchCommitGasAndCalldataSizeCodecv3Estimation) - t.Run("TestBatchProposerBlobSizeLimit", testBatchProposerBlobSizeLimit) - t.Run("TestBatchProposerMaxChunkNumPerBatchLimit", testBatchProposerMaxChunkNumPerBatchLimit) - t.Run("TestBatchProposerRespectHardforks", testBatchProposerRespectHardforks) + t.Run("TestBatchProposerLimitsCodecV4", testBatchProposerLimitsCodecV4) + t.Run("TestBatchCommitGasAndCalldataSizeEstimationCodecV4", testBatchCommitGasAndCalldataSizeEstimationCodecV4) + t.Run("TestBatchProposerBlobSizeLimitCodecV4", testBatchProposerBlobSizeLimitCodecV4) + t.Run("TestBatchProposerMaxChunkNumPerBatchLimitCodecV4", testBatchProposerMaxChunkNumPerBatchLimitCodecV4) // Run bundle proposer test cases. - t.Run("TestBundleProposerLimits", testBundleProposerLimits) - t.Run("TestBundleProposerRespectHardforks", testBundleProposerRespectHardforks) + t.Run("TestBundleProposerLimitsCodecV4", testBundleProposerLimitsCodecV4) } func readBlockFromJSON(t *testing.T, filename string) *encoding.Block { diff --git a/rollup/internal/orm/orm_test.go b/rollup/internal/orm/orm_test.go index 390c860925..52e70df2c6 100644 --- a/rollup/internal/orm/orm_test.go +++ b/rollup/internal/orm/orm_test.go @@ -166,7 +166,7 @@ func TestL2BlockOrm(t *testing.T) { } func TestChunkOrm(t *testing.T) { - codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3} + codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3, encoding.CodecV4} chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}} for _, codecVersion := range codecVersions { @@ -229,7 +229,7 @@ func TestChunkOrm(t *testing.T) { } func TestBatchOrm(t *testing.T) { - codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3} + codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3, encoding.CodecV4} chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}} for _, codecVersion := range codecVersions { @@ -378,7 +378,7 @@ func TestBundleOrm(t *testing.T) { Index: 0, Chunks: []*encoding.Chunk{chunk1}, } - dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV3, utils.BatchMetrics{}) + dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV4, utils.BatchMetrics{}) assert.NoError(t, err) chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}} @@ -386,30 +386,30 @@ func TestBundleOrm(t *testing.T) { Index: 1, Chunks: []*encoding.Chunk{chunk2}, } - dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV3, utils.BatchMetrics{}) + dbBatch2, 
err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV4, utils.BatchMetrics{}) assert.NoError(t, err) var bundle1 *Bundle var bundle2 *Bundle t.Run("InsertBundle", func(t *testing.T) { - bundle1, err = bundleOrm.InsertBundle(context.Background(), []*Batch{dbBatch1}, encoding.CodecV3) + bundle1, err = bundleOrm.InsertBundle(context.Background(), []*Batch{dbBatch1}, encoding.CodecV4) assert.NoError(t, err) assert.NotNil(t, bundle1) assert.Equal(t, uint64(0), bundle1.StartBatchIndex) assert.Equal(t, uint64(0), bundle1.EndBatchIndex) assert.Equal(t, dbBatch1.Hash, bundle1.StartBatchHash) assert.Equal(t, dbBatch1.Hash, bundle1.EndBatchHash) - assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(bundle1.CodecVersion)) + assert.Equal(t, encoding.CodecV4, encoding.CodecVersion(bundle1.CodecVersion)) - bundle2, err = bundleOrm.InsertBundle(context.Background(), []*Batch{dbBatch2}, encoding.CodecV3) + bundle2, err = bundleOrm.InsertBundle(context.Background(), []*Batch{dbBatch2}, encoding.CodecV4) assert.NoError(t, err) assert.NotNil(t, bundle2) assert.Equal(t, uint64(1), bundle2.StartBatchIndex) assert.Equal(t, uint64(1), bundle2.EndBatchIndex) assert.Equal(t, dbBatch2.Hash, bundle2.StartBatchHash) assert.Equal(t, dbBatch2.Hash, bundle2.EndBatchHash) - assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(bundle2.CodecVersion)) + assert.Equal(t, encoding.CodecV4, encoding.CodecVersion(bundle2.CodecVersion)) }) t.Run("GetFirstUnbundledBatchIndex", func(t *testing.T) { @@ -560,7 +560,7 @@ func TestPendingTransactionOrm(t *testing.T) { err = pendingTransactionOrm.InsertPendingTransaction(context.Background(), "test", senderMeta, tx1, 0) assert.NoError(t, err) - err = pendingTransactionOrm.UpdatePendingTransactionStatusByTxHash(context.Background(), tx0.Hash(), types.TxStatusReplaced) + err = pendingTransactionOrm.UpdateTransactionStatusByTxHash(context.Background(), tx0.Hash(), types.TxStatusReplaced) assert.NoError(t, err) txs, err := pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), senderMeta.Type, 2) @@ -577,7 +577,7 @@ func TestPendingTransactionOrm(t *testing.T) { assert.Equal(t, senderMeta.Address.String(), txs[1].SenderAddress) assert.Equal(t, senderMeta.Type, txs[1].SenderType) - err = pendingTransactionOrm.UpdatePendingTransactionStatusByTxHash(context.Background(), tx1.Hash(), types.TxStatusConfirmed) + err = pendingTransactionOrm.UpdateTransactionStatusByTxHash(context.Background(), tx1.Hash(), types.TxStatusConfirmed) assert.NoError(t, err) txs, err = pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), senderMeta.Type, 2) @@ -594,4 +594,17 @@ func TestPendingTransactionOrm(t *testing.T) { status, err := pendingTransactionOrm.GetTxStatusByTxHash(context.Background(), tx0.Hash()) assert.NoError(t, err) assert.Equal(t, types.TxStatusConfirmedFailed, status) + + // Test DeleteTransactionByTxHash + err = pendingTransactionOrm.DeleteTransactionByTxHash(context.Background(), tx0.Hash()) + assert.NoError(t, err) + + // Verify the transaction is deleted + status, err = pendingTransactionOrm.GetTxStatusByTxHash(context.Background(), tx0.Hash()) + assert.NoError(t, err) + assert.Equal(t, types.TxStatusUnknown, status) // Should return unknown status for deleted transaction + + // Try to delete non-existent transaction + err = pendingTransactionOrm.DeleteTransactionByTxHash(context.Background(), common.HexToHash("0x123")) + assert.Error(t, err) // Should return error for non-existent transaction } diff 
diff --git a/rollup/internal/orm/pending_transaction.go b/rollup/internal/orm/pending_transaction.go
index 3c73ed13eb..df53682704 100644
--- a/rollup/internal/orm/pending_transaction.go
+++ b/rollup/internal/orm/pending_transaction.go
@@ -8,6 +8,7 @@ import (

 	"github.com/scroll-tech/go-ethereum/common"
 	gethTypes "github.com/scroll-tech/go-ethereum/core/types"
+	"github.com/scroll-tech/go-ethereum/log"
 	"gorm.io/gorm"

 	"scroll-tech/common/types"
@@ -150,8 +151,33 @@ func (o *PendingTransaction) InsertPendingTransaction(ctx context.Context, conte
 	return nil
 }

-// UpdatePendingTransactionStatusByTxHash updates the status of a transaction based on the transaction hash.
-func (o *PendingTransaction) UpdatePendingTransactionStatusByTxHash(ctx context.Context, hash common.Hash, status types.TxStatus, dbTX ...*gorm.DB) error {
+// DeleteTransactionByTxHash permanently deletes a transaction record from the database by transaction hash.
+// Using permanent delete (Unscoped) instead of soft delete to prevent database bloat, as repeated SendTransaction failures
+// could write a large number of transactions to the database.
+func (o *PendingTransaction) DeleteTransactionByTxHash(ctx context.Context, hash common.Hash, dbTX ...*gorm.DB) error {
+	db := o.db
+	if len(dbTX) > 0 && dbTX[0] != nil {
+		db = dbTX[0]
+	}
+	db = db.WithContext(ctx)
+	db = db.Model(&PendingTransaction{})
+
+	// Perform permanent delete by using Unscoped()
+	result := db.Where("hash = ?", hash.String()).Unscoped().Delete(&PendingTransaction{})
+	if result.Error != nil {
+		return fmt.Errorf("failed to delete transaction, err: %w", result.Error)
+	}
+	if result.RowsAffected == 0 {
+		return fmt.Errorf("no transaction found with hash: %s", hash.String())
+	}
+	if result.RowsAffected > 0 {
+		log.Warn("Successfully deleted transaction", "hash", hash.String())
+	}
+	return nil
+}
+
+// UpdateTransactionStatusByTxHash updates the status of a transaction based on the transaction hash.
+func (o *PendingTransaction) UpdateTransactionStatusByTxHash(ctx context.Context, hash common.Hash, status types.TxStatus, dbTX ...*gorm.DB) error {
 	db := o.db
 	if len(dbTX) > 0 && dbTX[0] != nil {
 		db = dbTX[0]
@@ -160,7 +186,7 @@ func (o *PendingTransaction) UpdatePendingTransactionStatusByTxHash(ctx context.
 	db = db.Model(&PendingTransaction{})
 	db = db.Where("hash = ?", hash.String())
 	if err := db.Update("status", status).Error; err != nil {
-		return fmt.Errorf("failed to UpdatePendingTransactionStatusByTxHash, txHash: %s, error: %w", hash, err)
+		return fmt.Errorf("failed to UpdateTransactionStatusByTxHash, txHash: %s, error: %w", hash, err)
	}
 	return nil
 }
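A note on the Unscoped() call above: when a GORM model carries a soft-delete column (gorm.DeletedAt), which the doc comment's soft-versus-permanent contrast implies PendingTransaction does, a plain Delete only marks the row, so repeated sender failures would keep accumulating dead rows. A minimal sketch of the difference, under that assumption:

    // Soft delete: with a gorm.DeletedAt field this issues
    //   UPDATE pending_transaction SET deleted_at = <now> WHERE hash = ?
    // and the row keeps occupying space in the table.
    db.Where("hash = ?", hash.String()).Delete(&PendingTransaction{})

    // Permanent delete, as DeleteTransactionByTxHash does: this issues
    //   DELETE FROM pending_transaction WHERE hash = ?
    db.Where("hash = ?", hash.String()).Unscoped().Delete(&PendingTransaction{})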
diff --git a/rollup/mock_bridge/MockBridge.sol b/rollup/mock_bridge/MockBridge.sol
index c94bc221b3..d1bddd4abc 100644
--- a/rollup/mock_bridge/MockBridge.sol
+++ b/rollup/mock_bridge/MockBridge.sol
@@ -102,10 +102,6 @@ contract MockBridge {

   mapping(uint256 => bytes32) public withdrawRoots;

-  function setL1BaseFee(uint256 _l1BaseFee) external {
-    l1BaseFee = _l1BaseFee;
-  }
-
   function setL1BaseFeeAndBlobBaseFee(uint256 _l1BaseFee, uint256 _l1BlobBaseFee) external {
     l1BaseFee = _l1BaseFee;
     l1BlobBaseFee = _l1BlobBaseFee;
diff --git a/rollup/tests/bridge_test.go b/rollup/tests/bridge_test.go
index 3ab88f8439..41cba06469 100644
--- a/rollup/tests/bridge_test.go
+++ b/rollup/tests/bridge_test.go
@@ -208,12 +208,10 @@ func TestFunction(t *testing.T) {

 	// l1 rollup and watch rollup events
 	t.Run("TestCommitAndFinalizeGenesisBatch", testCommitAndFinalizeGenesisBatch)
-	t.Run("testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions", testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions)
-	t.Run("TestCommitBatchAndFinalizeBatchOrBundleCrossingAllTransitions", testCommitBatchAndFinalizeBatchOrBundleCrossingAllTransitions)
+	t.Run("TestCommitBatchAndFinalizeBundleCodecV4", testCommitBatchAndFinalizeBundleCodecV4)

 	// l1/l2 gas oracle
 	t.Run("TestImportL1GasPrice", testImportL1GasPrice)
-	t.Run("TestImportL1GasPriceAfterCurie", testImportL1GasPriceAfterCurie)
 	t.Run("TestImportDefaultL1GasPriceDueToL1GasPriceSpike", testImportDefaultL1GasPriceDueToL1GasPriceSpike)
 	t.Run("TestImportL2GasPrice", testImportL2GasPrice)
 }
diff --git a/rollup/tests/gas_oracle_test.go b/rollup/tests/gas_oracle_test.go
index cdcd5a3e01..f44587dc27 100644
--- a/rollup/tests/gas_oracle_test.go
+++ b/rollup/tests/gas_oracle_test.go
@@ -30,80 +30,7 @@ func testImportL1GasPrice(t *testing.T) {
 	l1Cfg := rollupApp.Config.L1Config

 	// Create L1Relayer
-	l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, &params.ChainConfig{}, relayer.ServiceTypeL1GasOracle, nil)
-	assert.NoError(t, err)
-	defer l1Relayer.StopSenders()
-
-	// Create L1Watcher
-	startHeight, err := l1Client.BlockNumber(context.Background())
-	assert.NoError(t, err)
-	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, db, nil)
-
-	// fetch new blocks
-	number, err := l1Client.BlockNumber(context.Background())
-	assert.Greater(t, number, startHeight-1)
-	assert.NoError(t, err)
-	err = l1Watcher.FetchBlockHeader(number)
-	assert.NoError(t, err)
-
-	l1BlockOrm := orm.NewL1Block(db)
-	// check db status
-	latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(context.Background())
-	assert.NoError(t, err)
-	assert.Equal(t, number, latestBlockHeight)
-	blocks, err := l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
-	assert.NoError(t, err)
-	assert.Equal(t, len(blocks), 1)
-	assert.Empty(t, blocks[0].OracleTxHash)
-	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOraclePending)
-
-	// add fake batch to pass check for commit batch timeout
-	chunk := &encoding.Chunk{
-		Blocks: []*encoding.Block{
-			{
-				Header: &gethTypes.Header{
-					Number:     big.NewInt(1),
-					ParentHash: common.Hash{},
-					Difficulty: big.NewInt(0),
-					BaseFee:    big.NewInt(0),
-				},
-				Transactions:   nil,
-				WithdrawRoot:   common.Hash{},
-				RowConsumption: &gethTypes.RowConsumption{},
-			},
-		},
-	}
-	batch := &encoding.Batch{
-		Index:                      0,
-		TotalL1MessagePoppedBefore: 0,
-		ParentBatchHash:            common.Hash{},
-		Chunks:                     []*encoding.Chunk{chunk},
-	}
-	batchOrm := orm.NewBatch(db)
-	dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
-	assert.NoError(t, err)
-	err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), dbBatch.Hash, common.Hash{}.String(), types.RollupCommitted)
-	assert.NoError(t, err)
-
-	// relay gas price
-	l1Relayer.ProcessGasPriceOracle()
-	blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
-	assert.NoError(t, err)
-	assert.Equal(t, len(blocks), 1)
-	assert.NotEmpty(t, blocks[0].OracleTxHash)
-	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOracleImporting)
-}
-
-func testImportL1GasPriceAfterCurie(t *testing.T) {
-	db := setupDB(t)
-	defer database.CloseDB(db)
-
-	prepareContracts(t)
-
-	l1Cfg := rollupApp.Config.L1Config
-
-	// Create L1Relayer
-	l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, relayer.ServiceTypeL1GasOracle, nil)
+	l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, relayer.ServiceTypeL1GasOracle, nil)
 	assert.NoError(t, err)
 	defer l1Relayer.StopSenders()

@@ -178,7 +105,7 @@ func testImportDefaultL1GasPriceDueToL1GasPriceSpike(t *testing.T) {
 	// set CheckCommittedBatchesWindowMinutes to zero to not pass check for commit batch timeout
 	l1CfgCopy.RelayerConfig.GasOracleConfig.CheckCommittedBatchesWindowMinutes = 0
 	// Create L1Relayer
-	l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1CfgCopy.RelayerConfig, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, relayer.ServiceTypeL1GasOracle, nil)
+	l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1CfgCopy.RelayerConfig, relayer.ServiceTypeL1GasOracle, nil)
 	assert.NoError(t, err)
 	defer l1Relayer.StopSenders()
diff --git a/rollup/tests/process_start_test.go b/rollup/tests/process_start_test.go
index 0b77656824..08b9659990 100644
--- a/rollup/tests/process_start_test.go
+++ b/rollup/tests/process_start_test.go
@@ -19,7 +19,7 @@ func testProcessStart(t *testing.T) {
 	db := setupDB(t)
 	defer database.CloseDB(db)
 	rollupApp.RunApp(t, cutils.GasOracleApp, "--genesis", "../conf/genesis.json")
-	rollupApp.RunApp(t, cutils.RollupRelayerApp, "--genesis", "../conf/genesis.json")
+	rollupApp.RunApp(t, cutils.RollupRelayerApp, "--genesis", "../conf/genesis.json", "--min-codec-version", "4")
 	rollupApp.WaitExit()
 }
@@ -36,7 +36,7 @@ func testProcessStartEnableMetrics(t *testing.T) {
 	port, err = rand.Int(rand.Reader, big.NewInt(10000))
 	assert.NoError(t, err)
 	svrPort = strconv.FormatInt(port.Int64()+30000, 10)
-	rollupApp.RunApp(t, cutils.RollupRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort, "--genesis", "../conf/genesis.json")
+	rollupApp.RunApp(t, cutils.RollupRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort, "--genesis", "../conf/genesis.json", "--min-codec-version", "4")
 	rollupApp.WaitExit()
 }
a/rollup/tests/rollup_test.go +++ b/rollup/tests/rollup_test.go @@ -52,24 +52,18 @@ func testCommitAndFinalizeGenesisBatch(t *testing.T) { assert.Equal(t, types.RollupFinalized, types.RollupStatus(batch.RollupStatus)) } -func testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions(t *testing.T) { - codecVersions := []encoding.CodecVersion{encoding.CodecV0, encoding.CodecV1, encoding.CodecV2, encoding.CodecV3, encoding.CodecV4} +func testCommitBatchAndFinalizeBundleCodecV4(t *testing.T) { + codecVersions := []encoding.CodecVersion{encoding.CodecV4} for _, codecVersion := range codecVersions { db := setupDB(t) prepareContracts(t) var chainConfig *params.ChainConfig - if codecVersion == encoding.CodecV0 { - chainConfig = ¶ms.ChainConfig{} - } else if codecVersion == encoding.CodecV1 { - chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0)} - } else if codecVersion == encoding.CodecV2 { - chainConfig = ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} - } else if codecVersion == encoding.CodecV3 { - chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} + if codecVersion == encoding.CodecV4 { + chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)} } else { - chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} + assert.Fail(t, "unsupported codec version, expected CodecV4") } // Create L2Relayer @@ -103,19 +97,19 @@ func testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions(t *testing.T) { MaxRowConsumptionPerChunk: 1048319, ChunkTimeoutSec: 300, MaxUncompressedBatchBytesSize: math.MaxUint64, - }, chainConfig, db, nil) + }, encoding.CodecV4, chainConfig, db, nil) bap := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{ MaxL1CommitGasPerBatch: 50000000000, MaxL1CommitCalldataSizePerBatch: 1000000, BatchTimeoutSec: 300, MaxUncompressedBatchBytesSize: math.MaxUint64, - }, chainConfig, db, nil) + }, encoding.CodecV4, chainConfig, db, nil) bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{ MaxBatchNumPerBundle: 1000000, BundleTimeoutSec: 300, - }, chainConfig, db, nil) + }, encoding.CodecV4, chainConfig, db, nil) l2BlockOrm := orm.NewL2Block(db) err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks[:5]) @@ -178,7 +172,6 @@ func testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions(t *testing.T) { } assert.Eventually(t, func() bool { - l2Relayer.ProcessCommittedBatches() l2Relayer.ProcessPendingBundles() batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0) @@ -198,26 +191,24 @@ func testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions(t *testing.T) { bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0) assert.NoError(t, err) - if codecVersion == encoding.CodecV0 || codecVersion == encoding.CodecV1 || codecVersion == encoding.CodecV2 { - assert.Len(t, bundles, 0) - } else { - assert.Len(t, bundles, 1) - bundle := bundles[0] - if types.RollupStatus(bundle.RollupStatus) != types.RollupFinalized { - return false - } - assert.NotEmpty(t, bundle.FinalizeTxHash) - receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(bundle.FinalizeTxHash)) - assert.NoError(t, err) - assert.Equal(t, 
@@ -178,7 +172,6 @@ func testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions(t *testing.T) {
 		assert.Eventually(t, func() bool {
-			l2Relayer.ProcessCommittedBatches()
 			l2Relayer.ProcessPendingBundles()
 
 			batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
@@ -198,26 +191,24 @@ func testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions(t *testing.T) {
 			bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
 			assert.NoError(t, err)
-			if codecVersion == encoding.CodecV0 || codecVersion == encoding.CodecV1 || codecVersion == encoding.CodecV2 {
-				assert.Len(t, bundles, 0)
-			} else {
-				assert.Len(t, bundles, 1)
-				bundle := bundles[0]
-				if types.RollupStatus(bundle.RollupStatus) != types.RollupFinalized {
-					return false
-				}
-				assert.NotEmpty(t, bundle.FinalizeTxHash)
-				receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(bundle.FinalizeTxHash))
-				assert.NoError(t, err)
-				assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
-				batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"bundle_hash": bundle.Hash}, nil, 0)
-				assert.NoError(t, err)
-				assert.Len(t, batches, 2)
-				for _, batch := range batches {
-					assert.Equal(t, batch.RollupStatus, bundle.RollupStatus)
-					assert.Equal(t, bundle.FinalizeTxHash, batch.FinalizeTxHash)
-				}
+			assert.Len(t, bundles, 1)
+
+			bundle := bundles[0]
+			if types.RollupStatus(bundle.RollupStatus) != types.RollupFinalized {
+				return false
 			}
+			assert.NotEmpty(t, bundle.FinalizeTxHash)
+			receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(bundle.FinalizeTxHash))
+			assert.NoError(t, err)
+			assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
+			batches, err = batchOrm.GetBatches(context.Background(), map[string]interface{}{"bundle_hash": bundle.Hash}, nil, 0)
+			assert.NoError(t, err)
+			assert.Len(t, batches, 2)
+			for _, batch := range batches {
+				assert.Equal(t, batch.RollupStatus, bundle.RollupStatus)
+				assert.Equal(t, bundle.FinalizeTxHash, batch.FinalizeTxHash)
+			}
+
 			return true
 		}, 30*time.Second, time.Second)
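
Note: the proposer constructors above now receive encoding.CodecV4 as their new minimum-codec-version argument. A plausible shape for the guard implied by that parameter; hypothetical helper, since the real check lives in the watcher/proposer code, not in this diff:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/da-codec/encoding" // import path assumed
)

// ensureCodecAtLeast rejects using a codec below the configured minimum,
// shaped after the minCodecVersion argument threaded through
// NewChunkProposer, NewBatchProposer, and NewBundleProposer above.
func ensureCodecAtLeast(version, minVersion encoding.CodecVersion) error {
	if version < minVersion {
		return fmt.Errorf("codec version %d is below configured minimum %d", version, minVersion)
	}
	return nil
}
```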
@@ -225,177 +216,3 @@ func testCommitBatchAndFinalizeBatchOrBundleWithAllCodecVersions(t *testing.T) {
 		database.CloseDB(db)
 	}
 }
-
-func testCommitBatchAndFinalizeBatchOrBundleCrossingAllTransitions(t *testing.T) {
-	db := setupDB(t)
-	defer database.CloseDB(db)
-
-	prepareContracts(t)
-
-	// Create L2Relayer
-	l2Cfg := rollupApp.Config.L2Config
-	chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(1), CurieBlock: big.NewInt(2), DarwinTime: func() *uint64 { t := uint64(4); return &t }()}
-	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
-	assert.NoError(t, err)
-	defer l2Relayer.StopSenders()
-
-	// add some blocks to db
-	var blocks []*encoding.Block
-	for i := int64(0); i < 10; i++ {
-		header := gethTypes.Header{
-			Number:     big.NewInt(i + 1),
-			ParentHash: common.Hash{},
-			Difficulty: big.NewInt(0),
-			BaseFee:    big.NewInt(0),
-			Root:       common.HexToHash("0x1"),
-			Time:       uint64(i + 1),
-		}
-		blocks = append(blocks, &encoding.Block{
-			Header:         &header,
-			Transactions:   nil,
-			WithdrawRoot:   common.HexToHash("0x2"),
-			RowConsumption: &gethTypes.RowConsumption{},
-		})
-	}
-
-	l2BlockOrm := orm.NewL2Block(db)
-	err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks)
-	assert.NoError(t, err)
-
-	cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-		MaxBlockNumPerChunk:             100,
-		MaxTxNumPerChunk:                10000,
-		MaxL1CommitGasPerChunk:          50000000000,
-		MaxL1CommitCalldataSizePerChunk: 1000000,
-		MaxRowConsumptionPerChunk:       1048319,
-		ChunkTimeoutSec:                 300,
-		MaxUncompressedBatchBytesSize:   math.MaxUint64,
-	}, chainConfig, db, nil)
-
-	bap := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
-		MaxL1CommitGasPerBatch:          50000000000,
-		MaxL1CommitCalldataSizePerBatch: 1000000,
-		BatchTimeoutSec:                 300,
-		MaxUncompressedBatchBytesSize:   math.MaxUint64,
-	}, chainConfig, db, nil)
-
-	bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{
-		MaxBatchNumPerBundle: 1000000,
-		BundleTimeoutSec:     300,
-	}, chainConfig, db, nil)
-
-	cp.TryProposeChunk()
-	cp.TryProposeChunk()
-	cp.TryProposeChunk()
-	cp.TryProposeChunk()
-	cp.TryProposeChunk()
-
-	bap.TryProposeBatch()
-	bap.TryProposeBatch()
-	bap.TryProposeBatch()
-	bap.TryProposeBatch()
-
-	bup.TryProposeBundle()
-
-	l2Relayer.ProcessPendingBatches()
-
-	batchOrm := orm.NewBatch(db)
-	bundleOrm := orm.NewBundle(db)
-
-	assert.Eventually(t, func() bool {
-		batches, getErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
-		assert.NoError(t, getErr)
-		assert.Len(t, batches, 4)
-		batches = batches[1:]
-		for _, batch := range batches {
-			if types.RollupCommitted != types.RollupStatus(batch.RollupStatus) {
-				return false
-			}
-		}
-		return true
-	}, 30*time.Second, time.Second)
-
-	batchProof := &message.BatchProof{
-		Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-		Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-		Vk:        []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-	}
-	batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
-	assert.NoError(t, err)
-	batches = batches[1:]
-	for _, batch := range batches {
-		err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, batchProof, 600)
-		assert.NoError(t, err)
-		err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
-		assert.NoError(t, err)
-	}
-
-	bundleProof := &message.BundleProof{
-		Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-		Instances: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-		Vk:        []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-	}
-	bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
-	assert.NoError(t, err)
-	for _, bundle := range bundles {
-		err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, bundleProof, types.ProvingTaskVerified, 100)
-		assert.NoError(t, err)
-	}
-
-	assert.Eventually(t, func() bool {
-		l2Relayer.ProcessCommittedBatches()
-
-		batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
-		assert.NoError(t, err)
-		assert.Len(t, batches, 4)
-		batches = batches[1:2]
-		for _, batch := range batches {
-			if types.RollupStatus(batch.RollupStatus) != types.RollupFinalized {
-				return false
-			}
-			assert.NotEmpty(t, batch.FinalizeTxHash)
-			receipt, getErr := l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
-			assert.NoError(t, getErr)
-			assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
-		}
-		return true
-	}, 30*time.Second, time.Second)
-
-	assert.Eventually(t, func() bool {
-		l2Relayer.ProcessPendingBundles()
-
-		batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
-		assert.NoError(t, err)
-		assert.Len(t, batches, 4)
-		batches = batches[3:]
-		for _, batch := range batches {
-			if types.RollupStatus(batch.RollupStatus) != types.RollupFinalized {
-				return false
-			}
-			assert.NotEmpty(t, batch.FinalizeTxHash)
-			receipt, getErr := l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
-			assert.NoError(t, getErr)
-			assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
-		}
-
-		bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
-		assert.NoError(t, err)
-		assert.Len(t, bundles, 1)
-		bundle := bundles[0]
-		if types.RollupStatus(bundle.RollupStatus) != types.RollupFinalized {
-			return false
-		}
-		assert.NotEmpty(t, bundle.FinalizeTxHash)
-		receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(bundle.FinalizeTxHash))
-		assert.NoError(t, err)
-		assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
-		batches, err = batchOrm.GetBatches(context.Background(), map[string]interface{}{"bundle_hash": bundle.Hash}, nil, 0)
-		assert.NoError(t, err)
-		assert.Len(t, batches, 1)
-		for _, batch := range batches {
-			assert.Equal(t, batch.RollupStatus, bundle.RollupStatus)
-			assert.Equal(t, bundle.FinalizeTxHash, batch.FinalizeTxHash)
-		}
-		return true
-	}, 30*time.Second, time.Second)
-}
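
Note: the deleted test above stubbed proofs with inline 32-byte arrays. If the surviving CodecV4 test ever needs the same stubs, a small helper could centralize them. A sketch only: the helper names are hypothetical, the message.BatchProof field names come from this diff, and the import path is assumed from the repo's conventions:

```go
package tests

import "scroll-tech/common/types/message" // import path assumed

// dummyProofBytes returns the ascending 0..31 byte stub these tests used
// for the Proof, Instances, and Vk placeholders.
func dummyProofBytes() []byte {
	b := make([]byte, 32)
	for i := range b {
		b[i] = byte(i)
	}
	return b
}

// newDummyBatchProof assembles the placeholder batch proof the deleted test
// built inline (hypothetical helper).
func newDummyBatchProof() *message.BatchProof {
	return &message.BatchProof{
		Proof:     dummyProofBytes(),
		Instances: dummyProofBytes(),
		Vk:        dummyProofBytes(),
	}
}
```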