diff --git a/.github/workflows/nightly-spartan-bench.yml b/.github/workflows/nightly-spartan-bench.yml index e331be8f8ec5..26a5ec5cffad 100644 --- a/.github/workflows/nightly-spartan-bench.yml +++ b/.github/workflows/nightly-spartan-bench.yml @@ -223,17 +223,122 @@ jobs: --data "$data" fi + block-capacity-benchmark: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + ref: merge-train/spartan + + - name: Determine nightly tag + id: nightly-tag + run: | + if [[ -n "${{ inputs.nightly_tag }}" ]]; then + nightly_tag="${{ inputs.nightly_tag }}" + else + current_version=$(jq -r '."."' .release-please-manifest.json) + nightly_tag="${current_version}-spartan.$(date -u +%Y%m%d)" + fi + echo "nightly_tag=$nightly_tag" >> $GITHUB_OUTPUT + echo "Using nightly tag: $nightly_tag" + + - name: Check if Docker image exists + run: | + DOCKER_IMAGE="aztecprotocol/aztec:${{ steps.nightly-tag.outputs.nightly_tag }}" + echo "Checking if Docker image exists: $DOCKER_IMAGE" + if docker manifest inspect "$DOCKER_IMAGE" > /dev/null 2>&1; then + echo "Docker image exists: $DOCKER_IMAGE" + else + echo "Docker image does not exist: $DOCKER_IMAGE" + exit 1 + fi + + - name: Run block capacity benchmarks + timeout-minutes: 240 + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + GITHUB_TOKEN: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + BUILD_INSTANCE_SSH_KEY: ${{ secrets.BUILD_INSTANCE_SSH_KEY }} + GCP_SA_KEY: ${{ secrets.GCP_SA_KEY }} + GCP_PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }} + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + RUN_ID: ${{ github.run_id }} + AWS_SHUTDOWN_TIME: 240 + NO_SPOT: 1 + run: | + ./.github/ci3.sh network-block-capacity-bench block-capacity nightly-block-capacity "aztecprotocol/aztec:${{ steps.nightly-tag.outputs.nightly_tag }}" + + - name: Cleanup network resources + if: always() + env: + AWS_ACCESS_KEY_ID: ${{ 
secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + GITHUB_TOKEN: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + BUILD_INSTANCE_SSH_KEY: ${{ secrets.BUILD_INSTANCE_SSH_KEY }} + GCP_SA_KEY: ${{ secrets.GCP_SA_KEY }} + GCP_PROJECT_ID: ${{ secrets.GCP_PROJECT_ID }} + NO_SPOT: 1 + run: ./.github/ci3.sh network-teardown block-capacity nightly-block-capacity + + - name: Download benchmarks + if: always() + env: + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + run: | + if ./ci.sh gh-spartan-block-capacity-bench; then + echo "ENABLE_DEPLOY_BENCH=1" >> $GITHUB_ENV + fi + + - name: Upload benchmarks + if: always() && env.ENABLE_DEPLOY_BENCH == '1' + uses: benchmark-action/github-action-benchmark@4de1bed97a47495fc4c5404952da0499e31f5c29 + with: + name: Spartan + benchmark-data-dir-path: "bench/next" + tool: "customSmallerIsBetter" + output-file-path: ./bench-out/bench.json + gh-repository: github.com/AztecProtocol/benchmark-page-data + github-token: ${{ secrets.AZTEC_BOT_GITHUB_TOKEN }} + auto-push: true + ref: ${{ github.event.workflow_run.head_sha || github.sha }} + alert-threshold: "120%" + comment-on-alert: false + fail-on-alert: false + max-items-in-chart: 100 + + - name: Notify Slack on failure + if: failure() && github.event_name != 'workflow_dispatch' + env: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + run: | + if [ -n "${SLACK_BOT_TOKEN}" ]; then + read -r -d '' data <" + } + EOF + curl -X POST https://slack.com/api/chat.postMessage \ + -H "Authorization: Bearer $SLACK_BOT_TOKEN" \ + -H "Content-type: application/json" \ + --data "$data" + fi + status: runs-on: ubuntu-latest - needs: [benchmark, proving-benchmark] + needs: [benchmark, proving-benchmark, block-capacity-benchmark] if: always() steps: - name: Check benchmark results run: | - if [[ "${{ needs.benchmark.result }}" != "success" || "${{ needs.proving-benchmark.result }}" != "success" ]]; then + if 
[[ "${{ needs.benchmark.result }}" != "success" || "${{ needs.proving-benchmark.result }}" != "success" || "${{ needs.block-capacity-benchmark.result }}" != "success" ]]; then echo "One or more benchmark jobs failed" echo "benchmark: ${{ needs.benchmark.result }}" echo "proving-benchmark: ${{ needs.proving-benchmark.result }}" + echo "block-capacity-benchmark: ${{ needs.block-capacity-benchmark.result }}" exit 1 fi echo "All benchmark jobs succeeded" diff --git a/bootstrap.sh b/bootstrap.sh index 125fd48dff09..24de6c460b20 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -704,6 +704,30 @@ case "$cmd" in bench_merge cache_upload spartan-proving-bench-$(git rev-parse HEAD^{tree}).tar.gz bench-out/bench.json ;; + "ci-network-block-capacity-bench") + # Args: [docker_image] + # Deploys network and runs block capacity benchmarks. Cleanup should be done separately. + export CI=1 + env_file="${1:?env_file is required}" + namespace="${2:?namespace is required}" + docker_image="${3:-}" + build + # If no docker image provided, build and push to aztecdev + if [ -z "$docker_image" ]; then + release-image/bootstrap.sh push_pr + docker_image="aztecprotocol/aztecdev:$(git rev-parse HEAD)" + fi + # Set up environment and deploy using spartan + export NAMESPACE="$namespace" + export AZTEC_DOCKER_IMAGE="$docker_image" + spartan/bootstrap.sh network_deploy "${env_file}" + # Run block capacity benchmarks + spartan/bootstrap.sh block_capacity_bench "${env_file}" + rm -rf bench-out + mkdir -p bench-out + bench_merge + cache_upload spartan-block-capacity-bench-$(git rev-parse HEAD^{tree}).tar.gz bench-out/bench.json + ;; "ci-network-teardown") # Args: # Tears down a deployed network. 
diff --git a/ci.sh b/ci.sh index 62b225fe6b78..5014d9b948f5 100755 --- a/ci.sh +++ b/ci.sh @@ -244,6 +244,14 @@ case "$cmd" in export INSTANCE_POSTFIX="n-proving-bench" bootstrap_ec2 "./bootstrap.sh ci-network-proving-bench $*" ;; + network-block-capacity-bench) + # Args: [docker_image] + # Deploys network and runs block capacity benchmarks. + export CI_DASHBOARD="network" + export JOB_ID="x-${2:?namespace is required}-network-block-capacity-bench" CPUS=16 + export INSTANCE_POSTFIX="n-block-cap-bench" + bootstrap_ec2 "./bootstrap.sh ci-network-block-capacity-bench $*" + ;; network-teardown) # Args: export CI_DASHBOARD="network" @@ -390,7 +398,7 @@ case "$cmd" in ######################## # BENCHMARK PROCESSING # ######################## - gh-bench|gh-deploy-bench|gh-spartan-bench|gh-spartan-proving-bench) + gh-bench|gh-deploy-bench|gh-spartan-bench|gh-spartan-proving-bench|gh-spartan-block-capacity-bench) cache_download ${cmd#gh-}-$(git rev-parse HEAD^{tree}).tar.gz ;; diff --git a/noir-projects/noir-contracts/contracts/test/benchmarking_contract/src/main.nr b/noir-projects/noir-contracts/contracts/test/benchmarking_contract/src/main.nr index 24528824843c..c48ddf69888e 100644 --- a/noir-projects/noir-contracts/contracts/test/benchmarking_contract/src/main.nr +++ b/noir-projects/noir-contracts/contracts/test/benchmarking_contract/src/main.nr @@ -11,7 +11,15 @@ pub contract Benchmarking { macros::{functions::external, storage::storage}, messages::message_delivery::MessageDelivery, note::note_getter_options::NoteGetterOptions, - protocol::address::AztecAddress, + oracle::random::random, + protocol::{ + address::{AztecAddress, EthAddress}, + constants::{ + CONTRACT_CLASS_LOG_SIZE_IN_FIELDS, MAX_L2_TO_L1_MSGS_PER_CALL, + MAX_NOTE_HASHES_PER_CALL, MAX_NULLIFIERS_PER_CALL, MAX_PRIVATE_LOGS_PER_CALL, + PRIVATE_LOG_SIZE_IN_FIELDS, + }, + }, state_vars::{Map, Owned, PrivateSet, PublicMutable}, }; use field_note::FieldNote; @@ -61,4 +69,68 @@ pub contract Benchmarking { fn 
sha256_hash_1024(data: [u8; 1024]) -> [u8; 32] {
         sha256::sha256_var(data, data.len())
     }
+
+    // Lightest possible private transaction: empty app circuit, no state changes, no public calls.
+    #[external("private")]
+    fn noop() {}
+
+    // Fills the per-call nullifier capacity with pseudo-random values.
+    #[external("private")]
+    fn emit_nullifiers() {
+        // Safety: Benchmarking code
+        let random_seed = unsafe { random() };
+        for i in 0..MAX_NULLIFIERS_PER_CALL {
+            self.context.push_nullifier(random_seed + (i as Field));
+        }
+    }
+
+    // Fills the per-call note-hash capacity with pseudo-random values.
+    #[external("private")]
+    fn emit_note_hashes() {
+        // Safety: Benchmarking code
+        let random_seed = unsafe { random() };
+
+        for i in 0..MAX_NOTE_HASHES_PER_CALL {
+            self.context.push_note_hash(random_seed + (i as Field));
+        }
+    }
+
+    // Fills the per-call L2->L1 message capacity with pseudo-random recipients/contents.
+    #[external("private")]
+    fn emit_l2_to_l1_msgs() {
+        // Safety: Benchmarking code
+        let random_seed = unsafe { random() };
+
+        for i in 0..MAX_L2_TO_L1_MSGS_PER_CALL {
+            // Narrow the seed via u128 so the result fits in an EthAddress.
+            let recipient = EthAddress::from_field((random_seed as u128) as Field + (i as Field));
+            self.context.message_portal(recipient, random_seed + (i + 1) as Field);
+        }
+    }
+
+    // Emits the maximum number of maximum-size private logs, every field non-zero,
+    // so each tx consumes as much blob space as a single call can.
+    #[external("private")]
+    fn emit_private_logs() {
+        // Safety: Benchmarking code
+        let random_seed = unsafe { random() };
+
+        for i in 0..MAX_PRIVATE_LOGS_PER_CALL {
+            let mut log = [0; PRIVATE_LOG_SIZE_IN_FIELDS];
+            for j in 0..PRIVATE_LOG_SIZE_IN_FIELDS {
+                // BUG FIX: the original wrote log[i] here, so only one slot per log
+                // was ever populated and the rest stayed zero — the inner loop must
+                // fill every field of the log, i.e. index by j.
+                log[j] = random_seed + (i * MAX_PRIVATE_LOGS_PER_CALL + j) as Field;
+            }
+            self.context.emit_private_log(log, PRIVATE_LOG_SIZE_IN_FIELDS);
+        }
+    }
+
+    // Emits one full-size contract-class log with every field non-zero.
+    #[external("private")]
+    fn emit_contract_class_log() {
+        // Safety: Benchmarking code
+        let random_seed = unsafe { random() };
+
+        let mut log = [0; CONTRACT_CLASS_LOG_SIZE_IN_FIELDS];
+        for i in 0..log.len() {
+            log[i] = random_seed + (i as Field);
+        }
+        self.context.emit_contract_class_log(log);
+    }
+
+    // Lightest possible public transaction: empty public entrypoint, no state changes.
+ #[external("public")] + fn noop_pub() {} } diff --git a/spartan/.gitignore b/spartan/.gitignore index 792fa0ebb8b7..bbaa420c0380 100644 --- a/spartan/.gitignore +++ b/spartan/.gitignore @@ -16,6 +16,7 @@ environments/* !environments/devnet-avm-prover.env !environments/devnet-next.env !environments/devnet.env +!environments/block-capacity.env !environments/ignition-fisherman.env !environments/next-net.env !environments/next-scenario.env diff --git a/spartan/bootstrap.sh b/spartan/bootstrap.sh index d769085d45ad..bf8c2ba493f8 100755 --- a/spartan/bootstrap.sh +++ b/spartan/bootstrap.sh @@ -144,6 +144,11 @@ function proving_bench_cmds { echo "$(hash):TIMEOUT=${timeout} TPS=${tps} BENCH_OUTPUT=bench-out/n_tps_prove.${tps}tps.bench.json $root/yarn-project/end-to-end/scripts/run_test.sh simple n_tps_prove.test.ts" } +function block_capacity_bench_cmds { + local timeout=7200 # 2h + echo "$(hash):TIMEOUT=${timeout} BENCH_OUTPUT=bench-out/block_capacity.bench.json $root/yarn-project/end-to-end/scripts/run_test.sh simple block_capacity.test.ts" +} + function network_bench { rm -rf bench-out mkdir -p bench-out @@ -170,6 +175,19 @@ function proving_bench { proving_bench_cmds | parallelize 1 } +function block_capacity_bench { + rm -rf bench-out + mkdir -p bench-out + + local env_file="$1" + source_network_env $env_file + + echo_header "spartan block capacity bench" + gcp_auth + export K8S_ENRICHER=${K8S_ENRICHER:-1} + block_capacity_bench_cmds | parallelize 1 +} + function ensure_eth_balances { amount="$1" # if ETHEREUM_HOST is not set, use the first RPC URL @@ -234,7 +252,7 @@ case "$cmd" in run_network_tests "$1" "$2" ;; - network_tests|network_tests_1|network_tests_2|network_bench|proving_bench) + network_tests|network_tests_1|network_tests_2|network_bench|proving_bench|block_capacity_bench) env_file="$1" $cmd "$env_file" ;; diff --git a/spartan/environments/block-capacity.env b/spartan/environments/block-capacity.env new file mode 100644 index 000000000000..19aa80d1dc35 
--- /dev/null +++ b/spartan/environments/block-capacity.env @@ -0,0 +1,50 @@ +NAMESPACE=${NAMESPACE:-block-capacity} +CLUSTER=aztec-gke-private +GCP_REGION=us-west1-a + +AZTEC_EPOCH_DURATION=8 +AZTEC_SLOT_DURATION=72 +AZTEC_PROOF_SUBMISSION_EPOCHS=4 +AZTEC_LAG_IN_EPOCHS_FOR_VALIDATOR_SET=1 +AZTEC_LAG_IN_EPOCHS_FOR_RANDAO=1 + +CREATE_ETH_DEVNET=true +DESTROY_NAMESPACE=true +DESTROY_AZTEC_INFRA=true +CREATE_ROLLUP_CONTRACTS=true +REDEPLOY_ROLLUP_CONTRACTS=true + +ETHEREUM_CHAIN_ID=1337 +LABS_INFRA_MNEMONIC="test test test test test test test test test test test junk" +FUNDING_PRIVATE_KEY="0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" + +OTEL_COLLECTOR_ENDPOINT=REPLACE_WITH_GCP_SECRET + +VALIDATOR_REPLICAS=1 +VALIDATORS_PER_NODE=48 +PUBLISHERS_PER_VALIDATOR_KEY=1 +VALIDATOR_PUBLISHER_MNEMONIC_START_INDEX=5000 +VALIDATOR_RESOURCE_PROFILE="prod-hi-tps" + +REAL_VERIFIER=false + +RPC_REPLICAS=1 +RPC_INGRESS_ENABLED=false + +PROVER_REPLICAS=10 +PROVER_RESOURCE_PROFILE="dev" +PROVER_PUBLISHER_MNEMONIC_START_INDEX=8000 +PROVER_AGENT_POLL_INTERVAL_MS=10000 +PUBLISHERS_PER_PROVER=1 + +PROVER_TEST_DELAY_TYPE=realistic +DEBUG_FORCE_TX_PROOF_VERIFICATION=true + +SEQ_MAX_TX_PER_BLOCK=72000 # 1000 tps +SEQ_MIN_TX_PER_BLOCK=0 +SEQ_ENFORCE_TIME_TABLE=true +P2P_MAX_TX_POOL_SIZE=1000000000 +DEBUG_P2P_INSTRUMENT_MESSAGES=true + +LOG_LEVEL="debug; info: json-rpc, simulator" + diff --git a/spartan/terraform/deploy-aztec-infra/values/validator-resources-prod-hi-tps.yaml b/spartan/terraform/deploy-aztec-infra/values/validator-resources-prod-hi-tps.yaml new file mode 100644 index 000000000000..ea5ef92f5b67 --- /dev/null +++ b/spartan/terraform/deploy-aztec-infra/values/validator-resources-prod-hi-tps.yaml @@ -0,0 +1,23 @@ +validator: + nodeSelector: + local-ssd: "false" + node-type: "network" + cores: "8" + hi-mem: "true" + node: + resources: + requests: + cpu: "7.5" + memory: "55Gi" + + nodeJsOptions: + - "--max-old-space-size=61440" + statefulSet: + 
volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ReadWriteOnce] + resources: + requests: + storage: 16Gi diff --git a/yarn-project/end-to-end/src/spartan/block_capacity.test.ts b/yarn-project/end-to-end/src/spartan/block_capacity.test.ts new file mode 100644 index 000000000000..22caa27180ea --- /dev/null +++ b/yarn-project/end-to-end/src/spartan/block_capacity.test.ts @@ -0,0 +1,470 @@ +import { SchnorrAccountContract } from '@aztec/accounts/schnorr'; +import { AztecAddress } from '@aztec/aztec.js/addresses'; +import { type ContractFunctionInteraction, NO_WAIT, toSendOptions } from '@aztec/aztec.js/contracts'; +import { SponsoredFeePaymentMethod } from '@aztec/aztec.js/fee'; +import { type AztecNode, createAztecNodeClient, waitForTx } from '@aztec/aztec.js/node'; +import { AccountManager } from '@aztec/aztec.js/wallet'; +import { asyncPool } from '@aztec/foundation/async-pool'; +import { BlockNumber } from '@aztec/foundation/branded-types'; +import { times } from '@aztec/foundation/collection'; +import { Fr } from '@aztec/foundation/curves/bn254'; +import { createLogger } from '@aztec/foundation/log'; +import { retryUntil } from '@aztec/foundation/retry'; +import { TokenContract } from '@aztec/noir-contracts.js/Token'; +import { BenchmarkingContract } from '@aztec/noir-test-contracts.js/Benchmarking'; +import { GasFees } from '@aztec/stdlib/gas'; +import { deriveSigningKey } from '@aztec/stdlib/keys'; +import { Tx } from '@aztec/stdlib/tx'; + +import { jest } from '@jest/globals'; +import { mkdir, writeFile } from 'fs/promises'; +import { dirname } from 'path'; + +import { getSponsoredFPCAddress, registerSponsoredFPC } from '../fixtures/utils.js'; +import type { WorkerWallet } from '../test-wallet/worker_wallet.js'; +import { type WorkerWalletWrapper, createWorkerWalletClient } from './setup_test_wallets.js'; +import { + fetchBlockBuiltLogs, + getExternalIP, + getSequencersConfig, + setupEnvironment, + updateSequencersConfig, +} from 
'./utils.js'; + +const config = setupEnvironment(process.env); +const txRealProofs = config.REAL_VERIFIER || config.DEBUG_FORCE_TX_PROOF_VERIFICATION; + +const BENCH_TESTS = [ + ['noop', 100], + ['noop_pub', 100], + ['emit_nullifiers', 100], + ['emit_note_hashes', 100], + ['emit_l2_to_l1_msgs', 100], + ['emit_private_logs', 88], // we run out of blob space + ['emit_contract_class_log', 8], +] as const; + +const TOKEN_TESTS = [ + // intentional comment - for file fomatting + ['transfer_in_public', 100], +] as const; + +const maxTxs = Math.max(...[...BENCH_TESTS, ...TOKEN_TESTS].map(t => t[1])); +const NUM_WALLETS = txRealProofs ? Math.min(10, maxTxs) : 1; + +describe('block capacity benchmark', () => { + jest.setTimeout(60 * 60 * 1000); // 60 minutes + + const logger = createLogger('e2e:spartan-test:block-capacity'); + + let testWallets: WorkerWalletWrapper[]; + let wallets: WorkerWallet[]; + let accountAddresses: AztecAddress[]; + let aztecNode: AztecNode; + let originalSequencerConfig: Awaited> | undefined; + const benchmarkData: Array<{ name: string; unit: string; value: number }> = []; + + beforeAll(async () => { + logger.info('Setting up block capacity benchmark', { + numWallets: NUM_WALLETS, + txRealProofs, + namespace: config.NAMESPACE, + }); + + await updateSequencersConfig(config, { minTxsPerBlock: 0 }); + + const rpcIP = await getExternalIP(config.NAMESPACE, 'rpc-aztec-node'); + const rpcUrl = `http://${rpcIP}:8080`; + aztecNode = createAztecNodeClient(rpcUrl); + + // Wait for node to be ready + await retryUntil(async () => await aztecNode.isReady(), 'node ready', 120, 1); + logger.info('Node is ready'); + + // Save original sequencer config for restoration + originalSequencerConfig = await getSequencersConfig(config); + logger.info('Saved original sequencer config', { + minTxsPerBlock: originalSequencerConfig[0]?.minTxsPerBlock, + }); + + // Create WorkerWallets in parallel + logger.info(`Creating ${NUM_WALLETS} worker wallet(s)...`); + testWallets = 
await Promise.all( + Array.from({ length: NUM_WALLETS }, (_, i) => { + logger.info(`Creating wallet ${i + 1}/${NUM_WALLETS}`); + return createWorkerWalletClient(rpcUrl, txRealProofs, logger); + }), + ); + wallets = testWallets.map(tw => tw.wallet); + + // Register FPC and create/deploy accounts in parallel + const fpcAddress = await getSponsoredFPCAddress(); + const sponsor = new SponsoredFeePaymentMethod(fpcAddress); + accountAddresses = await Promise.all( + wallets.map(async wallet => { + const secret = Fr.random(); + const salt = Fr.random(); + const address = await wallet.registerAccount(secret, salt); + await registerSponsoredFPC(wallet); + const manager = await AccountManager.create( + wallet, + secret, + new SchnorrAccountContract(deriveSigningKey(secret)), + salt, + ); + const deployMethod = await manager.getDeployMethod(); + await deployMethod.send({ + from: AztecAddress.ZERO, + fee: { paymentMethod: sponsor }, + wait: { timeout: 2400 }, + }); + logger.info(`Account deployed at ${address}`); + return address; + }), + ); + }); + + afterAll(async () => { + // Write benchmark output if configured + if (process.env.BENCH_OUTPUT && benchmarkData.length > 0) { + const scenario = process.env.BENCH_SCENARIO?.trim(); + const finalData = scenario + ? 
benchmarkData.map(e => ({ ...e, name: `scenario/${scenario}/${e.name}` })) + : benchmarkData; + await mkdir(dirname(process.env.BENCH_OUTPUT), { recursive: true }); + await writeFile(process.env.BENCH_OUTPUT, JSON.stringify(finalData)); + logger.info('Wrote benchmark output', { path: process.env.BENCH_OUTPUT, entries: finalData.length }); + } + + // Restore original sequencer config + if (originalSequencerConfig?.[0]) { + logger.info('Restoring original sequencer config'); + await updateSequencersConfig(config, originalSequencerConfig[0]); + } + + if (testWallets) { + for (const tw of testWallets) { + await tw.cleanup(); + } + } + + logger.info('Cleanup complete'); + }); + + /** Creates and proves a single tx from a contract interaction. */ + async function createProvableTx( + wallet: WorkerWallet, + accountAddress: AztecAddress, + interaction: ContractFunctionInteraction, + ): Promise { + const sponsor = new SponsoredFeePaymentMethod(await getSponsoredFPCAddress()); + const options = { + from: accountAddress, + fee: { paymentMethod: sponsor, gasSettings: { maxPriorityFeesPerGas: GasFees.empty() } }, + }; + const execPayload = await interaction.request(options); + return wallet.proveTx(execPayload, toSendOptions(options)); + } + + /** Pre-proves TX_COUNT txs, either in parallel batches or by cloning a prototype. 
*/ + async function proveOrCloneTxs( + txCount: number, + createPrototypeFn: (wallet: WorkerWallet, accountAddress: AztecAddress) => Promise, + ): Promise { + const txs: Tx[] = []; + if (txRealProofs) { + for (let i = 0; i < txCount; i += NUM_WALLETS) { + const batchSize = Math.min(NUM_WALLETS, txCount - txs.length); + const batchTxs = await Promise.all(times(batchSize, j => createPrototypeFn(wallets[j], accountAddresses[j]))); + txs.push(...batchTxs); + logger.info(`Proved ${txs.length}/${txCount} txs`); + } + } else { + const prototypeTx = await createPrototypeFn(wallets[0], accountAddresses[0]); + logger.info('Prototype tx proved, cloning...'); + for (let i = 0; i < txCount; i++) { + txs.push(await cloneTx(prototypeTx, aztecNode)); + if ((i + 1) % 10 === 0 || i === txCount - 1) { + logger.info(`Cloned ${i + 1}/${txCount} txs`); + } + } + } + return txs; + } + + /** Floods the mempool with pre-proven txs and measures block capacity. */ + async function floodAndMeasure( + label: string, + provenTxs: Tx[], + ): Promise<{ blockTxCounts: { blockNumber: number; txCount: number }[]; enabledAt: string }> { + const epochDurationSec = 2 * config.AZTEC_EPOCH_DURATION * config.AZTEC_SLOT_DURATION; // wait for up to two epochs (these are shorter epochs than standard) + const txCount = provenTxs.length; + + // 0. wait for the mempool to clear + await retryUntil( + async () => { + const pendingTxs = await aztecNode.getPendingTxCount(); + + if (pendingTxs > 0) { + logger.info(`Waiting for mempool to clear before sending test txs: ${pendingTxs} pending txs left.`); + return false; + } else { + return true; + } + }, + 'clear pending txs', + epochDurationSec, + 1, + ); + + // 1. 
Disable block building by setting minTxsPerBlock extremely high + logger.info(`[${label}] Disabling block building`); + await updateSequencersConfig(config, { minTxsPerBlock: 999_999_999 }); + await retryUntil( + async () => { + const configs = await getSequencersConfig(config); + return configs.every(c => c.minTxsPerBlock === 999_999_999); + }, + 'disable block building', + 60, + 1, + ); + logger.info(`[${label}] Block building disabled`); + + const blockBeforeFlood = await aztecNode.getBlockNumber(); + logger.info(`[${label}] Block number before flood`, { blockBeforeFlood }); + + // 2. Send all pre-proven txs to mempool + logger.info(`[${label}] Sending ${provenTxs.length} pre-proven txs to mempool`); + const sendStartMs = Date.now(); + + let sentCount = 0; + const txSize = provenTxs[0].toBuffer().length; + logger.info(`Tx size: ${(txSize / 1024 / 1024).toFixed(2)}MB (${txSize} bytes)`); + // dynamically adjust how many txs we can send to stay below 1MB + await asyncPool(Math.max(1, Math.floor((0.5 * 1024 * 1024) / txSize)), provenTxs, async tx => { + await aztecNode.sendTx(tx); + sentCount++; + if (sentCount % 10 === 0 || sentCount === provenTxs.length) { + logger.info(`[${label}] Sent ${sentCount}/${provenTxs.length} txs`); + } + }); + + const sendDurationMs = Date.now() - sendStartMs; + logger.info(`[${label}] All ${provenTxs.length} txs sent to mempool`, { sendDurationMs }); + + // 3. Re-enable block building + const enabledAt = new Date().toISOString(); + logger.info(`[${label}] Re-enabling block building`); + await updateSequencersConfig(config, { minTxsPerBlock: 1, enforceTimeTable: true }); + await retryUntil( + async () => { + const configs = await getSequencersConfig(config); + return configs.every(c => c.minTxsPerBlock === 1); + }, + 'enable block building', + 60, + 1, + ); + logger.info(`[${label}] Block building re-enabled`); + + // 4. 
Wait for blocks and observe inclusion + let totalTxsMined = 0; + const blockTxCounts: { blockNumber: number; txCount: number }[] = []; + + await retryUntil( + async () => { + const currentBlock = await aztecNode.getBlockNumber(); + for (let bn = blockBeforeFlood + 1; bn <= currentBlock; bn++) { + if (blockTxCounts.some(b => b.blockNumber === bn)) { + continue; + } + const block = await aztecNode.getBlock(BlockNumber(bn)); + if (block) { + const txCount = block.body.txEffects.length; + blockTxCounts.push({ blockNumber: bn, txCount }); + totalTxsMined += txCount; + logger.info(`[${label}] Block ${bn}: ${txCount} txs (total mined: ${totalTxsMined}/${txCount})`); + } + } + return totalTxsMined >= txCount; + }, + 'all txs mined', + epochDurationSec, + 1, + ); + + // Log summary + logger.info(`=== Block Capacity Benchmark Results (${label}) ===`); + logger.info(`Total txs sent: ${txCount}`); + logger.info(`Total txs mined: ${totalTxsMined}`); + logger.info(`Blocks produced: ${blockTxCounts.length}`); + for (const { blockNumber, txCount } of blockTxCounts) { + logger.info(` Block ${blockNumber}: ${txCount} txs`); + } + + if (blockTxCounts.length > 0) { + const maxTxsInBlock = Math.max(...blockTxCounts.map(b => b.txCount)); + const avgTxsPerBlock = totalTxsMined / blockTxCounts.length; + logger.info(`Max txs in a single block: ${maxTxsInBlock}`); + logger.info(`Avg txs per block: ${avgTxsPerBlock.toFixed(1)}`); + } + + expect(totalTxsMined).toBeGreaterThanOrEqual(txCount); + + return { blockTxCounts, enabledAt }; + } + + /** Fetches block-built stats from sequencer logs and records benchmark metrics for each block. 
*/ + async function recordBlockBuiltMetrics( + label: string, + blockTxCounts: { blockNumber: number; txCount: number }[], + enabledAt: string, + ): Promise { + const blockNumbers = new Set(blockTxCounts.map(b => b.blockNumber)); + const entries = await fetchBlockBuiltLogs(config.NAMESPACE, enabledAt, blockNumbers, logger); + + if (entries.length === 0) { + logger.warn(`[${label}] No block-built log entries found, skipping benchmark metrics`); + return; + } + + // Record metrics for each block (entries are sorted by blockNumber ascending) + for (let i = 0; i < entries.length; i++) { + const entry = entries[i]; + const prefix = `block_capacity/${label}/block_${i}`; + benchmarkData.push( + { name: `${prefix}/duration`, unit: 'ms', value: entry.duration }, + { name: `${prefix}/tx_count`, unit: 'count', value: entry.txCount }, + { name: `${prefix}/mana_per_sec`, unit: 'mana/s', value: entry.manaPerSec }, + { name: `${prefix}/public_process_duration`, unit: 'ms', value: entry.publicProcessDuration }, + { name: `${prefix}/private_log_count`, unit: 'count', value: entry.privateLogCount }, + { name: `${prefix}/public_log_count`, unit: 'count', value: entry.publicLogCount }, + { name: `${prefix}/contract_class_log_count`, unit: 'count', value: entry.contractClassLogCount }, + { name: `${prefix}/contract_class_log_size`, unit: 'fields', value: entry.contractClassLogSize }, + ); + logger.info(`[${label}] Recorded benchmark metrics from block ${entry.blockNumber} (index ${i})`, entry); + } + + benchmarkData.push({ + name: `block_capacity/${label}/blocks_produced`, + unit: 'count', + value: entries.length, + }); + } + + describe('Benchmark contract', () => { + let benchmarkContract: BenchmarkingContract; + + beforeAll(async () => { + const sponsor = new SponsoredFeePaymentMethod(await getSponsoredFPCAddress()); + // Deploy BenchmarkingContract using the first wallet + logger.info('Deploying benchmark contract...'); + benchmarkContract = await 
BenchmarkingContract.deploy(wallets[0]).send({ + from: accountAddresses[0], + fee: { paymentMethod: sponsor }, + }); + logger.info('BenchmarkingContract deployed', { address: benchmarkContract.address.toString() }); + + // Register benchmark contract with all other wallets + const benchMetadata = await wallets[0].getContractMetadata(benchmarkContract.address); + await Promise.all( + wallets.slice(1).map(wallet => wallet.registerContract(benchMetadata.instance!, BenchmarkingContract.artifact)), + ); + logger.info('Benchmark contract registered with all wallets'); + }); + it.each(BENCH_TESTS)('measures block capacity with %s', async (fnName, txCount) => { + logger.info(`Pre-proving ${txCount} ${fnName} txs...`); + const txs = await proveOrCloneTxs(txCount, (wallet, addr) => { + const contract = BenchmarkingContract.at(benchmarkContract.address, wallet); + return createProvableTx(wallet, addr, contract.methods[fnName]()); + }); + logger.info(`All ${txCount} ${fnName} txs pre-proven`); + const { blockTxCounts, enabledAt } = await floodAndMeasure(fnName, txs); + await recordBlockBuiltMetrics(fnName, blockTxCounts, enabledAt); + }); + }); + + describe('Token contract', () => { + let tokenContract: TokenContract; + + beforeAll(async () => { + const sponsor = new SponsoredFeePaymentMethod(await getSponsoredFPCAddress()); + // Deploy TokenContract using the first wallet + logger.info('Deploying token contract...'); + tokenContract = await TokenContract.deploy(wallets[0], accountAddresses[0], 'USDC', 'USD', 18n).send({ + from: accountAddresses[0], + fee: { paymentMethod: sponsor }, + wait: { timeout: 600 }, + }); + logger.info('TokenContract deployed', { address: tokenContract.address.toString() }); + + // Register token contract with all other wallets + const tokenMetadata = await wallets[0].getContractMetadata(tokenContract.address); + await Promise.all( + wallets.slice(1).map(wallet => wallet.registerContract(tokenMetadata.instance!, TokenContract.artifact)), + ); + 
logger.info('Token contract registered with all wallets'); + + // Mint tokens publicly to each account (enough for TX_COUNT transfers). + // Send sequentially to avoid PXE concurrency issues, then wait in parallel. + logger.info(`Minting 1e18 tokens to each account...`); + const mintTxHashes = []; + for (const acc of accountAddresses) { + const txHash = await TokenContract.at(tokenContract.address, wallets[0]) + .methods.mint_to_public(acc, 10n ** 18n) + .send({ from: accountAddresses[0], fee: { paymentMethod: sponsor }, wait: NO_WAIT }); + mintTxHashes.push(txHash); + } + await Promise.all(mintTxHashes.map(txHash => waitForTx(aztecNode, txHash, { timeout: 600 }))); + logger.info('Minting complete'); + }); + + it.each(TOKEN_TESTS)('measures block capacity with public token transfers', async (fnName, txCount) => { + // Each account transfers 1 token to a "sink" address. + // Note: For the clone path, all cloned txs share the same sender/recipient/amount. + // Public state conflicts may cause some cloned txs to fail during execution. + const recipient = accountAddresses[0]; + logger.info(`Pre-proving ${txCount} ${fnName} txs...`); + const txs = await proveOrCloneTxs(txCount, (wallet, addr) => { + const token = TokenContract.at(tokenContract.address, wallet); + return createProvableTx(wallet, addr, token.methods[fnName](addr, recipient, 1n, 0)); + }); + logger.info(`All ${txCount} ${fnName} txs pre-proven`); + const { blockTxCounts, enabledAt } = await floodAndMeasure(fnName, txs); + await recordBlockBuiltMetrics(fnName, blockTxCounts, enabledAt); + }); + }); +}); + +/** Clones a proven tx, randomizing nullifiers and updating fees so each clone is unique. 
*/ +async function cloneTx(tx: Tx, aztecNode: AztecNode): Promise { + const clonedTx = Tx.clone(tx, false); + + // Fetch current minimum fees and apply 50% buffer for safety + const currentFees = await aztecNode.getCurrentMinFees(); + const paddedFees = currentFees.mul(1.5); + + // Update gas settings with current fees + (clonedTx.data.constants.txContext.gasSettings as any).maxFeesPerGas = paddedFees; + + // Randomize nullifiers to avoid conflicts + if (clonedTx.data.forRollup) { + for (let i = 0; i < clonedTx.data.forRollup.end.nullifiers.length; i++) { + if (clonedTx.data.forRollup.end.nullifiers[i].isZero()) { + continue; + } + clonedTx.data.forRollup.end.nullifiers[i] = Fr.random(); + } + } else if (clonedTx.data.forPublic) { + for (let i = 0; i < clonedTx.data.forPublic.nonRevertibleAccumulatedData.nullifiers.length; i++) { + if (clonedTx.data.forPublic.nonRevertibleAccumulatedData.nullifiers[i].isZero()) { + continue; + } + clonedTx.data.forPublic.nonRevertibleAccumulatedData.nullifiers[i] = Fr.random(); + } + } + + await clonedTx.recomputeHash(); + return clonedTx; +} diff --git a/yarn-project/end-to-end/src/spartan/utils/config.ts b/yarn-project/end-to-end/src/spartan/utils/config.ts index 3794cba69156..b93637df7429 100644 --- a/yarn-project/end-to-end/src/spartan/utils/config.ts +++ b/yarn-project/end-to-end/src/spartan/utils/config.ts @@ -8,6 +8,7 @@ const logger = createLogger('e2e:k8s-utils'); const testConfigSchema = z.object({ NAMESPACE: z.string().default('scenario'), REAL_VERIFIER: schemas.Boolean.optional().default(true), + DEBUG_FORCE_TX_PROOF_VERIFICATION: schemas.Boolean.optional().default(true), CREATE_ETH_DEVNET: schemas.Boolean.optional().default(false), L1_RPC_URLS_JSON: z.string().optional(), L1_ACCOUNT_MNEMONIC: z.string().optional(), diff --git a/yarn-project/end-to-end/src/spartan/utils/index.ts b/yarn-project/end-to-end/src/spartan/utils/index.ts index b4ecc612825f..942468858e1a 100644 --- 
a/yarn-project/end-to-end/src/spartan/utils/index.ts +++ b/yarn-project/end-to-end/src/spartan/utils/index.ts @@ -63,3 +63,6 @@ export { getPublicViemClient, getL1DeploymentAddresses, getNodeClient } from './ // Health checks export { ChainHealth, type ChainHealthSnapshot } from './health.js'; + +// Pod log extraction +export { type BlockBuiltLogEntry, fetchBlockBuiltLogs } from './pod_logs.js'; diff --git a/yarn-project/end-to-end/src/spartan/utils/pod_logs.ts b/yarn-project/end-to-end/src/spartan/utils/pod_logs.ts new file mode 100644 index 000000000000..b716275b7514 --- /dev/null +++ b/yarn-project/end-to-end/src/spartan/utils/pod_logs.ts @@ -0,0 +1,99 @@ +import type { Logger } from '@aztec/foundation/log'; + +import { exec } from 'child_process'; +import { promisify } from 'util'; + +import { getSequencers } from './nodes.js'; + +const execAsync = promisify(exec); + +/** Parsed l2-block-built stats from a sequencer pod log line. */ +export type BlockBuiltLogEntry = { + blockNumber: number; + txCount: number; + duration: number; + publicProcessDuration: number; + manaPerSec: number; + privateLogCount: number; + publicLogCount: number; + contractClassLogCount: number; + contractClassLogSize: number; +}; + +const FIELDS: (keyof BlockBuiltLogEntry)[] = [ + 'blockNumber', + 'txCount', + 'duration', + 'publicProcessDuration', + 'manaPerSec', + 'privateLogCount', + 'publicLogCount', + 'contractClassLogCount', + 'contractClassLogSize', +]; + +/** + * Fetches l2-block-built log entries from sequencer pods for given block numbers. + * Queries all validator pods (only the proposer will have the log for a given block). 
+ * + * @param namespace - Kubernetes namespace + * @param sinceTime - ISO 8601 timestamp to limit log search (e.g., from before block building was re-enabled) + * @param blockNumbers - Set of block numbers to filter for + * @param logger - Logger instance + * @returns Array of parsed BlockBuiltLogEntry, de-duplicated by blockNumber, sorted ascending + */ +export async function fetchBlockBuiltLogs( +  namespace: string, +  sinceTime: string, +  blockNumbers: Set<number>, +  logger: Logger, +): Promise<BlockBuiltLogEntry[]> { +  const pods = await getSequencers(namespace); +  const entriesByBlock = new Map<number, BlockBuiltLogEntry>(); + +  // Subtract 60s from sinceTime to account for clock skew between test runner and k8s pods. +  // Block number filtering ensures we only match the right blocks, so extra lines are harmless. +  const sinceDate = new Date(new Date(sinceTime).getTime() - 60_000); +  const sinceFlag = sinceDate.toISOString(); + +  for (const pod of pods) { +    try { +      const cmd = `kubectl logs ${pod} -n ${namespace} -c aztec --since-time=${sinceFlag}`; +      logger.info(`Fetching logs: ${cmd}`); +      const { stdout } = await execAsync(cmd, { maxBuffer: 10 * 1024 * 1024 }); + +      const lines = stdout.split('\n'); +      const matchingLines = lines.filter(l => l.includes('l2-block-built')); +      logger.info(`Pod ${pod}: ${lines.length} log lines, ${matchingLines.length} contain l2-block-built`); + +      for (const line of matchingLines) { +        try { +          const parsed = JSON.parse(line); +          if (parsed.eventName !== 'l2-block-built' || !blockNumbers.has(parsed.blockNumber)) { +            continue; +          } +          if (entriesByBlock.has(parsed.blockNumber)) { +            continue; +          } +          const entry: BlockBuiltLogEntry = {} as BlockBuiltLogEntry; +          for (const field of FIELDS) { +            entry[field] = parsed[field] ?? 
0; + } + entriesByBlock.set(entry.blockNumber, entry); + logger.verbose(`Parsed l2-block-built log for block ${entry.blockNumber}`, entry); + } catch { + // Not valid JSON, skip + } + } + } catch (err) { + logger.warn(`Failed to fetch logs from pod ${pod}: ${err}`); + } + } + + if (entriesByBlock.size < blockNumbers.size) { + const missing = [...blockNumbers].filter(bn => !entriesByBlock.has(bn)); + logger.warn(`Missing l2-block-built logs for block(s): ${missing.join(', ')}`); + } + + return [...entriesByBlock.values()].sort((a, b) => a.blockNumber - b.blockNumber); +}