diff --git a/README-spacetime.md b/README-spacetime.md deleted file mode 100644 index 4185b0f46876..000000000000 --- a/README-spacetime.md +++ /dev/null @@ -1,74 +0,0 @@ -# Spacetime profiling - -Spacetime is a feature for showing memory usage over time within OCaml programs. It's useful -for revealing the location of memory leaks. - -## Building for Spacetime - -To use Spacetime, you need an OCaml compiler built with that feature enabled. There are -Spacetime-enabled compiler switches within OPAM. For example, there's the switch -4.07.1+spacetime, which works with same OPAM packages as vanilla 4.07.1. To build -with it in your environment, install the switch: - -``` - opam switch create 4.07.1+spacetime # or the current OCaml version for building Coda - eval $(opam env) -``` -Building some OPAM packages with Spacetime requires a large amount of stack space, so you -may need to increase the available stack provided by your shell. For bash: -``` - ulimit -s unlimited -``` -Then follow the instructions for "Building outside docker" given in README-dev.md. - -## Running for Spacetime - -Once the Coda executable has been built for Spacetime, you can run it as you would otherwise. -To obtain Spacetime profiling data, set the environment variable `OCAML_SPACETIME_INTERVAL` -to the sampling interval, in milliseconds. In practice, we've found that 10 seconds works well: - -``` - export OCAML_SPACETIME_INTERVAL=10000 -``` - -With that setting running a Coda daemon for an hour generates perhaps 500 Mb of profile data. -When done profiling, terminate the daemon with Ctrl-C, to make sure the profile data is fully -written. - -You can specify a directory where the profiling data is written, using the environment -variable `OCAML_SPACETIME_SNAPSHOT_DIR`. - -## Viewing Spacetime profiles - -Spacetime generates a profile file for each process that you run, with the name `spacetime-`. -To view the data, install the OPAM package `prof_spacetime`: - -``` - opam install prof_spacetime -``` - -You can view the profile data in a terminal, but it's probably more useful to view the profile -as interactive graphs via Web browser. To speed up viewing, you should preprocess the -profile file: - -``` - prof_spacetime process spacetime- -``` - -That generates a file `spacetime-.p`. Now start a Web server: - -``` - prof_spacetime serve -p spacetime-.p -``` - -That starts a Web server on `127.0.0.1` with port 8080. You can use the flags `--address` and -`--port` to change those options. - -It may take several seconds for the graph to be visible. You can see three different graphs, -live bytes, live words, and all allocated words. All of them show different allocation points -with different colors, and the amount of allocation over time. 
- -The following blog article describes how to use the graphs in more detail, and also describes -viewing the profiles in a terminal: - -* [A Brief Trip Through Spacetime](https://blog.janestreet.com/a-brief-trip-through-spacetime/) diff --git a/buildkite/scripts/debian/start_local_repo.sh b/buildkite/scripts/debian/start_local_repo.sh index b4d724a59ad6..dfe6ab404142 100755 --- a/buildkite/scripts/debian/start_local_repo.sh +++ b/buildkite/scripts/debian/start_local_repo.sh @@ -5,6 +5,22 @@ if [ -z $MINA_DEB_CODENAME ]; then exit 1 fi +# Parse command line arguments +ARCH="amd64" # default architecture + +while [[ $# -gt 0 ]]; do + case $1 in + --arch) + ARCH="$2" + shift 2 + ;; + *) + echo "Unknown option: $1" + exit 1 + ;; + esac +done + LOCAL_DEB_FOLDER=_build set -eou pipefail @@ -16,4 +32,4 @@ mkdir -p $LOCAL_DEB_FOLDER source ./buildkite/scripts/export-git-env-vars.sh ./buildkite/scripts/cache/manager.sh read --root legacy/debians "$MINA_DEB_CODENAME/*" _build ./buildkite/scripts/cache/manager.sh read "debians/$MINA_DEB_CODENAME/*" _build -./scripts/debian/aptly.sh start --codename $MINA_DEB_CODENAME --debians $LOCAL_DEB_FOLDER --component unstable --clean --background --wait \ No newline at end of file +./scripts/debian/aptly.sh start --codename $MINA_DEB_CODENAME --debians $LOCAL_DEB_FOLDER --component unstable --clean --background --wait --archs $ARCH \ No newline at end of file diff --git a/buildkite/scripts/release/manager.sh b/buildkite/scripts/release/manager.sh index 5aa0aef38aa6..90aa29707d00 100755 --- a/buildkite/scripts/release/manager.sh +++ b/buildkite/scripts/release/manager.sh @@ -1915,6 +1915,10 @@ function pull(){ __networks=${2:?$error_message} shift 2; ;; + --archs ) + __archs=${2:?$error_message} + shift 2; + ;; * ) echo -e "${RED} !! 
Unknown option: $1${CLEAR}\n"; echo ""; @@ -1936,6 +1940,8 @@ function pull(){ echo " - Target: $__target" echo " - Codenames: $__codenames" echo " - Networks: $__networks" + echo " - Architectures: $__archs" + if [[ -n ${__from_special_folder+x} ]]; then echo " - From special folder: $__from_special_folder" fi @@ -1947,26 +1953,28 @@ function pull(){ read -r -a __artifacts_arr <<< "$__artifacts" read -r -a __codenames_arr <<< "$__codenames" read -r -a __networks_arr <<< "$__networks" + read -r -a __archs_arr <<< "$__archs" - for __artifact in "${__artifacts_arr[@]}"; do - for __codename in "${__codenames_arr[@]}"; do - for network in "${__networks_arr[@]}"; do - echo " πŸ“₯ Pulling $__artifact for $__codename codename and $network network" - local __artifact_full_name - local __source_path - __artifact_full_name=$(get_artifact_with_suffix $__artifact $network) - - if [[ -n ${__from_special_folder+x} ]]; then - __source_path="$(storage_root "$__backend")/$__from_special_folder/${__artifact_full_name}_*" - else - __source_path="$(storage_root "$__backend")/$__buildkite_build_id/debians/$__codename/${__artifact_full_name}_*" - fi - - storage_download "$__backend" "$__source_path" "$__target" + for __arch in "${__archs_arr[@]}"; do + for __artifact in "${__artifacts_arr[@]}"; do + for __codename in "${__codenames_arr[@]}"; do + for network in "${__networks_arr[@]}"; do + echo " πŸ“₯ Pulling $__artifact for $__codename codename and $network network" + local __artifact_full_name + local __source_path + __artifact_full_name=$(get_artifact_with_suffix $__artifact $network) + + if [[ -n ${__from_special_folder+x} ]]; then + __source_path="$(storage_root "$__backend")/$__from_special_folder/${__artifact_full_name}_*_${__arch}.deb" + else + __source_path="$(storage_root "$__backend")/$__buildkite_build_id/debians/$__codename/${__artifact_full_name}_*_${__arch}.deb" + fi + + storage_download "$__backend" "$__source_path" "$__target" + done done done done - echo " βœ… Done." 
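# A minimal, runnable sketch (not part of the original script) of the pattern
# the pull() change above relies on: the space-separated --archs value is split
# into a bash array with `read -r -a`, and each architecture then matches only
# its own packages, since Debian artifacts follow the name_version_arch.deb
# filename convention. The package name in the glob below is a hypothetical
# example.
archs="amd64 arm64"
read -r -a archs_arr <<< "$archs"
for arch in "${archs_arr[@]}"; do
  echo "would match: mina-archive_*_${arch}.deb"
done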
echo "" } diff --git a/buildkite/src/Command/DockerImage.dhall b/buildkite/src/Command/DockerImage.dhall index 236ddbd95d14..1c3eff265672 100644 --- a/buildkite/src/Command/DockerImage.dhall +++ b/buildkite/src/Command/DockerImage.dhall @@ -109,7 +109,8 @@ let generateStep = then " && echo Skipping local debian repo setup " else " && ./buildkite/scripts/debian/update.sh --verbose" - ++ " && apt-get install aptly -y && ./buildkite/scripts/debian/start_local_repo.sh" + ++ " && apt-get install aptly -y && ./buildkite/scripts/debian/start_local_repo.sh --arch ${Arch.lowerName + spec.arch}" let maybeStopDebianRepo = if spec.no_debian diff --git a/buildkite/src/Command/MinaArtifact.dhall b/buildkite/src/Command/MinaArtifact.dhall index 4835722a099d..98a1bb1eb1a2 100644 --- a/buildkite/src/Command/MinaArtifact.dhall +++ b/buildkite/src/Command/MinaArtifact.dhall @@ -139,7 +139,7 @@ let build_artifacts ] , label = "Debian: Build ${labelSuffix spec}" , key = "build-deb-pkg${Optional/default Text "" spec.suffix}" - , target = Size.XLarge + , target = Size.Multi , if_ = spec.if_ , retries = [ Command.Retry::{ diff --git a/buildkite/src/Constants/ContainerImages.dhall b/buildkite/src/Constants/ContainerImages.dhall index 5c7450903475..32802117a691 100644 --- a/buildkite/src/Constants/ContainerImages.dhall +++ b/buildkite/src/Constants/ContainerImages.dhall @@ -5,22 +5,22 @@ { toolchainBase = "codaprotocol/ci-toolchain-base:v3" , minaToolchainBookworm = { amd64 = - "gcr.io/o1labs-192920/mina-toolchain@sha256:dfa8a0eb32742900d890590875a7f7436545cd46d8c4ff147fc6a29997e5d4f3" + "gcr.io/o1labs-192920/mina-toolchain@sha256:a3517722b1573ac19f9361d42aa0b5f15a108d62fa73f97f0a74e195af1a2e90" , arm64 = - "gcr.io/o1labs-192920/mina-toolchain@sha256:8e92e0b4c9202e0e5f31afd48713d28bde903959ff7e55cbc1c080b0a8df5e3d" + "gcr.io/o1labs-192920/mina-toolchain@sha256:b4a7040b64473f89e51372ab6e11f9332730e085766aa157c25e28c6b9c0c6d4" } , minaToolchainBullseye.amd64 = - "gcr.io/o1labs-192920/mina-toolchain@sha256:bcc6b9899d5d99c83287c2735fb686a6169268d48b11262d9dfa03c1dfd0cece" + "gcr.io/o1labs-192920/mina-toolchain@sha256:524a3fb77d6702f38ad63ec737e398478e082387753b8d01e1ccf607d2917343" , minaToolchainNoble = { amd64 = - "gcr.io/o1labs-192920/mina-toolchain@sha256:0c6f1c0921c7f76be7b86948e1f9e82d8270002fc19f0b48647bb1604489268f" + "gcr.io/o1labs-192920/mina-toolchain@sha256:3fd6ebfedb905ba358903639c2ebcdbd2277316b854d80f12c74df8dcd83734b" , arm64 = - "gcr.io/o1labs-192920/mina-toolchain@sha256:182f0aa05988c5a00cc1ca5a5b651904282f3a0f7cd75faabe0a52e7d332cecb" + "gcr.io/o1labs-192920/mina-toolchain@sha256:adfff17bf21b79efc3b2d54d0648049a36fa7b54b732773bd7f2f651869c1f54" } , minaToolchainJammy.amd64 = - "gcr.io/o1labs-192920/mina-toolchain@sha256:a3e5c7dc30c67d8a9769deee282f0b5b2a513629bf272aa9b9ec7d6aee68a4e4" + "gcr.io/o1labs-192920/mina-toolchain@sha256:6db30a6faf94f7dcc23a14bcbc01d558b2164955067fd21eb92a981fe231dfe1" , minaToolchain = - "gcr.io/o1labs-192920/mina-toolchain@sha256:bcc6b9899d5d99c83287c2735fb686a6169268d48b11262d9dfa03c1dfd0cece" + "gcr.io/o1labs-192920/mina-toolchain@sha256:524a3fb77d6702f38ad63ec737e398478e082387753b8d01e1ccf607d2917343" , postgres = "postgres:12.4-alpine" , xrefcheck = "dkhamsing/awesome_bot@sha256:a8adaeb3b3bd5745304743e4d8a6d512127646e420544a6d22d9f58a07f35884" diff --git a/buildkite/src/Jobs/Test/RocksDBLedgerTarCompatibilityTest.dhall b/buildkite/src/Jobs/Test/RocksDBLedgerTarCompatibilityTest.dhall new file mode 100644 index 000000000000..7b88d401d72d --- /dev/null +++ 
b/buildkite/src/Jobs/Test/RocksDBLedgerTarCompatibilityTest.dhall @@ -0,0 +1,53 @@ +let S = ../../Lib/SelectFiles.dhall + +let Pipeline = ../../Pipeline/Dsl.dhall + +let PipelineTag = ../../Pipeline/Tag.dhall + +let JobSpec = ../../Pipeline/JobSpec.dhall + +let Command = ../../Command/Base.dhall + +let Size = ../../Command/Size.dhall + +let Cmd = ../../Lib/Cmds.dhall + +let Docker = ../../Command/Docker/Type.dhall + +let commands = + [ Cmd.run + "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --quiet --yes python3 python3-pip build-essential sudo curl" + , Cmd.run "./scripts/rocksdb-compatibility/install-rocksdb.sh" + , Cmd.run + "pip3 install --break-system-packages -r ./scripts/rocksdb-compatibility/requirements.txt" + , Cmd.run "python3 ./scripts/rocksdb-compatibility/test.py" + ] + +in Pipeline.build + Pipeline.Config::{ + , spec = JobSpec::{ + , dirtyWhen = + [ S.strictlyStart (S.contains "scripts/rocksdb-compatibility") + , S.exactly + "buildkite/src/Jobs/Test/RocksDBLedgerTarCompatibilityTest" + "dhall" + ] + , path = "Test" + , name = "RocksDBLedgerTarCompatibilityTest" + , tags = + [ PipelineTag.Type.Fast + , PipelineTag.Type.Test + , PipelineTag.Type.Stable + ] + } + , steps = + [ Command.build + Command.Config::{ + , commands = commands + , label = "Check RocksDB Ledger Tar Compatibility" + , key = "test" + , target = Size.Multi + , docker = Some Docker::{ image = "ubuntu:noble" } + } + ] + } diff --git a/buildkite/src/Jobs/Test/TestnetIntegrationTestsLong.dhall b/buildkite/src/Jobs/Test/TestnetIntegrationTestsLong.dhall index 8fe47ca56d25..32c27b521972 100644 --- a/buildkite/src/Jobs/Test/TestnetIntegrationTestsLong.dhall +++ b/buildkite/src/Jobs/Test/TestnetIntegrationTestsLong.dhall @@ -38,5 +38,5 @@ in Pipeline.build , PipelineTag.Type.Stable ] } - , steps = [ TestExecutive.executeLocal "hard-fork" dependsOn ] + , steps = [ TestExecutive.executeLocal "post-hard-fork" dependsOn ] } diff --git a/changes/17819.md b/changes/17819.md new file mode 100644 index 000000000000..344c12377317 --- /dev/null +++ b/changes/17819.md @@ -0,0 +1,2 @@ +Fix crash in ledger sync check that could occur when trying to load ledger +databases that were only partially synced to a network ledger diff --git a/changes/17954.md b/changes/17954.md new file mode 100644 index 000000000000..103cbca6dd34 --- /dev/null +++ b/changes/17954.md @@ -0,0 +1 @@ +The Mina daemon now uses RocksDB 10.5.1 under the hood to parse genesis/epoch ledger tars. Testing confirms that RocksDB preserves backward compatibility with the ledger tars we serve that were written with the older RocksDB version. diff --git a/changes/17962.md b/changes/17962.md new file mode 100644 index 000000000000..bb2bde56e88b --- /dev/null +++ b/changes/17962.md @@ -0,0 +1,3 @@ +Alter the mina-missing-block-auditor app. + +Correctly handle networks derived from devnet/mainnet that lack pre-hardfork blocks \ No newline at end of file diff --git a/dockerfiles/stages/1-build-deps b/dockerfiles/stages/1-build-deps index 86d731f1ca8c..52f183ece127 100644 --- a/dockerfiles/stages/1-build-deps +++ b/dockerfiles/stages/1-build-deps @@ -45,7 +45,7 @@ ARG OPAM_VERSION=2.3.0 # `nix flake update opam-repository`).
# - scripts/update_opam_switch.sh ARG OPAM_REPOSITORY_COMMIT=08d8c16c16dc6b23a5278b06dff0ac6c7a217356 -ARG O1LABS_OPAM_REPOSITORY_COMMIT=cabde639f92d259d4c131b00200d7a53d854ee74 +ARG O1LABS_OPAM_REPOSITORY_COMMIT=dd90c5c72b7b7caeca3db3224b2503924deea08a # Golang version number used to detemine tarball name ARG GO_VERSION=1.19.11 diff --git a/flake.lock b/flake.lock index 43ef549a3ead..7536b98649e0 100644 --- a/flake.lock +++ b/flake.lock @@ -361,17 +361,17 @@ "o1-opam-repository": { "flake": false, "locked": { - "lastModified": 1758811567, - "narHash": "sha256-U4IkjchYvesxE4q2mv3C49TQH5O6O2ogmbxCZ6hFXPo=", + "lastModified": 1760589013, + "narHash": "sha256-pugmvJB2/Ajr9sRujabatKJKwjheZMYgDpg17djfX3Y=", "owner": "o1-labs", "repo": "opam-repository", - "rev": "cabde639f92d259d4c131b00200d7a53d854ee74", + "rev": "dd90c5c72b7b7caeca3db3224b2503924deea08a", "type": "github" }, "original": { "owner": "o1-labs", "repo": "opam-repository", - "rev": "cabde639f92d259d4c131b00200d7a53d854ee74", + "rev": "dd90c5c72b7b7caeca3db3224b2503924deea08a", "type": "github" } }, diff --git a/flake.nix b/flake.nix index 133c4a1aed4a..5b753dd2e3d0 100644 --- a/flake.nix +++ b/flake.nix @@ -31,14 +31,14 @@ inputs.describe-dune.inputs.nixpkgs.follows = "nixpkgs"; inputs.describe-dune.inputs.flake-utils.follows = "utils"; - inputs.o1-opam-repository.url = "github:o1-labs/opam-repository/cabde639f92d259d4c131b00200d7a53d854ee74"; + inputs.o1-opam-repository.url = "github:o1-labs/opam-repository/dd90c5c72b7b7caeca3db3224b2503924deea08a"; inputs.o1-opam-repository.flake = false; # The version must be the same as the version used in: # - dockerfiles/1-build-deps # - flake.nix (and flake.lock after running # `nix flake update opam-repository`). - # - scripts/update_opam_switch.sh + # - scripts/update-opam-switch.sh inputs.opam-repository.url = "github:ocaml/opam-repository/08d8c16c16dc6b23a5278b06dff0ac6c7a217356"; inputs.opam-repository.flake = false; diff --git a/nix/ocaml.nix b/nix/ocaml.nix index 206f41a09953..e6e81b05359e 100644 --- a/nix/ocaml.nix +++ b/nix/ocaml.nix @@ -66,10 +66,9 @@ let }; rocksdb_stubs = - # TODO uncomment after updating rocksdb_stubs to 10.5.1 - # assert (super.rocksdb_stubs.version == pkgs.rocksdb-mina.version) - # || builtins.throw - # "rocksdb_stubs version (${super.rocksdb_stubs.version}) does not match rocksdb-mina version (${pkgs.rocksdb-mina.version})"; + assert (super.rocksdb_stubs.version == pkgs.rocksdb-mina.version) + || builtins.throw + "rocksdb_stubs version (${super.rocksdb_stubs.version}) does not match rocksdb-mina version (${pkgs.rocksdb-mina.version})"; super.rocksdb_stubs.overrideAttrs { MINA_ROCKSDB = "${pkgs.rocksdb-mina}/lib/librocksdb.a"; }; diff --git a/opam.export b/opam.export index cc46363d92c5..a20695d823f9 100644 --- a/opam.export +++ b/opam.export @@ -49,7 +49,7 @@ roots: [ "qcheck-alcotest.0.20" "re2.v0.14.0" "rocks.0.2.0" - "rocksdb_stubs.10.2.1" + "rocksdb_stubs.10.5.1" "rpc_parallel.v0.14.0" "sexp_diff_kernel.v0.14.0" "sodium.dev" @@ -250,7 +250,7 @@ installed: [ "res.5.0.1" "result.1.5" "rocks.0.2.0" - "rocksdb_stubs.10.2.1" + "rocksdb_stubs.10.5.1" "rpc_parallel.v0.14.0" "rresult.0.6.0" "seq.base" diff --git a/scripts/debian/aptly.sh b/scripts/debian/aptly.sh index 9464be2d296d..0766eb62afe3 100755 --- a/scripts/debian/aptly.sh +++ b/scripts/debian/aptly.sh @@ -28,24 +28,27 @@ check_required jq function start_aptly() { local __distribution=$1 local __debs=$2 - local __background=$3 - local __clean=$4 - local __component=$5 + local __archs=$3 + local 
__background=$4 + local __clean=$5 + local __component=$6 local __repo="${__distribution}"-"${__component}" - local __port=$6 - local __wait=$7 + local __port=$7 + local __wait=$8 if [ "${__clean}" = 1 ]; then rm -rf ~/.aptly fi - aptly repo create -component "${__component}" -distribution "${__distribution}" "${__repo}" + aptly repo list | grep -q "^${__repo}$" && aptly repo drop "${__repo}" || true - aptly repo add "${__repo}" "${__debs}" + aptly repo create -component "${__component}" -distribution "${__distribution}" -architectures "${__archs}" "${__repo}" - aptly snapshot create "${__component}" from repo "${__repo}" + aptly repo add -architectures "${__archs}" "${__repo}" "${__debs}" - aptly publish snapshot -distribution="${__distribution}" -skip-signing "${__component}" + aptly snapshot create -architectures "${__archs}" "${__component}" from repo "${__repo}" + + aptly publish snapshot -architectures "${__archs}" -distribution "${__distribution}" -skip-signing "${__component}" if [ "${__background}" = 1 ]; then aptly serve -listen 0.0.0.0:"${__port}" & @@ -101,6 +104,7 @@ function start(){ local __background=0 local __clean=0 local __component="unstable" + local __archs="amd64" local __port=$PORT local __wait=0 @@ -127,6 +131,10 @@ function start(){ __debs=${2:?$error_message} shift 2; ;; + -a | --archs ) + __archs=${2:?$error_message} + shift 2; + ;; -m | --component ) __component=${2:?$error_message} shift 2; @@ -149,6 +157,7 @@ function start(){ start_aptly $__distribution \ $__debs \ + $__archs \ $__background \ $__clean \ $__component \ diff --git a/scripts/debian/builder-helpers.sh b/scripts/debian/builder-helpers.sh index 2eea63aedf23..f653f2aa0abf 100755 --- a/scripts/debian/builder-helpers.sh +++ b/scripts/debian/builder-helpers.sh @@ -1,7 +1,6 @@ #!/bin/bash set -euo pipefail - SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" BUILD_DIR=${BUILD_DIR:-"${SCRIPTPATH}/../../_build"} BUILD_URL=${BUILD_URL:-${BUILDKITE_BUILD_URL:-"local build from '$(hostname)' \ @@ -219,6 +218,15 @@ copy_common_daemon_configs() { } ## LOGPROC PACKAGE ## + +# +# Builds mina-logproc package for log processing utility +# +# Output: mina-logproc_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${SHARED_DEPS} (basic system libraries) +# +# Simple utility package containing only the logproc binary. +# build_logproc_deb() { create_control_file mina-logproc "${SHARED_DEPS}" \ 'Utility for processing mina-daemon log output' @@ -232,6 +240,15 @@ build_logproc_deb() { ## END LOGPROC PACKAGE ## ## GENERATE TEST_EXECUTIVE PACKAGE ## + +# +# Builds mina-test-executive package for automated testing +# +# Output: mina-test-executive_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${SHARED_DEPS}${TEST_EXECUTIVE_DEPS} (includes docker, python3) +# +# Package for running automated tests against full mina testnets. +# build_test_executive_deb () { create_control_file mina-test-executive \ "${SHARED_DEPS}${TEST_EXECUTIVE_DEPS}" \ @@ -247,6 +264,15 @@ build_test_executive_deb () { ## END TEST_EXECUTIVE PACKAGE ## ## GENERATE BATCH TXN TOOL PACKAGE ## + +# +# Builds mina-batch-txn package for transaction load testing +# +# Output: mina-batch-txn_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${SHARED_DEPS} +# +# Tool for generating transaction load against mina nodes. 
+# build_batch_txn_deb() { create_control_file mina-batch-txn "${SHARED_DEPS}" \ @@ -261,6 +287,16 @@ build_batch_txn_deb() { ## END BATCH TXN TOOL PACKAGE ## ## GENERATE TEST SUITE PACKAGE ## + +# +# Builds mina-test-suite package containing various testing utilities +# +# Output: mina-test-suite_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${SHARED_DEPS} +# +# Comprehensive package with command line tests, benchmarks, archive tests, +# and performance analysis tools. Includes sample database for archive testing. +# build_functional_test_suite_deb() { create_control_file mina-test-suite "${SHARED_DEPS}" \ 'Test suite apps for mina.' @@ -319,6 +355,15 @@ function copy_common_rosetta_configs () { } ## ROSETTA MAINNET PACKAGE ## + +# +# Builds mina-rosetta-mainnet package for mainnet Rosetta API +# +# Output: mina-rosetta-mainnet_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${SHARED_DEPS} +# +# Rosetta API implementation for mainnet with mainnet signature binaries. +# build_rosetta_mainnet_deb() { echo "------------------------------------------------------------" @@ -334,6 +379,15 @@ build_rosetta_mainnet_deb() { ## END ROSETTA MAINNET PACKAGE ## ## ROSETTA DEVNET PACKAGE ## + +# +# Builds mina-rosetta-devnet package for devnet Rosetta API +# +# Output: mina-rosetta-devnet_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${SHARED_DEPS} +# +# Rosetta API implementation for devnet with testnet signature binaries. +# build_rosetta_devnet_deb() { echo "------------------------------------------------------------" @@ -349,6 +403,15 @@ build_rosetta_devnet_deb() { ## END ROSETTA DEVNET PACKAGE ## ## ROSETTA BERKELEY PACKAGE ## + +# +# Builds mina-rosetta-berkeley package for Berkeley testnet Rosetta API +# +# Output: mina-rosetta-berkeley_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${SHARED_DEPS} +# +# Rosetta API implementation for Berkeley testnet with testnet signature binaries. +# build_rosetta_berkeley_deb() { echo "------------------------------------------------------------" @@ -364,6 +427,16 @@ build_rosetta_berkeley_deb() { ## END BERKELEY PACKAGE ## ## MAINNET PACKAGE ## + +# +# Builds mina-mainnet package for mainnet daemon +# +# Output: mina-mainnet_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${SHARED_DEPS}${DAEMON_DEPS} (includes libpq-dev, jemalloc, logproc) +# +# Full mainnet daemon package with mainnet signatures and mainnet genesis ledger +# as default. Uses mainnet seed list and mainnet configuration. +# build_daemon_mainnet_deb() { echo "------------------------------------------------------------" @@ -379,12 +452,28 @@ build_daemon_mainnet_deb() { ## END MAINNET PACKAGE ## ## DEVNET PACKAGE ## + +# +# Builds devnet daemon package with profile-aware naming +# +# Output: ${MINA_DEVNET_DEB_NAME}_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Where MINA_DEVNET_DEB_NAME can be: +# - "mina-devnet" (default) +# - "mina-devnet-lightnet" (if DUNE_PROFILE=lightnet) +# - "mina-devnet-instrumented" (if DUNE_INSTRUMENT_WITH is set) +# - "mina-devnet-lightnet-instrumented" (both conditions) +# +# Dependencies: ${SHARED_DEPS}${DAEMON_DEPS} +# +# Devnet daemon with testnet signatures and devnet genesis ledger as default. +# Package name includes suffixes for different profiles and instrumentation. 
+# build_daemon_devnet_deb() { echo "------------------------------------------------------------" echo "--- Building testnet signatures deb without keys:" - create_control_file mina-devnet "${SHARED_DEPS}${DAEMON_DEPS}" \ + create_control_file "${MINA_DEVNET_DEB_NAME}" "${SHARED_DEPS}${DAEMON_DEPS}" \ 'Mina Protocol Client and Daemon for the Devnet Network' "${SUGGESTED_DEPS}" copy_common_daemon_configs devnet testnet 'seed-lists/devnet_seeds.txt' @@ -394,6 +483,16 @@ build_daemon_devnet_deb() { ## END DEVNET PACKAGE ## ## MAINNET LEGACY PACKAGE ## + +# +# Builds mina-mainnet-legacy package with legacy binary +# +# Output: mina-mainnet-legacy_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${SHARED_DEPS}${DAEMON_DEPS} +# +# Contains only the legacy mainnet binary as "mina-legacy" without +# configuration files or genesis ledgers. +# build_daemon_mainnet_legacy_deb() { echo "------------------------------------------------------------" @@ -411,6 +510,16 @@ build_daemon_mainnet_legacy_deb() { ## END MAINNET LEGACY PACKAGE ## ## DEVNET LEGACY PACKAGE ## + +# +# Builds mina-devnet-legacy package with legacy testnet binary +# +# Output: mina-devnet-legacy_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${SHARED_DEPS}${DAEMON_DEPS} +# +# Contains only the legacy testnet binary as "mina-legacy" without +# configuration files or genesis ledgers. +# build_daemon_devnet_legacy_deb() { echo "------------------------------------------------------------" @@ -428,6 +537,22 @@ build_daemon_devnet_legacy_deb() { ## END DEVNET LEGACY PACKAGE ## ## BERKELEY PACKAGE ## + +# +# Builds Berkeley testnet daemon package with profile-aware naming +# +# Output: ${MINA_DEB_NAME}_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Where MINA_DEB_NAME can be: +# - "mina-berkeley" (default) +# - "mina-berkeley-lightnet" (if DUNE_PROFILE=lightnet) +# - "mina-berkeley-instrumented" (if DUNE_INSTRUMENT_WITH is set) +# - "mina-berkeley-lightnet-instrumented" (both conditions) +# +# Dependencies: ${SHARED_DEPS}${DAEMON_DEPS} +# +# Berkeley testnet daemon with testnet signatures and berkeley genesis ledger +# as default. Package name includes suffixes for different profiles. +# build_daemon_berkeley_deb() { echo "------------------------------------------------------------" @@ -444,6 +569,19 @@ build_daemon_berkeley_deb() { } ## END BERKELEY PACKAGE ## +# +# Replaces runtime config and genesis ledgers with hardfork versions +# +# Parameters: +# $1 - Network name (mainnet, devnet, berkeley) +# +# Environment variables required: +# RUNTIME_CONFIG_JSON - path to hardfork runtime configuration +# LEDGER_TARBALLS - space-separated list of ledger tarball paths +# +# Copies hardfork-specific runtime config and ledgers, backing up existing +# network ledger as .old.json before replacement. +# replace_runtime_config_and_ledgers_with_hardforked_ones() { local NETWORK_NAME="${1}" @@ -468,6 +606,16 @@ replace_runtime_config_and_ledgers_with_hardforked_ones() { ## DEVNET HARDFORK PACKAGE ## + +# +# Builds mina-devnet-hardfork package for devnet hardfork +# +# Output: mina-devnet-hardfork_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${SHARED_DEPS}${DAEMON_DEPS} +# +# Devnet daemon package with hardfork-specific runtime config and ledgers. +# Requires RUNTIME_CONFIG_JSON and LEDGER_TARBALLS environment variables. 
+# build_daemon_devnet_hardfork_deb() { local __deb_name=mina-devnet-hardfork @@ -488,6 +636,16 @@ build_daemon_devnet_hardfork_deb() { ## END DEVNET HARDFORK PACKAGE ## ## BERKELEY HARDFORK PACKAGE ## + +# +# Builds mina-berkeley-hardfork package for Berkeley hardfork +# +# Output: mina-berkeley-hardfork_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${SHARED_DEPS}${DAEMON_DEPS} +# +# Berkeley daemon package with hardfork-specific runtime config and ledgers. +# Requires RUNTIME_CONFIG_JSON and LEDGER_TARBALLS environment variables. +# build_daemon_berkeley_hardfork_deb() { local __deb_name=mina-berkeley-hardfork @@ -508,6 +666,17 @@ build_daemon_berkeley_hardfork_deb() { ## END BERKELEY HARDFORK PACKAGE ## ## MAINNET HARDFORK PACKAGE ## + +# +# Builds mina-mainnet-hardfork package for mainnet hardfork +# +# Output: mina-mainnet-hardfork_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${SHARED_DEPS}${DAEMON_DEPS} +# +# Mainnet daemon package with hardfork-specific runtime config and ledgers. +# Requires RUNTIME_CONFIG_JSON and LEDGER_TARBALLS environment variables. +# Note: Uses testnet signatures despite being mainnet hardfork package. +# build_daemon_mainnet_hardfork_deb() { local __deb_name=mina-mainnet-hardfork @@ -527,6 +696,15 @@ build_daemon_mainnet_hardfork_deb() { ## END MAINNET HARDFORK PACKAGE ## +# +# Copies common binaries and configuration for archive packages +# +# Parameters: +# $1 - Archive package name (used for build_deb call) +# +# Sets up archive daemon, archive blocks tool, extract blocks tool, +# missing blocks utilities, replayer, and SQL migration scripts. +# copy_common_archive_configs() { local ARCHIVE_DEB="${1}" @@ -552,6 +730,15 @@ copy_common_archive_configs() { } ## ARCHIVE DEVNET PACKAGE ## + +# +# Builds mina-archive-devnet package for devnet archive node +# +# Output: mina-archive-devnet_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${ARCHIVE_DEPS} (libssl, libgomp, libpq-dev, libjemalloc) +# +# Archive node package for devnet with all archive utilities and SQL scripts. +# build_archive_devnet_deb () { ARCHIVE_DEB=mina-archive-devnet @@ -567,6 +754,21 @@ build_archive_devnet_deb () { ## END ARCHIVE DEVNET PACKAGE ## ## ARCHIVE BERKELEY PACKAGE ## + +# +# Builds Berkeley archive package with profile-aware naming +# +# Output: mina-archive-berkeley${DEB_SUFFIX}_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Where DEB_SUFFIX can be: +# - "" (empty, default) +# - "-lightnet" (if DUNE_PROFILE=lightnet) +# - "-instrumented" (if DUNE_INSTRUMENT_WITH is set) +# - "-lightnet-instrumented" (both conditions) +# +# Dependencies: ${ARCHIVE_DEPS} +# +# Archive node package for Berkeley with suffix-aware naming for different profiles. +# build_archive_berkeley_deb () { ARCHIVE_DEB=mina-archive-berkeley${DEB_SUFFIX} @@ -583,6 +785,15 @@ build_archive_berkeley_deb () { ## END ARCHIVE PACKAGE ## ## ARCHIVE MAINNET PACKAGE ## + +# +# Builds mina-archive-mainnet package for mainnet archive node +# +# Output: mina-archive-mainnet_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${ARCHIVE_DEPS} +# +# Archive node package for mainnet with all archive utilities and SQL scripts. 
+# build_archive_mainnet_deb () { ARCHIVE_DEB=mina-archive-mainnet @@ -598,6 +809,15 @@ build_archive_mainnet_deb () { ## END ARCHIVE MAINNET PACKAGE ## ## ZKAPP TEST TXN ## + +# +# Builds mina-zkapp-test-transaction package for zkApp testing +# +# Output: mina-zkapp-test-transaction_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${SHARED_DEPS}${DAEMON_DEBS} +# +# Utility for generating zkApp transactions in Mina GraphQL format for testing. +# build_zkapp_test_transaction_deb () { echo "------------------------------------------------------------" echo "--- Building Mina Berkeley ZkApp test transaction tool:" @@ -614,7 +834,17 @@ build_zkapp_test_transaction_deb () { } ## END ZKAPP TEST TXN PACKAGE ## - +## CREATE LEGACY GENESIS PACKAGE ## + +# +# Builds mina-create-legacy-genesis package for legacy genesis creation +# +# Output: mina-create-legacy-genesis_${MINA_DEB_VERSION}_${ARCHITECTURE}.deb +# Dependencies: ${SHARED_DEPS}${DAEMON_DEBS} +# +# Utility for creating legacy genesis ledgers for post-hardfork verification. +# Contains the runtime_genesis_ledger tool for Mina protocol. +# build_create_legacy_genesis_deb() { echo "------------------------------------------------------------" echo "--- Building Mina Berkeley create legacy genesis tool:" @@ -628,4 +858,5 @@ build_create_legacy_genesis_deb() { "${BUILDDIR}/usr/local/bin/mina-create-legacy-genesis" build_deb mina-create-legacy-genesis -} \ No newline at end of file +} +## END CREATE LEGACY GENESIS PACKAGE ## \ No newline at end of file diff --git a/scripts/rocksdb-compatibility/.gitignore b/scripts/rocksdb-compatibility/.gitignore new file mode 100644 index 000000000000..13d5ab46f9fd --- /dev/null +++ b/scripts/rocksdb-compatibility/.gitignore @@ -0,0 +1,2 @@ +/.venv +/__pycache__ diff --git a/scripts/rocksdb-compatibility/README.md b/scripts/rocksdb-compatibility/README.md new file mode 100644 index 000000000000..3c78a798df18 --- /dev/null +++ b/scripts/rocksdb-compatibility/README.md @@ -0,0 +1,3 @@ +To run this test: +1. Run `install-rocksdb.sh` (preferably in a Docker container, because it installs into the system library path) to ensure the RocksDB dynamic libraries are installed +2.
Run `test.py` inside a venv where everything in `requirements.txt` is installed diff --git a/scripts/rocksdb-compatibility/install-rocksdb.sh b/scripts/rocksdb-compatibility/install-rocksdb.sh new file mode 100755 index 000000000000..94f0bffb5b9c --- /dev/null +++ b/scripts/rocksdb-compatibility/install-rocksdb.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -euox pipefail + +ROCKSDB_VERSION=10.5.1 + +ROCKSDB_SOURCE=$(mktemp -d --tmpdir rocksdb-$ROCKSDB_VERSION.XXXXXX) + +# shellcheck disable=SC2064 +trap "rm -rf $ROCKSDB_SOURCE" EXIT + +curl -L https://github.com/facebook/rocksdb/archive/refs/tags/v${ROCKSDB_VERSION}.tar.gz | tar xz -C $ROCKSDB_SOURCE + +cd $ROCKSDB_SOURCE/rocksdb-${ROCKSDB_VERSION} + +# NOTE: +# `-Wno-unused-parameter` is to fix this error: +# util/compression.cc:684:40: error: unused parameter β€˜args’ [-Werror=unused-parameter] +# 684 | Status ExtractUncompressedSize(Args& args) override { +# | ~~~~~~^~~~ +sudo EXTRA_CXXFLAGS="-Wno-unused-parameter" make -j"$(nproc)" install-shared + +# Refresh the LD cache so follow-up programs can locate the dynamic library +sudo ldconfig diff --git a/scripts/rocksdb-compatibility/requirements.txt b/scripts/rocksdb-compatibility/requirements.txt new file mode 100644 index 000000000000..40fdc62a1d50 --- /dev/null +++ b/scripts/rocksdb-compatibility/requirements.txt @@ -0,0 +1,3 @@ +cffi==2.0.0 +tqdm==4.65 +pycurl==7.45.7 diff --git a/scripts/rocksdb-compatibility/rocksdb.py b/scripts/rocksdb-compatibility/rocksdb.py new file mode 100644 index 000000000000..6097b85aa6c5 --- /dev/null +++ b/scripts/rocksdb-compatibility/rocksdb.py @@ -0,0 +1,100 @@ +from cffi import FFI +from contextlib import contextmanager + +ffi = FFI() + +ffi.cdef(""" +typedef struct rocksdb_t rocksdb_t; +typedef struct rocksdb_options_t rocksdb_options_t; +typedef struct rocksdb_readoptions_t rocksdb_readoptions_t; +typedef struct rocksdb_iterator_t rocksdb_iterator_t; + +rocksdb_options_t* rocksdb_options_create(void); +void rocksdb_options_destroy(rocksdb_options_t*); +void rocksdb_options_set_create_if_missing(rocksdb_options_t*, unsigned char); + +rocksdb_t* rocksdb_open(const rocksdb_options_t* options, const char* name, char** errptr); +void rocksdb_close(rocksdb_t* db); + +rocksdb_readoptions_t* rocksdb_readoptions_create(void); +void rocksdb_readoptions_destroy(rocksdb_readoptions_t*); + +rocksdb_iterator_t* rocksdb_create_iterator(rocksdb_t* db, const rocksdb_readoptions_t* options); +void rocksdb_iter_destroy(rocksdb_iterator_t* iter); +void rocksdb_iter_seek_to_first(rocksdb_iterator_t* iter); +unsigned char rocksdb_iter_valid(const rocksdb_iterator_t* iter); +void rocksdb_iter_next(rocksdb_iterator_t* iter); +const char* rocksdb_iter_key(const rocksdb_iterator_t* iter, size_t* klen); +const char* rocksdb_iter_value(const rocksdb_iterator_t* iter, size_t* vlen); +""") + +# Load the library +rocksdb = ffi.dlopen("librocksdb.so") + +@contextmanager +def rocksdb_options(create_if_missing=False): + opts = rocksdb.rocksdb_options_create() + rocksdb.rocksdb_options_set_create_if_missing(opts, int(create_if_missing)) + try: + yield opts + finally: + rocksdb.rocksdb_options_destroy(opts) + +@contextmanager +def open_db(path, options): + err_ptr = ffi.new("char**") + db = rocksdb.rocksdb_open(options, path.encode('utf-8'), err_ptr) + if err_ptr[0] != ffi.NULL: + raise RuntimeError("Open error: " + ffi.string(err_ptr[0]).decode()) + try: + yield db + finally: + rocksdb.rocksdb_close(db) + +def read_iter(db): + """ + Generator that yields (key, value) pairs from a RocksDB
database. + + Args: + db (rocksdb_t*): A RocksDB database handle. + + Yields: + tuple[bytes, bytes]: The (key, value) pairs from the database. + """ + ropts = rocksdb.rocksdb_readoptions_create() + it = rocksdb.rocksdb_create_iterator(db, ropts) + try: + rocksdb.rocksdb_iter_seek_to_first(it) + while rocksdb.rocksdb_iter_valid(it): + klen = ffi.new("size_t*") + vlen = ffi.new("size_t*") + key_ptr = rocksdb.rocksdb_iter_key(it, klen) + val_ptr = rocksdb.rocksdb_iter_value(it, vlen) + yield ( + bytes(ffi.buffer(key_ptr, klen[0])), + bytes(ffi.buffer(val_ptr, vlen[0])), + ) + rocksdb.rocksdb_iter_next(it) + finally: + rocksdb.rocksdb_iter_destroy(it) + rocksdb.rocksdb_readoptions_destroy(ropts) + +def test(path, rounds): + """ + Iterate over a RocksDB database and print key-value pairs in hexadecimal. + + Args: + path (str): Path to the RocksDB database. + rounds (int): Number of key-value pairs to read from the start of the database. + + Behavior: + - Opens the database in read-only mode (does not create a new DB). + - Uses a RocksDB iterator to traverse from the first key. + - Prints each key-value pair as hexadecimal strings. + - Stops early if the iterator reaches the end of the DB before 'rounds' entries. + """ + with rocksdb_options(create_if_missing=False) as opts, open_db(path, opts) as db: + for i, (key, val) in enumerate(read_iter(db)): + print(f"Found KV-pair: {key.hex()} -> {val.hex()}") + if i + 1 >= rounds: + break diff --git a/scripts/rocksdb-compatibility/test.py b/scripts/rocksdb-compatibility/test.py new file mode 100644 index 000000000000..eaeca1143014 --- /dev/null +++ b/scripts/rocksdb-compatibility/test.py @@ -0,0 +1,101 @@ +import os +import random +import tarfile +import tempfile +import xml.etree.ElementTree as ET +from io import BytesIO +from typing import List +from urllib.parse import urljoin + +import pycurl +from tqdm import tqdm + +import rocksdb + +NUM_LEDGER_TARS = 5 +NUM_KV_PER_LEDGER = 10 + +# Match keys starting with "genesis_ledger" or "epoch_ledger" and ending with ".tar.gz" +def matches_pattern(key: str) -> bool: + return (key.startswith("genesis_ledger") or key.startswith("epoch_ledger")) and key.endswith(".tar.gz") + + +def download_file(url: str, dest_path: str) -> None: + with open(dest_path, "wb") as f: + # Create a progress bar (tqdm) + pbar = tqdm(unit="B", unit_scale=True, unit_divisor=1024, ncols=80) + + def progress(download_t, download_d, _upload_t, _upload_d): + _ = (_upload_t, _upload_d) # Make pyright happier + if download_t > 0: + pbar.total = download_t + pbar.update(download_d - pbar.n) + + c = pycurl.Curl() + c.setopt(pycurl.URL, url) + c.setopt(pycurl.WRITEDATA, f) + c.setopt(pycurl.FOLLOWLOCATION, True) + c.setopt(pycurl.NOPROGRESS, False) + c.setopt(pycurl.XFERINFOFUNCTION, progress) + c.perform() + c.close() + + pbar.close() + + +def extract_tar_gz(tar_path: str, target_dir: str) -> None: + with tarfile.open(tar_path, "r:gz") as tar: + tar.extractall(path=target_dir) + +# TODO: figure out how to enable SSL here +def list_s3_keys(url, matches_pattern) -> List[str] : + buffer = BytesIO() + c = pycurl.Curl() + c.setopt(pycurl.URL, url) + c.setopt(pycurl.WRITEDATA, buffer) + c.setopt(pycurl.FOLLOWLOCATION, True) + c.setopt(pycurl.SSL_VERIFYPEER, False) + c.setopt(pycurl.SSL_VERIFYHOST, 0) + c.perform() + status_code = c.getinfo(pycurl.RESPONSE_CODE) + c.close() + + if status_code != 200: + raise RuntimeError(f"Failed to list S3 bucket: {status_code}") + + data = buffer.getvalue() + root = ET.fromstring(data) + ns = {"s3": 
"http://s3.amazonaws.com/doc/2006-03-01/"} + tar_keys = [ + text + for elem in root.findall(".//s3:Contents/s3:Key", ns) + if (text := elem.text) is not None and matches_pattern(text) + ] + return tar_keys + +def main(): + tar_keys = list_s3_keys("https://snark-keys.o1test.net.s3.amazonaws.com/", matches_pattern) + + if not tar_keys: + raise RuntimeError("No ledger tar files found.") + + for tar_key in random.sample(tar_keys, min(NUM_LEDGER_TARS, len(tar_keys))): + tar_uri = urljoin("https://s3-us-west-2.amazonaws.com/snark-keys.o1test.net/", tar_key) + print(f"Testing RocksDB compatibility on {tar_uri}") + + with tempfile.TemporaryDirectory() as tmpdir: + tar_path = os.path.join(tmpdir, os.path.basename(tar_key)) + print(f" Downloading to {tar_path}...") + download_file(tar_uri, tar_path) + + db_path = os.path.join(tmpdir, "extracted") + os.makedirs(db_path, exist_ok=True) + print(f" Extracting to {db_path}...") + extract_tar_gz(tar_path, db_path) + + print(f" Testing extracted RocksDB at {db_path}") + rocksdb.test(db_path, NUM_KV_PER_LEDGER) + + +if __name__ == "__main__": + main() diff --git a/scripts/update-opam-switch.sh b/scripts/update-opam-switch.sh index ef98ab2396d9..24af72561cde 100755 --- a/scripts/update-opam-switch.sh +++ b/scripts/update-opam-switch.sh @@ -29,7 +29,8 @@ ocaml_version=4.14.2 # - flake.nix (and flake.lock after running # `nix flake update opam-repository`). # - scripts/update_opam_switch.sh -opam_repository_commit=08d8c16c16dc6b23a5278b06dff0ac6c7a217356 +OPAM_REPOSITORY_COMMIT=08d8c16c16dc6b23a5278b06dff0ac6c7a217356 +O1LABS_OPAM_REPOSITORY_COMMIT=dd90c5c72b7b7caeca3db3224b2503924deea08a if [[ -d _opam ]]; then read -rp "Directory '_opam' exists and will be removed. You can also bypass the check by setting the variable BYPASS_OPAM_SWITCH_UPDATE to any value. Continue? [y/N] " \ @@ -48,14 +49,14 @@ if [[ ! -d "${switch_dir}" ]]; then # We add o1-labs opam repository and make it default # (if it's repeated, it's a no-op) opam repository add --yes --set-default o1-labs \ - https://github.com/o1-labs/opam-repository.git + "https://github.com/o1-labs/opam-repository.git#${O1LABS_OPAM_REPOSITORY_COMMIT}" # The default opam repository is set to a specific commit as some of our # dependencies have been archived. # See https://github.com/MinaProtocol/mina/pull/17450 opam repository \ set-url \ default \ - "git+https://github.com/ocaml/opam-repository.git#${opam_repository_commit}" + "git+https://github.com/ocaml/opam-repository.git#${OPAM_REPOSITORY_COMMIT}" opam update opam switch import -y --switch . opam.export mkdir -p opam_switches diff --git a/src/app/archive/downgrade_to_berkeley.sql b/src/app/archive/downgrade_to_berkeley.sql index ffde5826620e..6580a7e4ebcc 100644 --- a/src/app/archive/downgrade_to_berkeley.sql +++ b/src/app/archive/downgrade_to_berkeley.sql @@ -18,7 +18,7 @@ SET archive.current_protocol_version = '4.0.0'; -- Post-HF protocol version. This one corresponds to Mesa, specifically SET archive.target_protocol_version = '3.0.0'; -- The version of this script. If you modify the script, please bump the version -SET archive.migration_version = '0.0.3'; +SET archive.migration_version = '0.0.4'; -- TODO: put below in a common script diff --git a/src/app/archive/upgrade_to_mesa.sql b/src/app/archive/upgrade_to_mesa.sql index e265d0e2d710..3c033422f49b 100644 --- a/src/app/archive/upgrade_to_mesa.sql +++ b/src/app/archive/upgrade_to_mesa.sql @@ -18,7 +18,7 @@ SET archive.current_protocol_version = '3.0.0'; -- Post-HF protocol version. 
This one corresponds to Mesa, specifically SET archive.target_protocol_version = '4.0.0'; -- The version of this script. If you modify the script, please bump the version -SET archive.migration_version = '0.0.3'; +SET archive.migration_version = '0.0.4'; -- TODO: put below in a common script @@ -162,17 +162,39 @@ SELECT add_zkapp_states_nullable_element(30); SELECT add_zkapp_states_nullable_element(31); -- 3. `zkapp_states`: Add columns element8..element31 + +CREATE OR REPLACE FUNCTION get_zero_field_id() RETURNS int AS $$ +DECLARE + result int; + zero text := '0'; +BEGIN + -- Try to fetch existing id + SELECT id INTO result FROM zkapp_field WHERE field = zero; + + -- If not found, insert and get the new id + IF result IS NULL THEN + INSERT INTO zkapp_field(field) + VALUES (zero) + RETURNING id INTO result; + END IF; + + RETURN result; +END +$$ LANGUAGE plpgsql; + CREATE OR REPLACE FUNCTION add_zkapp_states_element(p_element_num INT) RETURNS VOID AS $$ DECLARE col_name TEXT := 'element' || p_element_num; + default_id int := get_zero_field_id(); BEGIN RAISE DEBUG 'Adding column % for zkapp_states', col_name; EXECUTE format( - 'ALTER TABLE zkapp_states ADD COLUMN IF NOT EXISTS %I INT DEFAULT 0 NOT NULL REFERENCES zkapp_field(id)', - col_name + 'ALTER TABLE zkapp_states ADD COLUMN IF NOT EXISTS %I INT DEFAULT %s NOT NULL REFERENCES zkapp_field(id)', + col_name, + default_id ); RAISE DEBUG 'Added column % for zkapp_states', col_name; @@ -184,6 +206,14 @@ EXCEPTION END $$ LANGUAGE plpgsql; +DO $$ +DECLARE + default_id int := get_zero_field_id(); +BEGIN + RAISE NOTICE 'Zero field in table zkapp_field is of id = %', default_id; +END +$$; + SELECT add_zkapp_states_element(8); SELECT add_zkapp_states_element(9); SELECT add_zkapp_states_element(10); diff --git a/src/app/missing_blocks_auditor/missing_blocks_auditor.ml b/src/app/missing_blocks_auditor/missing_blocks_auditor.ml index a6d43ea4bc1a..ca867cc294ce 100644 --- a/src/app/missing_blocks_auditor/missing_blocks_auditor.ml +++ b/src/app/missing_blocks_auditor/missing_blocks_auditor.ml @@ -42,9 +42,25 @@ let main ~archive_uri () = ~metadata:[ ("error", `String (Caqti_error.show msg)) ] ; exit 1 in - (* filter out genesis block *) + (* filter out the genesis or first fork block. Such a block is not considered missing: archive blocks can + start at a height > 1 when a sandbox network is forked from devnet/mainnet, + and the archive of such a network contains the fork block, rather than the genesis block, as its first block *) + let%bind genesis_or_fork_block_height = + match%bind + Mina_caqti.Pool.use + (fun db -> Sql.GenesisOrFirstForkBlockHeight.run db ()) + pool + with + | Ok height -> + return height + | Error msg -> + [%log error] "Error getting genesis or first fork block height" + ~metadata:[ ("error", `String (Caqti_error.show msg)) ] ; + exit 1 + in let missing_blocks = - List.filter missing_blocks_raw ~f:(fun (_, _, height, _) -> height <> 1) + List.filter missing_blocks_raw ~f:(fun (_, _, height, _) -> + height <> genesis_or_fork_block_height ) in let%bind () = if List.is_empty missing_blocks then @@ -134,8 +150,14 @@ let main ~archive_uri () = [%log info] "Length of canonical chain is %Ld blocks" chain_len else ( add_error chain_length_error ; - [%log info] "Length of canonical chain is %Ld blocks, expected: %Ld" - chain_len highest_canonical ) ; + if genesis_or_fork_block_height = 1 then + [%log info] "Length of canonical chain is %Ld blocks, expected: %Ld" + chain_len highest_canonical + else + [%log info] + "Length of canonical chain is %Ld blocks, expected: %Ld. (Note: \ genesis or first fork block has height %d)" + chain_len highest_canonical genesis_or_fork_block_height ) ; let invalid_chain = List.filter canonical_chain ~f:(fun (_block_id, _state_hash, chain_status) -> diff --git a/src/app/missing_blocks_auditor/sql.ml b/src/app/missing_blocks_auditor/sql.ml index 7aa4108f0275..f8f32b431573 100644 --- a/src/app/missing_blocks_auditor/sql.ml +++ b/src/app/missing_blocks_auditor/sql.ml @@ -14,6 +14,20 @@ module Unparented_blocks = struct let run (module Conn : Mina_caqti.CONNECTION) () = Conn.collect_list query () end +module GenesisOrFirstForkBlockHeight = struct + let query = + Mina_caqti.find_req Caqti_type.unit Caqti_type.int + {sql| SELECT height FROM blocks + WHERE parent_id IS NULL + AND global_slot_since_hard_fork = 0 + AND chain_status = 'canonical' + ORDER BY height ASC + LIMIT 1 + |sql} + + let run (module Conn : Mina_caqti.CONNECTION) () = Conn.find query () +end + module Missing_blocks_gap = struct let query = Mina_caqti.find_req Caqti_type.int Caqti_type.int diff --git a/src/app/test_executive/hard_fork.ml b/src/app/test_executive/post_hard_fork.ml similarity index 99% rename from src/app/test_executive/hard_fork.ml rename to src/app/test_executive/post_hard_fork.ml index 62a0f361445c..c1233692de63 100644 --- a/src/app/test_executive/hard_fork.ml +++ b/src/app/test_executive/post_hard_fork.ml @@ -1,4 +1,4 @@ -(* hard_fork.ml -- run nodes with fork config, epoch ledger *) +(* post_hard_fork.ml -- run nodes with fork config, epoch ledger *) open Core open Integration_test_lib diff --git a/src/app/test_executive/test_executive.ml b/src/app/test_executive/test_executive.ml index 73031fdccd31..687bd5798c34 100644 --- a/src/app/test_executive/test_executive.ml +++ b/src/app/test_executive/test_executive.ml @@ -57,7 +57,7 @@ let tests : test list = , (module Chain_reliability_test.Make : Intf.Test.Functor_intf) ) ; ("epoch-ledger", (module Epoch_ledger.Make : Intf.Test.Functor_intf)) ; ("gossip-consis", (module Gossip_consistency.Make : Intf.Test.Functor_intf)) - ; ("hard-fork", (module Hard_fork.Make : Intf.Test.Functor_intf)) + ; ("post-hard-fork", (module Post_hard_fork.Make : Intf.Test.Functor_intf)) ; ("medium-bootstrap", (module Medium_bootstrap.Make : Intf.Test.Functor_intf)) ; ("payments", (module Payments_test.Make : Intf.Test.Functor_intf)) ; ( "peers-reliability" diff --git
a/src/lib/merkle_ledger/any_ledger.ml b/src/lib/merkle_ledger/any_ledger.ml index dcc8845f20ee..f01943fa6739 100644 --- a/src/lib/merkle_ledger/any_ledger.ml +++ b/src/lib/merkle_ledger/any_ledger.ml @@ -81,6 +81,8 @@ module Make_base (Inputs : Intf.Inputs.Intf) : let set_at_index_exn (T ((module Base), t)) = Base.set_at_index_exn t + let get_at_index (T ((module Base), t)) = Base.get_at_index t + let get_at_index_exn (T ((module Base), t)) = Base.get_at_index_exn t let set_batch ?hash_cache (T ((module Base), t)) = @@ -118,6 +120,8 @@ module Make_base (Inputs : Intf.Inputs.Intf) : let token_owners (T ((module Base), t)) = Base.token_owners t + let iteri_untrusted (T ((module Base), t)) = Base.iteri_untrusted t + let iteri (T ((module Base), t)) = Base.iteri t (* ignored_keys must be Base.Keys.Set.t, but that isn't necessarily the same as Keys.Set.t for the diff --git a/src/lib/merkle_ledger/converting_merkle_tree.ml b/src/lib/merkle_ledger/converting_merkle_tree.ml index 35f5c9ad386d..58dc18859a96 100644 --- a/src/lib/merkle_ledger/converting_merkle_tree.ml +++ b/src/lib/merkle_ledger/converting_merkle_tree.ml @@ -109,6 +109,8 @@ end) let to_list_sequential t = Primary_ledger.to_list_sequential t.primary_ledger + let iteri_untrusted t ~f = Primary_ledger.iteri_untrusted t.primary_ledger ~f + let iteri t ~f = Primary_ledger.iteri t.primary_ledger ~f let foldi t ~init ~f = Primary_ledger.foldi t.primary_ledger ~init ~f @@ -180,6 +182,8 @@ end) ~f:(fun (loc, account) -> (loc, convert account)) located_accounts ) + let get_at_index t idx = Primary_ledger.get_at_index t.primary_ledger idx + let get_at_index_exn t idx = Primary_ledger.get_at_index_exn t.primary_ledger idx @@ -264,12 +268,12 @@ struct Primary_db.num_accounts db1 = Converting_db.num_accounts db2 && let is_synced = ref true in - Primary_db.iteri db1 ~f:(fun idx stable_account -> - let expected_unstable_account = convert stable_account in - let actual_unstable_account = Converting_db.get_at_index_exn db2 idx in + Primary_db.iteri_untrusted db1 ~f:(fun idx stable_account -> + let expected_unstable_account = Option.map ~f:convert stable_account in + let actual_unstable_account = Converting_db.get_at_index db2 idx in if not - (Inputs.converted_equal expected_unstable_account + (Option.equal Inputs.converted_equal expected_unstable_account actual_unstable_account ) then is_synced := false ) ; !is_synced diff --git a/src/lib/merkle_ledger/database.ml b/src/lib/merkle_ledger/database.ml index 2d2a138cea45..663dd4b010b7 100644 --- a/src/lib/merkle_ledger/database.ml +++ b/src/lib/merkle_ledger/database.ml @@ -278,9 +278,13 @@ module Make (Inputs : Intf.Inputs.DATABASE) = struct Option.map (last_location mdb) ~f:Location.to_path_exn end - let get_at_index_exn mdb index = + let get_at_index mdb index = let addr = Addr.of_int_exn ~ledger_depth:mdb.depth index in - get mdb (Location.Account addr) |> Option.value_exn + get mdb (Location.Account addr) + + let get_at_index_exn mdb index = + get_at_index mdb index + |> Option.value_exn ~message:"Expected account at index" ~here:[%here] let all_accounts (t : t) = match Account_location.last_location_address t with @@ -572,13 +576,19 @@ module Make (Inputs : Intf.Inputs.DATABASE) = struct | Ok location -> Ok (`Existed, location) - let iteri t ~f = + let iteri_untrusted t ~f = match Account_location.last_location_address t with | None -> () | Some last_addr -> Sequence.range ~stop:`inclusive 0 (Addr.to_int last_addr) - |> Sequence.iter ~f:(fun i -> f i (get_at_index_exn t i)) + |> Sequence.iter 
~f:(fun i -> f i (get_at_index t i)) + + let iteri t ~f = + iteri_untrusted t ~f:(fun index account_opt -> + f index + (Option.value_exn ~message:"Expected account at index" ~here:[%here] + account_opt ) ) (* TODO : if key-value store supports iteration mechanism, like RocksDB, maybe use that here, instead of loading all accounts into memory See Issue diff --git a/src/lib/merkle_ledger/intf.ml b/src/lib/merkle_ledger/intf.ml index 9b8841fd5c4c..28b482e4d80a 100644 --- a/src/lib/merkle_ledger/intf.ml +++ b/src/lib/merkle_ledger/intf.ml @@ -314,6 +314,9 @@ module Ledger = struct (** list of accounts via slower sequential mechanism *) val to_list_sequential : t -> account list + (** iterate over all indexes and accounts, even when the ledger is not known to be sound; missing accounts are passed to [f] as [None] *) + val iteri_untrusted : t -> f:(index -> account option -> unit) -> unit + (** iterate over all indexes and accounts *) val iteri : t -> f:(index -> account -> unit) -> unit @@ -385,6 +388,8 @@ module Ledger = struct val set_batch : ?hash_cache:hash Addr.Map.t -> t -> (Location.t * account) list -> unit + val get_at_index : t -> int -> account option + val get_at_index_exn : t -> int -> account val set_at_index_exn : t -> int -> account -> unit @@ -542,6 +547,8 @@ module Ledger = struct module Config : Config + val dbs_synced : primary_ledger -> converting_ledger -> bool + (** Create a new converting merkle tree with the given configuration. If [In_directories] is given, existing databases will be opened and used to back the converting merkle tree. If the converting database does not exist diff --git a/src/lib/merkle_ledger/null_ledger.ml b/src/lib/merkle_ledger/null_ledger.ml index 066b62644fc6..2d22279fe23e 100644 --- a/src/lib/merkle_ledger/null_ledger.ml +++ b/src/lib/merkle_ledger/null_ledger.ml @@ -95,6 +95,8 @@ end = struct let set_at_index_exn _t = failwith "set_at_index_exn: null ledgers cannot be mutated" + let get_at_index _t _index = None + let get_at_index_exn _t = failwith "get_at_index_exn: null ledgers are empty" let set_batch ?hash_cache:_ _t = @@ -130,6 +132,8 @@ end = struct let tokens _t _pk = Token_id.Set.empty + let iteri_untrusted _t ~f:_ = () + let iteri _t ~f:_ = () let fold_until _t ~init ~f:_ ~finish = Async.Deferred.return @@ finish init diff --git a/src/lib/merkle_ledger/test/test_converting.ml b/src/lib/merkle_ledger/test/test_converting.ml index f849717d2e8b..905a02d347ab 100644 --- a/src/lib/merkle_ledger/test/test_converting.ml +++ b/src/lib/merkle_ledger/test/test_converting.ml @@ -207,6 +207,59 @@ struct [%test_eq: Migrated.Account.t] stored_migrated_account (Db_converting.convert primary_account) ) ) ) ) + let () = + add_test + "sync detection fails without crashing after accounts are added at high \ addresses" (fun () -> + with_primary ~f:(fun primary -> + let depth = Db.depth primary in + let max_height = Int.min 5 depth - 1 in + populate_primary_db primary max_height ; + with_migrated ~f:(fun migrated -> + let _converting = + Db_converting.of_ledgers_with_migration primary migrated + in + let additional_account = Quickcheck.random_value Account.gen in + let high_index = (1 lsl Int.min 5 depth) - 1 in + let additional_account_addr = + Db.Addr.of_int_exn ~ledger_depth:depth high_index + in + (* Using set_batch_accounts with a high address like this leaves + the databases in an inconsistent state, because it updates + the last added account in the databases but doesn't fill in + the accounts at lower addresses. This state is similar to + what you might get after an incomplete ledger sync.
*) + Db.set_batch_accounts primary + [ (additional_account_addr, additional_account) ] ; + Db_migrated.set_batch_accounts migrated + [ ( additional_account_addr + , Db_converting.convert additional_account ) + ] ; + assert (Db_converting.dbs_synced primary migrated) ) ) ) + + let () = + add_test "sync detection fails after converting ledger account is mutated" + (fun () -> + with_primary ~f:(fun primary -> + let depth = Db.depth primary in + let max_height = Int.min 5 depth in + populate_primary_db primary max_height ; + let account_to_mutate = Db.get_at_index_exn primary 0 in + let new_balance, _overflow_flag = + Balance.add_amount_flagged account_to_mutate.balance + Currency.Amount.one + in + let mutated_account = + Db_converting.convert + { account_to_mutate with balance = new_balance } + in + with_migrated ~f:(fun migrated -> + let _converting = + Db_converting.of_ledgers_with_migration primary migrated + in + Db_migrated.set_at_index_exn migrated 0 mutated_account ; + assert (not (Db_converting.dbs_synced primary migrated)) ) ) ) + let () = add_test "create converting ledger, populate randomly, test iteration order" (fun () -> diff --git a/src/lib/merkle_mask/masking_merkle_tree.ml b/src/lib/merkle_mask/masking_merkle_tree.ml index 4fd9eddfd628..0f2170f3e0e4 100644 --- a/src/lib/merkle_mask/masking_merkle_tree.ml +++ b/src/lib/merkle_mask/masking_merkle_tree.ml @@ -879,10 +879,15 @@ module Make (Inputs : Inputs_intf.S) = struct let addr = Location.to_path_exn location in Addr.to_int addr - let get_at_index_exn t index = + let get_at_index t index = assert_is_attached t ; let addr = Addr.of_int_exn ~ledger_depth:t.depth index in - get t (Location.Account addr) |> Option.value_exn + get t (Location.Account addr) + + let get_at_index_exn t index = + assert_is_attached t ; + get_at_index t index + |> Option.value_exn ~message:"Expected account at index" ~here:[%here] let set_at_index_exn t index account = assert_is_attached t ; @@ -906,11 +911,17 @@ module Make (Inputs : Inputs_intf.S) = struct let%map.Async.Deferred accts = to_list t in List.map accts ~f:Account.identifier |> Account_id.Set.of_list - let iteri t ~f = + let iteri_untrusted t ~f = assert_is_attached t ; let num_accounts = num_accounts t in Sequence.range ~stop:`exclusive 0 num_accounts - |> Sequence.iter ~f:(fun i -> f i (get_at_index_exn t i)) + |> Sequence.iter ~f:(fun i -> f i (get_at_index t i)) + + let iteri t ~f = + iteri_untrusted t ~f:(fun index account_opt -> + f index + (Option.value_exn ~message:"Expected account at index" ~here:[%here] + account_opt ) ) let foldi_with_ignored_accounts t ignored_accounts ~init ~f = assert_is_attached t ;
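(* The merkle_ledger changes above all apply one pattern: the option-returning
   [get_at_index] becomes the primitive accessor, [get_at_index_exn] is derived
   from it, and [iteri] is derived from [iteri_untrusted], which passes missing
   accounts through as [None] instead of raising. That is what lets the
   converting-ledger sync check walk a partially synced database without
   crashing. Below is a minimal, self-contained sketch of the pattern over a
   plain [Hashtbl]; the names and the table are stand-ins assumed for this
   sketch, not the repository's ledger types. *)

let get_at_index tbl index = Hashtbl.find_opt tbl index

let get_at_index_exn tbl index =
  match get_at_index tbl index with
  | Some account -> account
  | None -> failwith "Expected account at index"

(* The untrusted iterator tolerates holes left by a partial sync: callers
   receive [None] for indices with no account. *)
let iteri_untrusted tbl ~num_accounts ~f =
  for i = 0 to num_accounts - 1 do
    f i (get_at_index tbl i)
  done

(* The trusted iterator treats a hole as a hard error; [Option.get] raises on
   [None]. *)
let iteri tbl ~num_accounts ~f =
  iteri_untrusted tbl ~num_accounts ~f:(fun i account_opt ->
      f i (Option.get account_opt))

(* For example, with a table holding only index 0 and num_accounts = 2,
   [iteri_untrusted] visits indices 0 and 1 and passes [None] for the hole,
   while [iteri] raises. *)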