diff --git a/.dockerignore b/.dockerignore index 0b28cdc6f4..51b0b2efdb 100644 --- a/.dockerignore +++ b/.dockerignore @@ -14,6 +14,7 @@ docs/api/tezos-client.html docs/api/tezos-admin-client.html tezos-node +tezos-tx-rollup-* tezos-validator tezos-protocol-compiler tezos-client @@ -24,6 +25,7 @@ tezos-accuser-* tezos-proxy-server tezos-signer tezos-sc-rollup-node-* +tezos-sc-rollup-client-* scripts/opam-test-all.sh.DONE scripts/create_genesis/src diff --git a/.gitattributes b/.gitattributes index 6274577110..05381e5bbf 100644 --- a/.gitattributes +++ b/.gitattributes @@ -9,3 +9,4 @@ devtools export-ignore docs/active export-ignore src/lib_version/current_git_info.ml export-subst +src/lib_version/exe/get_git_info.ml export-subst diff --git a/.gitignore b/.gitignore index d751a42b17..47f8166031 100644 --- a/.gitignore +++ b/.gitignore @@ -10,17 +10,12 @@ __pycache__ /_opam /_docker_build /_snoop -/docs/api/tezos-client.html -/docs/api/tezos-admin-client.html -/docs/api/tezos-signer.html -/docs/api/tezos-accuser-alpha.html -/docs/api/tezos-baker-alpha.html -/docs/api/tezos-endorser-alpha.html /test_results /manifest/manifest /tezos-node +/tezos-tx-rollup-* /tezos-validator /tezos-protocol-compiler /tezos-client @@ -35,19 +30,13 @@ __pycache__ /tezos-snoop /tezos-tps-evaluation /tezos-sc-rollup-node-* +/tezos-sc-rollup-client-* /src/proto_*/parameters/*-parameters.json /scripts/opam-test-all.sh.DONE /scripts/create_genesis/src -/docs/introduction/readme.rst -/docs/api/errors.rst -/docs/0*/rpc.rst -/docs/alpha/rpc.rst -/docs/shell/rpc.rst -/docs/shell/p2p_api.rst - /src/bin_client/test/LOG.* /_coverage_output/*.coverage /_coverage_report diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ab7a9d85f1..9c81bf70b2 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -28,32 +28,45 @@ workflow: # The "manual" stage exists to fix a UI problem that occurs when mixing # manual and non-manual jobs. 
stages: - - bootstrap + - packaging - sanity - build - test - doc - - packaging - build_release - publish_release - test_coverage - manual +# All the jobs belonging to a stage X should be in +# .gitlab/ci/X.yml. The stage "manual" is an exception to this rule, +# as jobs of that stage are not logically related. + +# If the configuration for stage X is long, it can be broken up in +# subfiles .gitlab/ci/X/a.yml, .gitlab/ci/X/b.yml, etc (see e.g. the +# stage test). + +# Finally, templates shall not be defined in one stage file and used +# in another. That is, a template defined in test.yml shall not be +# used in doc.yml (but it's fine to use it in a subfile test/X.yml). + include: - .gitlab/ci/templates.yml # Actual jobs are defined in these included files - - .gitlab/ci/bootstrap-opam-pipeline.yml + - .gitlab/ci/packaging.yml - .gitlab/ci/sanity.yml - .gitlab/ci/build.yml - - .gitlab/ci/lints.yml - - .gitlab/ci/unittest.yml - - .gitlab/ci/integration.yml - - .gitlab/ci/liquidity-baking-scripts-integrity.yml - - .gitlab/ci/coq.yml - - .gitlab/ci/tezt.yml + - .gitlab/ci/test/templates.yml + - .gitlab/ci/test/lints.yml + - .gitlab/ci/test/unit.yml + - .gitlab/ci/test/integration.yml + - .gitlab/ci/test/liquidity-baking-scripts-integrity.yml + - .gitlab/ci/test/coq.yml + - .gitlab/ci/test/tezt.yml - .gitlab/ci/doc.yml - - .gitlab/ci/publish.yml - - .gitlab/ci/coverage.yml - - .gitlab/ci/test-doc-scripts.yml + - .gitlab/ci/doc/test-scripts.yml + - .gitlab/ci/build_release.yml + - .gitlab/ci/publish_release.yml + - .gitlab/ci/test_coverage.yml diff --git a/.gitlab/ci/bootstrap-opam-pipeline.yml b/.gitlab/ci/bootstrap-opam-pipeline.yml deleted file mode 100644 index ccfc759ab5..0000000000 --- a/.gitlab/ci/bootstrap-opam-pipeline.yml +++ /dev/null @@ -1,24 +0,0 @@ -opam:create_pipeline: - extends: - - .default_settings_template - - .image_template__runtime_build_test_dependencies_template - - .rules_template__trigger_opam_pipeline - stage: bootstrap - script: - - 
./scripts/generate_opam_pipeline.sh - - cat opam-ci.yml - artifacts: - paths: - - opam-ci.yml - -opam:trigger: - extends: - - .rules_template__trigger_opam_pipeline - stage: packaging - needs: - - "opam:create_pipeline" - trigger: - include: - - artifact: opam-ci.yml - job: "opam:create_pipeline" - strategy: depend diff --git a/.gitlab/ci/build.yml b/.gitlab/ci/build.yml index 1674bc550e..3570faf705 100644 --- a/.gitlab/ci/build.yml +++ b/.gitlab/ci/build.yml @@ -1,17 +1,5 @@ --- -.build_template: - extends: - - .default_settings_template - - .image_template__runtime_build_test_dependencies_template - - .template__coverage - stage: build - before_script: - - . ./scripts/version.sh - # Load the environment poetry previously created in the docker image. - # Give access to the Python dependencies/executables - - . $HOME/.venv/bin/activate - .build: extends: .build_template needs: [] @@ -29,11 +17,13 @@ fi - diff poetry.lock /home/tezos/poetry.lock - diff pyproject.toml /home/tezos/pyproject.toml - - dune build @runtest_dune_template + # We add $COVERAGE_OPTIONS to all dune build commands to enable reuse of + # build artifacts. + - dune build $COVERAGE_OPTIONS @runtest_dune_template # 2. Actually build - - make all build-test + - make all build-sandbox # 3. Also build the tps evaluation tool which is not part of the default build. - - dune build src/bin_tps_evaluation + - dune build $COVERAGE_OPTIONS src/bin_tps_evaluation # 4. clean-up caches before uploading the cache - opam clean cache: @@ -56,6 +46,8 @@ build_arm64: - .rules_template__development_arm64 variables: ARCH: "arm64" + # Disable coverage for arm64 + COVERAGE_OPTIONS: "" tags: - arm64 @@ -65,7 +57,6 @@ build_x86_64: - .rules_template__development variables: ARCH: "x86_64" - COVERAGE_OPTIONS: --instrument-with bisect_ppx # similar to the build template above, this template # compiles all binaries using the static profile. 
@@ -73,6 +64,8 @@ build_x86_64: extends: - .default_settings_template - .image_template__runtime_build_dependencies_template + variables: + ARCH: "" before_script: - sudo apk --no-cache --virtual add unzip wget eudev-dev autoconf automake libtool linux-headers binutils zlib-static # dune build @install make ocamlopt stack overflow when compiling @@ -83,12 +76,12 @@ build_x86_64: @src/lib_protocol_compiler/install $(for i in src/{,proto_*/}bin_* ; do echo @$i/install ; done) --profile static - - mkdir tezos-binaries && install -t tezos-binaries _build/install/default/bin/* + - mkdir -p "tezos-binaries/$ARCH" && install -t "tezos-binaries/$ARCH" _build/install/default/bin/* # Strip debug symbols in binaries - - find tezos-binaries -maxdepth 1 -type f ! -name "*.*" -exec strip --strip-debug {} \; + - find "tezos-binaries/$ARCH" -maxdepth 1 -type f ! -name "*.*" -exec strip --strip-debug {} \; artifacts: paths: - - tezos-binaries/* + - "tezos-binaries/$ARCH/*" # Static binaries for x86_64 are necessary for development branches, as we want to make # sure the build does not break. We also need it for release tags, to as its artifacts @@ -100,14 +93,18 @@ build:static-x86_64-linux-binaries: script: - sudo apk --no-cache --virtual add upx # Compress resulting binaries - - find tezos-binaries -maxdepth 1 -type f ! -name "*.*" -exec upx {} \; + - find tezos-binaries/x86_64 -maxdepth 1 -type f ! 
-name "*.*" -exec upx {} \; + variables: + ARCH: "x86_64" stage: build build:static-arm64-linux-binaries: extends: - .build_static_binaries_template - .rules_template__master_and_releases - stage: build_release + variables: + ARCH: "arm64" + stage: build script: - echo "No compression for now" tags: diff --git a/.gitlab/ci/build_release.yml b/.gitlab/ci/build_release.yml new file mode 100644 index 0000000000..12a41f5089 --- /dev/null +++ b/.gitlab/ci/build_release.yml @@ -0,0 +1,84 @@ +--- +.build_docker_release_template: + extends: + - .default_settings_template + - .image_template__latest # Docker-in-Docker (dind) + - .docker_registry_auth # Sets up a before_script + variables: + IMAGE_ARCH_PREFIX: "" + script: + - apk --no-cache --virtual add git + # Environment variables from before_script + - . ./scripts/ci/docker.env + # Build minimal, bare and debug images + - ./scripts/create_docker_image.sh + "${DOCKER_IMAGE_NAME}" + "${DOCKER_IMAGE_TAG}" + "${build_deps_image_name}" + "${build_deps_image_version}" + "${CI_COMMIT_SHORT_SHA}" + # auth gitlab or dockerhub registry + # notice the different namespace for gitlab and that we remove the `-` + # Test bare image + - ./scripts/ci/docker_smoke_test.sh "${DOCKER_IMAGE_NAME}bare:${DOCKER_IMAGE_TAG}" "${CI_COMMIT_SHORT_SHA}" version + # Push minimal, bare and debug images + - ./scripts/ci/docker_push_all.sh + +docker:manual_build_amd64: + extends: + - .build_docker_release_template + - .rules_template__development_docker + variables: + IMAGE_ARCH_PREFIX: "amd64_" + stage: manual + interruptible: false + needs: [] + +docker:manual_build_arm64: + extends: + - .build_docker_release_template + - .rules_template__development_docker + variables: + IMAGE_ARCH_PREFIX: "arm64_" + stage: manual + interruptible: false + needs: [] + tags: + - arm64 + +docker:build_amd64: + extends: + - .build_docker_release_template + - .rules_template__master_and_releases + variables: + IMAGE_ARCH_PREFIX: "amd64_" + stage: build + 
+docker:build_arm64: + extends: + - .build_docker_release_template + - .rules_template__master_and_releases + variables: + IMAGE_ARCH_PREFIX: "arm64_" + stage: build + tags: + - arm64 + +upload-gitlab-packages: + extends: + - .rules_template__master_and_releases + image: registry.gitlab.com/gitlab-org/release-cli + variables: + PACKAGE_REGISTRY_URL: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/tezos/${CI_COMMIT_TAG}" + stage: build_release + needs: + - build:static-arm64-linux-binaries + - build:static-x86_64-linux-binaries + script: + - apk --no-cache --virtual add jq curl git + # we run the smoketest only for x86_64 since this job is executed + # on x86_64 + - scripts/ci/static_smoke_test.sh x86_64 version + - scripts/release/upload-static-binaries-to-package-registry.sh x86_64 + - scripts/release/upload-static-binaries-to-package-registry.sh arm64 + - scripts/release/upload-tarball-to-package-registry.sh diff --git a/.gitlab/ci/coverage.yml b/.gitlab/ci/coverage.yml deleted file mode 100644 index 3631954b9f..0000000000 --- a/.gitlab/ci/coverage.yml +++ /dev/null @@ -1,48 +0,0 @@ -# This job fetchs coverage files by precedent test stage. It creates the html, -# summary and cobertura reports. It also provide a coverage % for the merge request. - -unified_coverage: - extends: - - .default_settings_template - - .rules_template__development_unified_coverage - - .image_template__runtime_build_test_dependencies_template - - .template__coverage - stage: test_coverage - # This job requires all artifacts from the stage test, so we override - # the `dependencies: []` in `.default_settings` by setting `dependencies` - # to `null`. - dependencies: - before_script: - # Load the environment poetry previously created in the docker image. - # Give access to the Python dependencies/executables - - . 
"$HOME"/.venv/bin/activate - script: > - if [ "$CI_COMMIT_BRANCH" = "$TEZOS_DEFAULT_BRANCH" ] || [ "$CI_MERGE_REQUEST_SOURCE_BRANCH_NAME" = "$TEZOS_DEFAULT_BRANCH" ]; then - # On the default branch (master), we fetch coverage from the latest merged MR. - COVERAGE_START_COMMIT=$CI_COMMIT_SHA poetry run python3 scripts/ci/coverage.py; - else - # On the development branches, we compute coverage - CORRUPTED_FILES=$(find "$BISECT_FILE" -name \*.corrupted.coverage -type f -print | wc -l); - if [ "$CORRUPTED_FILES" != 0 ]; then - echo "Corrupted files were found, please report this in https://gitlab.com/tezos/tezos/-/issues/1529:"; - find "$BISECT_FILE" -name \*.corrupted.coverage -type f -print; - if [ "$SLACK_COVERAGE_TOKEN" != "" ]; then - scripts/send_slack_alert_coverage.sh $SLACK_COVERAGE_TOKEN $SLACK_COVERAGE_CHANNEL $CI_PIPELINE_URL; - fi - exit 1; - fi - make coverage-report; - # We rewrite the output of the summary to remove the points information - # matching the coverage regexp below. - make coverage-report-summary | sed 's@Coverage: [[:digit:]]\+/[[:digit:]]\+ (\(.*%\))@Coverage: \1@'; - make coverage-report-cobertura - fi - coverage: '/Coverage: ([^%]+%)/' - artifacts: - expose_as: 'Coverage report' - when: always - reports: - cobertura: _coverage_report/cobertura.xml - paths: - - _coverage_report/ - expire_in: 15 days diff --git a/.gitlab/ci/doc.yml b/.gitlab/ci/doc.yml index 91ca70f9c4..a5258db931 100644 --- a/.gitlab/ci/doc.yml +++ b/.gitlab/ci/doc.yml @@ -62,3 +62,44 @@ documentation:linkcheck: - make -C docs linkcheck - make -C docs sanitycheck allow_failure: true + +# here we use this hack to publish the tezos documentation on +# gitlab.io because we want to publish the doc for the project +# tezos under https://tezos.gitlab.io and not https://tezos.gitlab.io/tezos +# The latter follows the gitlab url convention of +# https://.gitlab.io// +# Notice that we push only if CI_COMMIT_REF_NAME is really master . 
+# This allows to test the release workflow +publish:documentation: + extends: + - .default_settings_template + - .image_template__runtime_build_test_dependencies_template + - .rules_template__master + stage: doc + before_script: + - sudo apk add --no-cache openssh-client rsync + - echo "${CI_PK_GITLAB_DOC}" > ~/.ssh/id_ed25519 + - echo "${CI_KH}" > ~/.ssh/known_hosts + - chmod 400 ~/.ssh/id_ed25519 + # Load the environment poetry previously created in the docker image. + # Give access to the Python dependencies/executables + - . $HOME/.venv/bin/activate + script: + - if [ "${CI_COMMIT_REF_NAME}" == "master" ] ; then + make -C docs all ; + git clone --depth 5 git@gitlab.com:${CI_PROJECT_NAMESPACE}/${CI_PROJECT_NAMESPACE}.gitlab.io gitlab.io ; + rsync --recursive --links --perms --delete + --exclude=.doctrees --exclude={{main,alpha,zero}net,master}/index.html + docs/_build/ gitlab.io/public/ ; + cd gitlab.io ; + else + echo "Skip pushing documentation. Only pushing for real master" ; + fi + - if [ -z "$(git status -s)" ] ; then + echo "Nothing to commit!" ; + else + git add public ; + git commit -m "Import doc of ${CI_PROJECT_NAMESPACE}/${CI_PROJECT_NAME}:${CI_COMMIT_SHA}" ; + git push origin master ; + fi + interruptible: false diff --git a/.gitlab/ci/doc/test-scripts.yml b/.gitlab/ci/doc/test-scripts.yml new file mode 100644 index 0000000000..f714814b7e --- /dev/null +++ b/.gitlab/ci/doc/test-scripts.yml @@ -0,0 +1,95 @@ +.base-test-doc-scripts-template: + stage: doc + needs: [] + before_script: + # Trigger an allowed fail on runner that do not have the tezos_infra tag + - if ! 
echo $CI_RUNNER_TAGS | grep -qe '\btezos_infra\b'; then + echo -e "\e[33m/.\ This test is skipped on runners lacking the tezos_infra tag\e[0m"; + exit 137; + fi + allow_failure: + exit_codes: 137 + +.install_bin_ubuntu_template: + extends: + - .default_settings_template + - .base-test-doc-scripts-template + - .rules_template__development_documentation + script: + - ./docs/introduction/install-bin-ubuntu.sh + +install_bin_bionic: + image: public.ecr.aws/lts/ubuntu:18.04_stable + extends: .install_bin_ubuntu_template + +install_bin_focal: + image: public.ecr.aws/lts/ubuntu:20.04_stable + extends: .install_bin_ubuntu_template + +.install_bin_rc_ubuntu_template: + extends: + - .default_settings_template + - .base-test-doc-scripts-template + - .rules_template__development_documentation + script: + - ./docs/introduction/install-bin-rc-ubuntu.sh + +install_bin_rc_bionic: + image: public.ecr.aws/lts/ubuntu:18.04_stable + extends: .install_bin_rc_ubuntu_template + +install_bin_rc_focal: + image: public.ecr.aws/lts/ubuntu:20.04_stable + extends: .install_bin_rc_ubuntu_template + +.install_bin_fedora_template: + extends: + - .default_settings_template + - .base-test-doc-scripts-template + - .rules_template__development_documentation + script: + - ./docs/introduction/install-bin-fedora.sh + +install_bin_fedora_34: + image: fedora:34 + extends: .install_bin_fedora_template + +.install_bin_rc_fedora_template: + extends: + - .default_settings_template + - .base-test-doc-scripts-template + - .rules_template__development_documentation + script: + - ./docs/introduction/install-bin-rc-fedora.sh + +install_bin_rc_fedora_34: + image: fedora:34 + extends: .install_bin_rc_fedora_template + +.install_opam_ubuntu_template: + extends: + - .default_settings_template + - .base-test-doc-scripts-template + - .rules_template__development_documentation + script: + - ./docs/introduction/install-opam.sh + +install_opam_bionic: + image: ocaml/opam:ubuntu-18.04 + extends: 
.install_opam_ubuntu_template + +install_opam_focal: + image: ocaml/opam:ubuntu-20.04 + extends: .install_opam_ubuntu_template + +.compile_sources_template: + extends: + - .default_settings_template + - .base-test-doc-scripts-template + - .rules_template__development_documentation + script: + - ./docs/introduction/compile-sources.sh + +compile_sources_buster: + image: ocaml/opam:debian-10 + extends: .compile_sources_template diff --git a/.gitlab/ci/integration.yml b/.gitlab/ci/integration.yml deleted file mode 100644 index 3c6a6c0a66..0000000000 --- a/.gitlab/ci/integration.yml +++ /dev/null @@ -1,145 +0,0 @@ -# Definition for the environment to run all integration tests. -# This is also used by Tezt tests. -# In general we do not have to run make, which takes a while, -# because the binaries have been produced by the build job and are -# in the cache. But if they are not, we need to build them. -# Ideally we should also check that the baker / accuser / endorser -# exist (some tests use them) but their actual name depend on the protocol. -.integration_template: - extends: - - .test_template - - .template__coverage_files - dependencies: ["build_x86_64"] - before_script: - - if [ ! -f tezos-node ] || [ ! -f tezos-client ] || [ ! -f tezos-codec ] || [ ! -f tezos-sandbox ]; then make; fi - -# integration:proto:sandbox and integration:compiler-rejections do not -# require access to the binaries like the "true" integration tests -# below. Therefore, they do not extend the .integration_template. 
- -integration:proto:sandbox: - extends: .test_template - script: - - dune build @runtest_sandbox - -integration:compiler-rejections: - extends: .test_template - script: - - dune build @runtest_rejections - -############################################################ -## Stage: run scripts to check they are working properly ## -############################################################ - -script:prepare_migration_test: - extends: .test_template - before_script: - - last_proto_name=$(find src -name "proto_[0-9][0-9][0-9]_*" | awk -F'/' '{print $NF}' | sort -r | head -1) - - last_proto_version=$(echo $last_proto_name | cut -d'_' -f2) - - new_proto_version=$(printf "%03d" $((10#$last_proto_version + 1))) - - make - script: - - ./scripts/prepare_migration_test.sh manual "next_$new_proto_version" 1 - -script:snapshot_alpha_and_link: - extends: .test_template - script: - - last_proto_name=$(find src -name "proto_[0-9][0-9][0-9]_*" | awk -F'/' '{print $NF}' | sort -r | head -1) - - last_proto_version=$(echo $last_proto_name | cut -d'_' -f2) - - new_proto_version=$(printf "%03d" $((10#$last_proto_version + 1))) - - make tezos-protocol-compiler - - ./scripts/snapshot_alpha_and_link.sh "$new_proto_version" next - - make - - dune build src/proto_"$new_proto_version"_*/ - -############################################################ -## Stage: run OCaml integration tests ## -############################################################ - -integration:sandboxes:acc-endorsement: - extends: .integration_template - script: - - TMP=$PWD make -f sandbox.Makefile accusations_simple_double_endorsing - artifacts: - paths: - - flextesa-acc-sde - - $BISECT_FILE - expire_in: 1 day - when: always - -integration:sandboxes:u-a-u: - extends: .integration_template - script: - - TMP=$PWD make -f sandbox.Makefile user_activated_upgrade_next - - TMP=$PWD make -f sandbox.Makefile user_activated_upgrade_alpha - artifacts: - paths: - - flextesa-hard-fork - - flextesa-hard-fork-alpha - # FIXME 
https://gitlab.com/tezos/tezos/-/issues/2189 - # This job can produce corrupted coverage file - # - $BISECT_FILE - expire_in: 1 day - when: always - -integration:sandboxes:daemons-upgrade: - extends: .integration_template - script: - - TMP=$PWD make -f sandbox.Makefile daemons_upgrade_next - - TMP=$PWD make -f sandbox.Makefile daemons_upgrade_alpha - artifacts: - paths: - - flextesa-daemons-upgrade - - flextesa-daemons-upgrade-alpha - expire_in: 1 day - when: always - -############################################################ -## Stage: run python integration tests ## -############################################################ - -# definition for the environment to run all integration tests -# integration tests are run only on x86_64 architectures (for now) -.integration_python_template: - extends: - - .test_template - needs: - - build_x86_64 - dependencies: - - build_x86_64 - before_script: - # Load the environment poetry previously created in the docker image. - # Give access to the Python dependencies/executables - - . $HOME/.venv/bin/activate - - mkdir tests_python/tmp - - cd tests_python - # python scripts don't need the _build directory - # but only the binaries in the artifacts - cache: {} - -integration:pytest: - extends: - - .integration_python_template - - .template__coverage_files - # the number of jobs have been choosen to give jobs of maximal - # length ~10 minutes and to accommodate the addition of new protocol - # test suites without increasing wall time of the test stage. 
- parallel: 25 - script: - - poetry run pytest --exitfirst --prev-junit-xml test-results.xml --job $CI_NODE_INDEX/$CI_NODE_TOTAL --color=yes --log-dir=tmp "--junitxml=reports/report_${CI_NODE_INDEX}_${CI_NODE_TOTAL}.xml" --timeout 1800 - artifacts: - paths: - - tests_python/tmp/ - - tests_python/reports/*.xml - - $BISECT_FILE - reports: - junit: tests_python/reports/*.xml - expire_in: 7 day - when: always - -integration:pytest_examples: - extends: .integration_python_template - script: - - PYTHONPATH=$PYTHONPATH:./ poetry run python examples/forge_transfer.py - - PYTHONPATH=$PYTHONPATH:./ poetry run python examples/example.py - - PYTHONPATH=./ poetry run pytest --exitfirst examples/test_example.py diff --git a/.gitlab/ci/lints.yml b/.gitlab/ci/lints.yml deleted file mode 100644 index 65270f59d6..0000000000 --- a/.gitlab/ci/lints.yml +++ /dev/null @@ -1,63 +0,0 @@ -misc_checks: - extends: .build_template - stage: test - needs: [] - script: - # checks that all deps of opam packages are already installed - - ./scripts/opam-check.sh - # misc linting - - find . ! -path "./_opam/*" -name "*.opam" -exec opam lint {} +; - - make check-linting - - make check-python-linting - # python checks - - make -C tests_python typecheck - # Ensure that all unit tests are restricted to their opam package - - make lint-tests-pkg - # check that the hack-module patch applies cleanly - - git apply devtools/protocol-print/add-hack-module.patch - # check that yes-wallet builds correctly - - dune build scripts/yes-wallet/yes_wallet.exe - artifacts: - when: always - paths: - - opam_repo.patch - expire_in: 1 days - -semgrep: - extends: - - .rules_template__development - # We specify the image by hash to avoid flakiness. Indeed, if we took the - # latest release, then an update in the parser or analyser could result in new - # errors being found even if the code doesn't change. 
This would place the - # burden for fixing the code on the wrong dev (the devs who happen to open an - # MR coinciding with the semgrep update rather than the dev who wrote the - # infringing code in the first place). - # Update the hash in scripts/semgrep/README.md too when updating it here - # Last update: 20212-01-03 - image: returntocorp/semgrep-agent:sha-c6cd7cf - stage: test - needs: [] - script: - - echo "OCaml code linting. For information on how to reproduce locally, check out scripts/semgrep/README.md" - - sh ./scripts/semgrep/lint-all-ocaml-sources.sh - -check_precommit_hook: - extends: .build_template - stage: test - needs: [] - script: - - ./scripts/pre_commit/pre_commit.py --test-itself - - poetry run pylint scripts/pre_commit/pre_commit.py - - poetry run pycodestyle scripts/pre_commit/pre_commit.py - - poetry run mypy scripts/pre_commit/pre_commit.py - -check_scripts_b58_prefix: - # Can be changed to a python image, but using the build docker image to keep - # in sync with the python version used for the tests - extends: .test_template - needs: [] - before_script: - - . 
$HOME/.venv/bin/activate - script: - - poetry run pylint scripts/b58_prefix/b58_prefix.py --disable=missing-docstring --disable=invalid-name - - poetry run pytest scripts/b58_prefix/test_b58_prefix.py -v diff --git a/.gitlab/ci/packaging.yml b/.gitlab/ci/packaging.yml new file mode 100644 index 0000000000..b221a7ceee --- /dev/null +++ b/.gitlab/ci/packaging.yml @@ -0,0 +1,24 @@ +opam:create_pipeline: + extends: + - .default_settings_template + - .image_template__runtime_build_test_dependencies_template + - .rules_template__trigger_opam_pipeline + stage: packaging + script: + - ./scripts/generate_opam_pipeline.sh + - cat opam-ci.yml + artifacts: + paths: + - opam-ci.yml + +opam:trigger: + extends: + - .rules_template__trigger_opam_pipeline + stage: packaging + needs: + - "opam:create_pipeline" + trigger: + include: + - artifact: opam-ci.yml + job: "opam:create_pipeline" + strategy: depend diff --git a/.gitlab/ci/publish.yml b/.gitlab/ci/publish.yml deleted file mode 100644 index 78fca63d68..0000000000 --- a/.gitlab/ci/publish.yml +++ /dev/null @@ -1,228 +0,0 @@ -# Setup authentication for either Docker Hub (release) or GitLab registry (dev). -# Also setup Docker names such that they are valid for the target (Docker Hub or GitLab). -# Docker constraints on tags: https://docs.docker.com/engine/reference/commandline/tag/ -# -# A tag name must be valid ASCII and may contain lowercase and -# uppercase letters, digits, underscores, periods and dashes. A tag -# name may not start with a period or a dash and may contain a maximum -# of 128 characters. 
-# -# To detect dev mode we test the value of $MASTER_OR_RELEASE -# The variable MASTER_OR_RELEASE is set in the template -# .rules_template__master_and_releases -# GitLab image name must follow this format: -# https://docs.gitlab.com/ee/user/packages/container_registry/#image-naming-convention -.docker_registry_auth: - before_script: - - mkdir ~/.docker || true - - if [ "$MASTER_OR_RELEASE" = "true" ]; then - export DOCKER_IMAGE_NAME="docker.io/${CI_PROJECT_PATH}-" ; - echo "{ \"auths\":{ \"https://index.docker.io/v1/\":{ \"auth\":\"${CI_DOCKER_AUTH}\" } } }" > ~/.docker/config.json ; - else - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY ; - export DOCKER_IMAGE_NAME="${CI_REGISTRY}/${CI_PROJECT_NAMESPACE}/${CI_PROJECT_NAME}/" ; - fi - - TAG_NAME=$(echo "${IMAGE_ARCH_PREFIX}${CI_COMMIT_REF_NAME}" | tr -c -- '-._\n[:alnum:]' '_') - - echo "Building images ${DOCKER_IMAGE_NAME%?} ( /bare & /debug ) ${TAG_NAME}" - -.build_docker_release_template: - extends: - - .default_settings_template - - .image_template__latest - - .docker_registry_auth # this setup a before_script for the auth - services: - - docker:dind - variables: - DOCKER_DRIVER: overlay2 - IMAGE_ARCH_PREFIX: "" - script: - - apk --no-cache --virtual add git - # Build normal, bare and debug image - - ./scripts/create_docker_image.sh - "${DOCKER_IMAGE_NAME}" - "${TAG_NAME}" - "${build_deps_image_name}" - "${build_deps_image_version}" - "${CI_COMMIT_SHORT_SHA}" - # auth gitlab or dockerhub registry - # notice the different namespace for gitlab and that we remove the `-` - # Test the bare image - - ./scripts/ci/docker_smoke_test.sh "${DOCKER_IMAGE_NAME}bare:${TAG_NAME}" "${CI_COMMIT_SHORT_SHA}" version - - docker push "${DOCKER_IMAGE_NAME%?}:${TAG_NAME}" ; - - docker push "${DOCKER_IMAGE_NAME}bare:${TAG_NAME}" ; - - docker push "${DOCKER_IMAGE_NAME}debug:${TAG_NAME}" ; - interruptible: false - -publish:docker_manual_amd64: - extends: - - .build_docker_release_template - - 
.rules_template__development_docker - variables: - IMAGE_ARCH_PREFIX: "amd64_" - stage: manual - needs: [] - -publish:docker_manual_arm64: - extends: - - .build_docker_release_template - - .rules_template__development_docker - variables: - IMAGE_ARCH_PREFIX: "arm64_" - stage: manual - needs: [] - tags: - - arm64 - -build_release:docker_amd64: - extends: - - .build_docker_release_template - - .rules_template__master_and_releases - variables: - IMAGE_ARCH_PREFIX: "amd64_" - stage: build_release - tags: - - safe_docker - -build_release:docker_arm64: - extends: - - .build_docker_release_template - - .rules_template__master_and_releases - variables: - IMAGE_ARCH_PREFIX: "arm64_" - stage: build_release - tags: - - arm64 - -.release_static_binaries_template: - extends: - - .rules_template__release_tag - image: registry.gitlab.com/gitlab-org/release-cli - variables: - ARCH_PREFIX: "" - PACKAGE_REGISTRY_URL: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/tezos/${CI_COMMIT_TAG}" - stage: publish_release - script: - - apk --no-cache --virtual add bash jq curl - - scripts/release/upload-static-binaries-to-package-registry.sh "$ARCH_PREFIX" - -release-static-x86_64-binaries: - extends: .release_static_binaries_template - variables: - ARCH_PREFIX: "x86_64-" - dependencies: - - build:static-x86_64-linux-binaries - -release-static-arm64-binaries: - extends: .release_static_binaries_template - variables: - ARCH_PREFIX: "arm64-" - dependencies: - - build:static-arm64-linux-binaries - -release-on-gitlab: - extends: - - .rules_template__release_tag - image: registry.gitlab.com/gitlab-org/release-cli - variables: - PACKAGE_REGISTRY_URL: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/tezos/${CI_COMMIT_TAG}" - stage: publish_release - script: - - apk --no-cache --virtual add bash jq - - scripts/release/create-release-with-static-binaries.sh - -# Note: here we rely on $IMAGE_ARCH_PREFIX to be empty. -# Otherwise, $TAG_NAME would contain $IMAGE_ARCH_PREFIX too. 
-# $IMAGE_ARCH_PREFIX is only used when building Docker images, -# here we handle all architectures so there is no such variable. -merge-manifest: - extends: - - .rules_template__master_and_releases - - .docker_registry_auth - image: docker:latest - services: - - name: "docker:dind" - command: ["--experimental"] - variables: - DOCKER_DRIVER: overlay2 - stage: publish_release - script: - - apk add git - - LAST_COMMIT_DATE_TIME=$(git log --pretty=format:"%cd" -1 --date="format:%Y%m%d%H%M%S" 2>&1) - - - docker pull "${DOCKER_IMAGE_NAME}bare:amd64_${TAG_NAME}" - - docker pull "${DOCKER_IMAGE_NAME}bare:arm64_${TAG_NAME}" - - docker manifest create "${DOCKER_IMAGE_NAME}bare:${TAG_NAME}" - --amend "${DOCKER_IMAGE_NAME}bare:amd64_${TAG_NAME}" - --amend "${DOCKER_IMAGE_NAME}bare:arm64_${TAG_NAME}" - - docker manifest push "${DOCKER_IMAGE_NAME}bare:${TAG_NAME}" - - - docker manifest create "${DOCKER_IMAGE_NAME}bare:${TAG_NAME}_${CI_COMMIT_SHORT_SHA}_${LAST_COMMIT_DATE_TIME}" - --amend "${DOCKER_IMAGE_NAME}bare:amd64_${TAG_NAME}" - --amend "${DOCKER_IMAGE_NAME}bare:arm64_${TAG_NAME}" - - docker manifest push "${DOCKER_IMAGE_NAME}bare:${TAG_NAME}_${CI_COMMIT_SHORT_SHA}_${LAST_COMMIT_DATE_TIME}" - - - docker pull "${DOCKER_IMAGE_NAME}debug:amd64_${TAG_NAME}" - - docker pull "${DOCKER_IMAGE_NAME}debug:arm64_${TAG_NAME}" - - docker manifest create "${DOCKER_IMAGE_NAME}debug:${TAG_NAME}" - --amend "${DOCKER_IMAGE_NAME}debug:amd64_${TAG_NAME}" - --amend "${DOCKER_IMAGE_NAME}debug:arm64_${TAG_NAME}" - - docker manifest push "${DOCKER_IMAGE_NAME}debug:${TAG_NAME}" - - - docker manifest create "${DOCKER_IMAGE_NAME}debug:${TAG_NAME}_${CI_COMMIT_SHORT_SHA}_${LAST_COMMIT_DATE_TIME}" - --amend "${DOCKER_IMAGE_NAME}debug:amd64_${TAG_NAME}" - --amend "${DOCKER_IMAGE_NAME}debug:arm64_${TAG_NAME}" - - docker manifest push "${DOCKER_IMAGE_NAME}debug:${TAG_NAME}_${CI_COMMIT_SHORT_SHA}_${LAST_COMMIT_DATE_TIME}" - - - docker pull "${DOCKER_IMAGE_NAME%?}:amd64_${TAG_NAME}" - - docker pull 
"${DOCKER_IMAGE_NAME%?}:arm64_${TAG_NAME}" - - docker manifest create "${DOCKER_IMAGE_NAME%?}:${TAG_NAME}" - --amend "${DOCKER_IMAGE_NAME%?}:amd64_${TAG_NAME}" - --amend "${DOCKER_IMAGE_NAME%?}:arm64_${TAG_NAME}" - - docker manifest push "${DOCKER_IMAGE_NAME%?}:${TAG_NAME}" - - - docker manifest create "${DOCKER_IMAGE_NAME%?}:${TAG_NAME}_${CI_COMMIT_SHORT_SHA}_${LAST_COMMIT_DATE_TIME}" - --amend "${DOCKER_IMAGE_NAME%?}:amd64_${TAG_NAME}" - --amend "${DOCKER_IMAGE_NAME%?}:arm64_${TAG_NAME}" - - docker manifest push "${DOCKER_IMAGE_NAME%?}:${TAG_NAME}_${CI_COMMIT_SHORT_SHA}_${LAST_COMMIT_DATE_TIME}" - interruptible: false - -# here we use this hack to publish the tezos documentation on -# gitlab.io because we want to publish the doc for the project -# tezos under https://tezos.gitlab.io and not https://tezos.gitlab.io/tezos -# The latter follows the gitlab url convention of -# https://.gitlab.io// -# Notice that we push only if CI_COMMIT_REF_NAME is really master . -# This allows to test the release workflow -publish:documentation: - extends: - - .default_settings_template - - .image_template__runtime_build_test_dependencies_template - - .rules_template__master - stage: doc - before_script: - - sudo apk add --no-cache openssh-client rsync - - echo "${CI_PK_GITLAB_DOC}" > ~/.ssh/id_ed25519 - - echo "${CI_KH}" > ~/.ssh/known_hosts - - chmod 400 ~/.ssh/id_ed25519 - # Load the environment poetry previously created in the docker image. - # Give access to the Python dependencies/executables - - . $HOME/.venv/bin/activate - script: - - if [ "${CI_COMMIT_REF_NAME}" == "master" ] ; then - make -C docs all ; - git clone --depth 5 git@gitlab.com:${CI_PROJECT_NAMESPACE}/${CI_PROJECT_NAMESPACE}.gitlab.io gitlab.io ; - rsync --recursive --links --perms --delete - --exclude=.doctrees --exclude={{main,alpha,zero}net,master}/index.html - docs/_build/ gitlab.io/public/ ; - cd gitlab.io ; - else - echo "Skip pushing documentation. 
Only pushing for real master" ; - fi - - if [ -z "$(git status -s)" ] ; then - echo "Nothing to commit!" ; - else - git add public ; - git commit -m "Import doc of ${CI_PROJECT_NAMESPACE}/${CI_PROJECT_NAME}:${CI_COMMIT_SHA}" ; - git push origin master ; - fi - interruptible: false diff --git a/.gitlab/ci/publish_release.yml b/.gitlab/ci/publish_release.yml new file mode 100644 index 0000000000..31e915343e --- /dev/null +++ b/.gitlab/ci/publish_release.yml @@ -0,0 +1,32 @@ +--- +release-on-gitlab: + image: registry.gitlab.com/gitlab-org/release-cli + extends: + - .rules_template__release_tag + variables: + PACKAGE_REGISTRY_URL: "${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/${CI_PROJECT_NAMESPACE}/${CI_COMMIT_TAG}" + stage: publish_release + script: + - apk --no-cache --virtual add jq curl + - scripts/release/create-gitlab-release.sh + dependencies: + - build:static-arm64-linux-binaries + - build:static-x86_64-linux-binaries + +# Note: here we rely on $IMAGE_ARCH_PREFIX to be empty. +# Otherwise, $DOCKER_IMAGE_TAG would contain $IMAGE_ARCH_PREFIX too. +# $IMAGE_ARCH_PREFIX is only used when building Docker images, +# here we handle all architectures so there is no such variable. +docker:merge_manifests: + extends: + - .rules_template__master_and_releases + - .image_template__latest # Docker-in-Docker (dind) + - .docker_registry_auth # Sets up a before_script + stage: publish_release + # this job does not need artifacts from previous jobs. + dependencies: [] + script: + - apk add git + # Environment variables from before_script + - . 
./scripts/ci/docker.env + - ./scripts/ci/docker_merge_manifests.sh diff --git a/.gitlab/ci/sanity.yml b/.gitlab/ci/sanity.yml index 55b085c255..39a9752415 100644 --- a/.gitlab/ci/sanity.yml +++ b/.gitlab/ci/sanity.yml @@ -11,7 +11,7 @@ sanity_ci: - make -C manifest check - src/tooling/lint.sh --check-gitlab-ci-yml -docker-hadolint: +docker:hadolint: image: hadolint/hadolint:latest-debian stage: sanity needs: [] diff --git a/.gitlab/ci/templates.yml b/.gitlab/ci/templates.yml index e147bb40df..62e45f6edd 100644 --- a/.gitlab/ci/templates.yml +++ b/.gitlab/ci/templates.yml @@ -1,6 +1,6 @@ variables: ## This value MUST be the same as `opam_repository_tag` in `scripts/version.sh` - build_deps_image_version: 736a35328609e04e1c9e320485f3a47b5fa3251c + build_deps_image_version: 2d722cad68c77373eb49ca4171baac34e7130bbb build_deps_image_name: registry.gitlab.com/tezos/opam-repository GIT_STRATEGY: fetch GIT_DEPTH: "1" @@ -32,8 +32,28 @@ variables: .image_template__runtime_prebuild_dependencies_template: image: ${build_deps_image_name}:runtime-prebuild-dependencies--${build_deps_image_version} +# We should only use the latest version of Docker for experimental features (ex: docker manifest) .image_template__latest: - image: docker:latest + image: docker:20.10.12 + services: + - name: docker:20.10.12-dind + command: ["--experimental"] + variables: + DOCKER_DRIVER: overlay2 + DOCKER_BUILDKIT: 1 + BUILDKIT_PROGRESS: plain + +# We should use the stable version to build Docker images (this template is not used at the moment) +# Match the GitLab runners/executors version for Docker-in-Docker +# https://gitlab.com/nomadic-labs/iac/packer/pck-aws-baseimage/-/blob/master/roles/docker-ubuntu.yaml +.image_template__stable: + image: docker:19.03.15 + services: + - docker:19.03.15-dind + variables: + DOCKER_DRIVER: overlay2 + DOCKER_BUILDKIT: 1 + BUILDKIT_PROGRESS: plain # Rules template @@ -108,6 +128,8 @@ variables: rules: - if: '$CI_COMMIT_TAG =~ /\A\d+\.\d+\.\d+\z/ && 
$CI_PROJECT_NAMESPACE == $TEZOS_DEFAULT_NAMESPACE' when: on_success + - if: '$CI_PROJECT_NAMESPACE == "nomadic-labs"' + when: on_success - when: never # Rules for specific topics: doc, opam, etc. @@ -180,12 +202,12 @@ variables: .rules_template__trigger_opam_pipeline: rules: - # Run on push to the default branch (i.e master). - - if: '$CI_COMMIT_BRANCH == $TEZOS_DEFAULT_BRANCH' - when: always # Run on scheduled builds. - if: '$TZ_PIPELINE_KIND == "SCHEDULE" && $TZ_SCHEDULE_KIND == "EXTENDED_TESTS"' when: always + # Never run on branch pipelines for master. + - if: '$CI_COMMIT_BRANCH == $TEZOS_DEFAULT_BRANCH' + when: never # Run when the branch name contains the `opam` keyword. - if: '$CI_COMMIT_BRANCH =~ /opam/ || $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME =~ /opam/' when: always @@ -209,7 +231,7 @@ variables: # Job templates # This template is used by the opam test jobs in the child pipeline -# triggered by `bootstrap-opam-pipeline.yml`. +# triggered by `packaging.yml`. .opam_template: extends: - .default_settings_template @@ -249,17 +271,29 @@ variables: # Merge coverage files after the execution .template__coverage_files: extends: .template__coverage - after_script: - - COVERAGE_MERGED=$(echo $CI_JOB_NAME | sed -r 's/[\/_ ]/-/g') - # If the merge fails, we upload a corrupted coverage file for the test job - - bisect-ppx-report merge --coverage-path $BISECT_FILE $COVERAGE_MERGED.coverage - || { COVERAGE_MERGED=$COVERAGE_MERGED.corrupted; echo "" > $COVERAGE_MERGED.coverage; } - # If there is nothing to remove we still want to continue and move the corrupted file - - rm $BISECT_FILE*.coverage || true - - mv $COVERAGE_MERGED.coverage $BISECT_FILE artifacts: - name: "coverage-files-$CI_JOB_NAME" + name: "coverage-files-$CI_JOB_ID" paths: - $BISECT_FILE expire_in: 1 day when: on_success + +.build_template: + extends: + - .default_settings_template + - .image_template__runtime_build_test_dependencies_template + - .template__coverage + stage: build + before_script: + - . 
./scripts/version.sh + # Load the environment poetry previously created in the docker image. + # Give access to the Python dependencies/executables + - . $HOME/.venv/bin/activate + +.docker_registry_auth: + before_script: + - ./scripts/ci/docker_registry_auth.sh + +.template__code_quality: + variables: + CODE_QUALITY_REPORT: "_reports/gl-code-quality-report.json" diff --git a/.gitlab/ci/test-doc-scripts.yml b/.gitlab/ci/test-doc-scripts.yml deleted file mode 100644 index c4b9bd1ed4..0000000000 --- a/.gitlab/ci/test-doc-scripts.yml +++ /dev/null @@ -1,71 +0,0 @@ -.base-test-doc-scripts-template: - stage: doc - needs: [] - before_script: - # Trigger an allowed fail on runner that do not have the tezos_infra tag - - if ! echo $CI_RUNNER_TAGS | grep -qe '\btezos_infra\b'; then - echo -e "\e[33m/.\ This test is skipped on runners lacking the tezos_infra tag\e[0m"; - exit 137; - fi - allow_failure: - exit_codes: 137 - -.install_bin_ubuntu_template: - extends: - - .default_settings_template - - .base-test-doc-scripts-template - - .rules_template__development_documentation - script: - - bash ./docs/introduction/install-bin-ubuntu.sh - -install_bin_bionic: - image: public.ecr.aws/lts/ubuntu:18.04_stable - extends: .install_bin_ubuntu_template - -install_bin_focal: - image: public.ecr.aws/lts/ubuntu:20.04_stable - extends: .install_bin_ubuntu_template - -.install_bin_fedora_template: - extends: - - .default_settings_template - - .base-test-doc-scripts-template - - .rules_template__development_documentation - script: - - bash ./docs/introduction/install-bin-fedora.sh - -install_bin_fedora_33: - image: fedora:33 - extends: .install_bin_fedora_template - -install_bin_fedora_34: - image: fedora:34 - extends: .install_bin_fedora_template - -.install_opam_ubuntu_template: - extends: - - .default_settings_template - - .base-test-doc-scripts-template - - .rules_template__development_documentation - script: - - bash < ./docs/introduction/install-opam.sh - -install_opam_bionic: - 
image: ocaml/opam:ubuntu-18.04 - extends: .install_opam_ubuntu_template - -install_opam_focal: - image: ocaml/opam:ubuntu-20.04 - extends: .install_opam_ubuntu_template - -.compile_sources_template: - extends: - - .default_settings_template - - .base-test-doc-scripts-template - - .rules_template__development_documentation - script: - - bash < ./docs/introduction/compile-sources.sh - -compile_sources_buster: - image: ocaml/opam:debian-10 - extends: .compile_sources_template diff --git a/.gitlab/ci/coq.yml b/.gitlab/ci/test/coq.yml similarity index 100% rename from .gitlab/ci/coq.yml rename to .gitlab/ci/test/coq.yml diff --git a/.gitlab/ci/test/integration.yml b/.gitlab/ci/test/integration.yml new file mode 100644 index 0000000000..24c2bd30af --- /dev/null +++ b/.gitlab/ci/test/integration.yml @@ -0,0 +1,134 @@ +# integration:proto:sandbox and integration:compiler-rejections do not +# require access to the binaries like the "true" integration tests +# below. Therefore, they do not extend the .integration_template. 
+ +integration:proto:sandbox: + extends: .test_template + script: + - dune build @runtest_sandbox + +integration:compiler-rejections: + extends: .test_template + script: + - dune build @runtest_rejections + +############################################################ +## Stage: run scripts to check they are working properly ## +############################################################ + +script:prepare_migration_test: + extends: .test_template + before_script: + - last_proto_name=$(find src -name "proto_[0-9][0-9][0-9]_*" | awk -F'/' '{print $NF}' | sort -r | head -1) + - last_proto_version=$(echo $last_proto_name | cut -d'_' -f2) + - new_proto_version=$(printf "%03d" $((10#$last_proto_version + 1))) + - make + script: + - ./scripts/prepare_migration_test.sh manual "next_$new_proto_version" 1 + +script:snapshot_alpha_and_link: + extends: .test_template + script: + - last_proto_name=$(find src -name "proto_[0-9][0-9][0-9]_*" | awk -F'/' '{print $NF}' | sort -r | head -1) + - last_proto_version=$(echo $last_proto_name | cut -d'_' -f2) + - new_proto_version=$(printf "%03d" $((10#$last_proto_version + 1))) + - make tezos-protocol-compiler + - ./scripts/snapshot_alpha_and_link.sh "$new_proto_version" next + - make + - dune build src/proto_"$new_proto_version"_*/ + +script:test-gen-genesis: + extends: + - .default_settings_template + - .image_template__runtime_build_test_dependencies_template + - .rules_template__development + stage: test + needs: [] + before_script: + - cd scripts/gen-genesis + script: + - dune build gen_genesis.exe + +script:test_release_versions: + extends: .test_template + script: + - ./scripts/test_release_version.sh + +############################################################ +## Stage: run OCaml integration tests ## +############################################################ + +integration:sandboxes:acc-endorsement: + extends: .integration_template + script: + - TMP=$PWD make -f sandbox.Makefile accusations_simple_double_endorsing + 
artifacts: + paths: + - flextesa-acc-sde + - $BISECT_FILE + expire_in: 1 day + when: always + +############################################################ +## Stage: run python integration tests ## +############################################################ + +# definition for the environment to run all integration tests +# integration tests are run only on x86_64 architectures (for now) +.integration_python_template: + extends: + - .test_template + needs: + - build_x86_64 + dependencies: + - build_x86_64 + before_script: + # Load the environment poetry previously created in the docker image. + # Give access to the Python dependencies/executables + - . $HOME/.venv/bin/activate + - mkdir tests_python/tmp + - cd tests_python + # python scripts don't need the _build directory + # but only the binaries in the artifacts + cache: {} + +integration:static-binaries: + extends: + - .integration_python_template + allow_failure: true + needs: + - build:static-x86_64-linux-binaries + dependencies: + - build:static-x86_64-linux-binaries + script: + - sudo cp -a ../tezos-binaries/x86_64/* ../ + - make -C ../ build-parameters + - poetry run pytest "tests_alpha/test_basic.py" -m "not slow" --exitfirst --color=yes --log-dir=tmp "--junitxml=reports/alpha_batch.xml" --timeout 7200 + +integration:pytest: + extends: + - .integration_python_template + - .template__coverage_files + # the number of jobs have been choosen to give jobs of maximal + # length ~10 minutes and to accommodate the addition of new protocol + # test suites without increasing wall time of the test stage. 
+ parallel: 25 + script: + - poetry run pytest --exitfirst --prev-junit-xml test-results.xml --job $CI_NODE_INDEX/$CI_NODE_TOTAL --color=yes --log-dir=tmp "--junitxml=reports/report_${CI_NODE_INDEX}_${CI_NODE_TOTAL}.xml" --timeout 1800 + - ../scripts/ci/merge_coverage.sh + artifacts: + paths: + - tests_python/tmp/ + - tests_python/reports/*.xml + - $BISECT_FILE + reports: + junit: tests_python/reports/*.xml + expire_in: 7 day + when: always + +integration:pytest_examples: + extends: .integration_python_template + script: + - PYTHONPATH=$PYTHONPATH:./ poetry run python examples/forge_transfer.py + - PYTHONPATH=$PYTHONPATH:./ poetry run python examples/example.py + - PYTHONPATH=./ poetry run pytest --exitfirst examples/test_example.py diff --git a/.gitlab/ci/test/lints.yml b/.gitlab/ci/test/lints.yml new file mode 100644 index 0000000000..76ae690502 --- /dev/null +++ b/.gitlab/ci/test/lints.yml @@ -0,0 +1,104 @@ +misc_checks: + extends: .build_template + stage: test + needs: [] + script: + # checks that all deps of opam packages are already installed + - ./scripts/opam-check.sh + # misc linting + - find . ! -path "./_opam/*" -name "*.opam" -exec opam lint {} +; + - make check-linting + - make check-python-linting + # python checks + - make -C tests_python typecheck + # Ensure that all unit tests are restricted to their opam package + - make lint-tests-pkg + # check that the hack-module patch applies cleanly + - git apply devtools/protocol-print/add-hack-module.patch + # check that yes-wallet builds correctly + - dune build scripts/yes-wallet/yes_wallet.exe + artifacts: + when: always + paths: + - opam_repo.patch + expire_in: 1 days + +semgrep: + extends: + - .rules_template__development + # We specify the image by hash to avoid flakiness. Indeed, if we took the + # latest release, then an update in the parser or analyser could result in new + # errors being found even if the code doesn't change. 
This would place the + # burden for fixing the code on the wrong dev (the devs who happen to open an + # MR coinciding with the semgrep update rather than the dev who wrote the + # infringing code in the first place). + # Update the hash in scripts/semgrep/README.md too when updating it here + # Last update: 2022-01-03 + image: returntocorp/semgrep-agent:sha-c6cd7cf + stage: test + needs: [] + script: + - echo "OCaml code linting. For information on how to reproduce locally, check out scripts/semgrep/README.md" + - sh ./scripts/semgrep/lint-all-ocaml-sources.sh + +check_precommit_hook: + extends: .build_template + stage: test + needs: [] + script: + - ./scripts/pre_commit/pre_commit.py --test-itself + - poetry run pylint scripts/pre_commit/pre_commit.py + - poetry run pycodestyle scripts/pre_commit/pre_commit.py + - poetry run mypy scripts/pre_commit/pre_commit.py + +check_scripts_b58_prefix: + # Can be changed to a python image, but using the build docker image to keep + # in sync with the python version used for the tests + extends: .test_template + needs: [] + before_script: + - . $HOME/.venv/bin/activate + script: + - poetry run pylint scripts/b58_prefix/b58_prefix.py --disable=missing-docstring --disable=invalid-name + - poetry run pytest scripts/b58_prefix/test_b58_prefix.py -v + +ometrics-code-quality-default: + stage: test + extends: + - .template__code_quality + rules: + # See https://gitlab.com/gitlab-org/gitlab/-/issues/215279 + # In short, GitLab requires a code quality report from the target branch. + # As ometrics tries to find code quality issues against the target branch + # (i.e. previously introduced issues will not be reported), we create a fake + # empty report on the default branch which is supposed to be the target branch + # in most cases. 
+ - if: '$CI_COMMIT_BRANCH == $TEZOS_DEFAULT_BRANCH' + when: always + - when: never + script: + - mkdir -p _reports/ + - echo "[]" > $CODE_QUALITY_REPORT + artifacts: + paths: + - $CODE_QUALITY_REPORT + +ometrics-code-quality: + stage: test + needs: [] + extends: + - .default_settings_template + - .image_template__runtime_build_test_dependencies_template + - .template__code_quality + script: + - OMETRICS_GIT=$CI_MERGE_REQUEST_SOURCE_PROJECT_URL OMETRICS_BRANCH=$CI_MERGE_REQUEST_SOURCE_BRANCH_NAME make lint-ometrics-gitlab + artifacts: + expose_as: 'Code quality report' + when: always + reports: + codequality: $CODE_QUALITY_REPORT + paths: + - _reports/ + expire_in: 15 days + only: + - merge_requests diff --git a/.gitlab/ci/liquidity-baking-scripts-integrity.yml b/.gitlab/ci/test/liquidity-baking-scripts-integrity.yml similarity index 100% rename from .gitlab/ci/liquidity-baking-scripts-integrity.yml rename to .gitlab/ci/test/liquidity-baking-scripts-integrity.yml diff --git a/.gitlab/ci/test/templates.yml b/.gitlab/ci/test/templates.yml new file mode 100644 index 0000000000..31e8a426aa --- /dev/null +++ b/.gitlab/ci/test/templates.yml @@ -0,0 +1,29 @@ +.test_template: + extends: + - .default_settings_template + - .image_template__runtime_build_test_dependencies_template + - .rules_template__development + - .template__coverage + stage: test + before_script: + - . ./scripts/version.sh + retry: 2 + # avoid pushing cache for testing stage + cache: + key: "$CI_COMMIT_REF_SLUG" + policy: pull + +# Definition for the environment to run all integration tests. +# This is also used by Tezt tests. +# In general we do not have to run make, which takes a while, +# because the binaries have been produced by the build job and are +# in the cache. But if they are not, we need to build them. +# Ideally we should also check that the baker / accuser / endorser +# exist (some tests use them) but their actual name depend on the protocol. 
+.integration_template: + extends: + - .test_template + - .template__coverage_files + dependencies: ["build_x86_64"] + before_script: + - if [ ! -f tezos-node ] || [ ! -f tezos-client ] || [ ! -f tezos-codec ] || [ ! -f tezos-sandbox ]; then make; fi diff --git a/.gitlab/ci/test/tezt.yml b/.gitlab/ci/test/tezt.yml new file mode 100644 index 0000000000..7cb950cef5 --- /dev/null +++ b/.gitlab/ci/test/tezt.yml @@ -0,0 +1,71 @@ +# We use the --job option to split tests into jobs of roughly the same +# duration. This is based on a file that contains timings of test results, +# generated with --record. To rebalance jobs, update this record with: +# +# make && dune exec tezt/tests/main.exe -- --record tezt/test-results.json + +.tezt_template: + artifacts: + reports: + junit: tezt-junit.xml + paths: + - tezt.log + - tezt-results-$CI_NODE_INDEX.json + - $BISECT_FILE + expire_in: 1 day + # certain tests can be blacklisted by adding it to this variable + when: always + variables: + TESTS: "/protocol_override" + script: + - dune exec tezt/tests/main.exe -- $TESTS --color --log-buffer-size 5000 --log-file tezt.log --global-timeout 3300 --junit tezt-junit.xml --from-record tezt/records --job $CI_NODE_INDEX/$CI_NODE_TOTAL --record tezt-results-$CI_NODE_INDEX.json -j 4 --retry 1 + - ./scripts/ci/merge_coverage.sh + +tezt: + extends: + - .integration_template + - .template__coverage_files + - .tezt_template + parallel: 9 + +# these are tezt tests as above, but run using the static binaries +tezt:static-binaries: + extends: + - .integration_template + - .tezt_template + dependencies: + - build:static-x86_64-linux-binaries + parallel: 2 + variables: + TESTS: "cli" + +# Long Tezt tests are not ran in the CI, but we want them to type-check so that +# they can be built in the performance regression test framework executors. 
+tezt:build-long: + extends: + - .build_template + - .rules_template__development + stage: test + script: + - dune build @tezt/long_tests/check + +# Note: if you reactivate this test and if you keep it manual, put it in the "manual" stage. +# +#tezt:manual:migration: +# extends: +# - .test_template +# - .rules_template__extended_test_pipeline +# before_script: +# - export TEZOS_CLIENT_UNSAFE_DISABLE_DISCLAIMER=Y +# - curl -s https://api.github.com/repos/Phlogi/tezos-snapshots/releases/latest | jq -r ".assets[] | select(.name) | .browser_download_url" | grep roll | xargs wget -q +# - block_hash=$(echo mainnet.roll.* | sed -r 's/mainnet\.roll\.[0-9_-]+\.(.*)\.[0-9]+\.chain\.xz/\1/g') +# - cat mainnet.roll.* | xz -d -v -T0 > mainnet.rolling +# - scripts/prepare_migration_test.sh auto mainnet.rolling "$block_hash" +# script: +# - if [ ! -f tezos-node ] || [ ! -f tezos-client ] || [ ! -f tezos-codec ] || [ ! -f tezos-sandbox ]; then make; fi +# - dune exec ./tezt/manual_tests/main.exe -- migration --color --log-buffer-size 5000 --log-file tezt-migration.log +# artifacts: +# when: always +# paths: +# - tezt-migration.log +# expire_in: 30 days diff --git a/.gitlab/ci/test/unit.yml b/.gitlab/ci/test/unit.yml new file mode 100644 index 0000000000..fac1fea648 --- /dev/null +++ b/.gitlab/ci/test/unit.yml @@ -0,0 +1,159 @@ +.unit_test_template: + extends: .test_template + variables: + ARCH: "" + MAKE_TARGETS: "" + script: + - make $MAKE_TARGETS + artifacts: + name: "$CI_JOB_NAME-$CI_COMMIT_SHA-${ARCH}" + paths: + - test_results + reports: + junit: test_results/*.xml + expire_in: 1 day + when: always + +.unit_test_template_x86_64: + extends: .unit_test_template + needs: ["build_x86_64"] + variables: + ARCH: "x86_64" + +.unit_test_template_x86_64_coverage: + extends: + - .unit_test_template_x86_64 + - .template__coverage_files + script: + - make $MAKE_TARGETS + - ./scripts/ci/merge_coverage.sh + artifacts: + when: always + paths: + - $BISECT_FILE + - test_results + 
+.unit_test_template_arm64: + extends: .unit_test_template + needs: ["build_arm64"] + variables: + ARCH: "arm64" + tags: + - arm64 + +unit:011_PtHangz2: + extends: + - .unit_test_template_x86_64_coverage + # We use an extra level of indirection for TEST_TARGETS, to avoid + # overly long job names causing GitLab CI to silently fail. + variables: + # Note the use of @ resp. @@ to select tests recursively resp. non-recursively + proto_011_PtHangz2__lib_protocol__1: > + @@src/proto_011_PtHangz2/lib_protocol/test/integration/runtest + @src/proto_011_PtHangz2/lib_protocol/test/integration/consensus/runtest + @src/proto_011_PtHangz2/lib_protocol/test/integration/gas/runtest + proto_011_PtHangz2__lib_protocol__2: > + @src/proto_011_PtHangz2/lib_protocol/test/integration/michelson/runtest + @src/proto_011_PtHangz2/lib_protocol/test/integration/operations/runtest + proto_011_PtHangz2__lib_protocol__3: > + @src/proto_011_PtHangz2/lib_protocol/test/pbt/runtest + @src/proto_011_PtHangz2/lib_protocol/test/unit/runtest + proto_011_PtHangz2: > + @src/proto_011_PtHangz2/lib_benchmark/runtest + @src/proto_011_PtHangz2/lib_client/runtest + parallel: + matrix: + - TEST_TARGETS: + - proto_011_PtHangz2__lib_protocol__1 + - proto_011_PtHangz2__lib_protocol__2 + - proto_011_PtHangz2__lib_protocol__3 + - proto_011_PtHangz2 + script: + - scripts/test_wrapper.sh $TEST_TARGETS ${!TEST_TARGETS} + +unit:012_Psithaca: + extends: + - .unit_test_template_x86_64_coverage + # We use an extra level of indirection for TEST_TARGETS, to avoid + # overly long job names causing GitLab CI to silently fail. + variables: + # Note the use of @ resp. @@ to select tests recursively resp. 
non-recursively + proto_012_Psithaca__lib_protocol__1: > + @@src/proto_012_Psithaca/lib_protocol/test/integration/runtest + @src/proto_012_Psithaca/lib_protocol/test/integration/consensus/runtest + @src/proto_012_Psithaca/lib_protocol/test/integration/gas/runtest + proto_012_Psithaca__lib_protocol__2: > + @src/proto_012_Psithaca/lib_protocol/test/integration/michelson/runtest + @src/proto_012_Psithaca/lib_protocol/test/integration/operations/runtest + proto_012_Psithaca__lib_protocol__3: > + @src/proto_012_Psithaca/lib_protocol/test/pbt/runtest + @src/proto_012_Psithaca/lib_protocol/test/unit/runtest + proto_012_Psithaca: > + @src/proto_012_Psithaca/lib_benchmark/runtest + @src/proto_012_Psithaca/lib_client/runtest + @src/proto_012_Psithaca/lib_plugin/runtest + @src/proto_012_Psithaca/lib_delegate/runtest + parallel: + matrix: + - TEST_TARGETS: + - proto_012_Psithaca__lib_protocol__1 + - proto_012_Psithaca__lib_protocol__2 + - proto_012_Psithaca__lib_protocol__3 + - proto_012_Psithaca + script: + - scripts/test_wrapper.sh $TEST_TARGETS ${!TEST_TARGETS} + +unit:alpha: + extends: + - .unit_test_template_x86_64_coverage + # We use an extra level of indirection for TEST_TARGETS, to avoid + # overly long job names causing GitLab CI to silently fail. + variables: + # Note the use of @ resp. @@ to select tests recursively resp. 
non-recursively + proto_alpha__lib_protocol__1: > + @@src/proto_alpha/lib_protocol/test/integration/runtest + @src/proto_alpha/lib_protocol/test/integration/consensus/runtest + @src/proto_alpha/lib_protocol/test/integration/gas/runtest + proto_alpha__lib_protocol__2: > + @src/proto_alpha/lib_protocol/test/integration/michelson/runtest + @src/proto_alpha/lib_protocol/test/integration/operations/runtest + proto_alpha__lib_protocol__3: > + @src/proto_alpha/lib_protocol/test/pbt/runtest + @src/proto_alpha/lib_protocol/test/unit/runtest + proto_alpha: > + @src/proto_alpha/lib_benchmark/runtest + @src/proto_alpha/lib_client/runtest + @src/proto_alpha/lib_plugin/runtest + @src/proto_alpha/lib_delegate/runtest + parallel: + matrix: + - TEST_TARGETS: + - proto_alpha__lib_protocol__1 + - proto_alpha__lib_protocol__2 + - proto_alpha__lib_protocol__3 + - proto_alpha + script: + - scripts/test_wrapper.sh $TEST_TARGETS ${!TEST_TARGETS} + +unit:non-proto-x86_64: + extends: + - .unit_test_template_x86_64_coverage + variables: + MAKE_TARGETS: test-nonproto-unit + +unit:non-proto-arm64: + extends: .unit_test_template_arm64 + variables: + MAKE_TARGETS: test-nonproto-unit + +unit:js_components: + extends: .unit_test_template_x86_64 + script: + # install node + - . ./scripts/install_build_deps.js.sh + - make test-js + +unit:protocol_compiles: + extends: .unit_test_template_x86_64 + script: + - dune build @runtest_compile_protocol diff --git a/.gitlab/ci/test_coverage.yml b/.gitlab/ci/test_coverage.yml new file mode 100644 index 0000000000..d479e199bb --- /dev/null +++ b/.gitlab/ci/test_coverage.yml @@ -0,0 +1,29 @@ +# This job fetchs coverage files by precedent test stage. It creates the html, +# summary and cobertura reports. It also provide a coverage % for the merge request. 
+ +unified_coverage: + extends: + - .default_settings_template + - .rules_template__development_unified_coverage + - .image_template__runtime_build_test_dependencies_template + - .template__coverage + stage: test_coverage + # This job requires all artifacts from the stage test, so we override + # the `dependencies: []` in `.default_settings` by setting `dependencies` + # to `null`. + dependencies: + before_script: + # Load the environment poetry previously created in the docker image. + # Give access to the Python dependencies/executables + - . "$HOME"/.venv/bin/activate + script: + - ./scripts/ci/report_coverage.sh + coverage: '/Coverage: ([^%]+%)/' + artifacts: + expose_as: 'Coverage report' + when: always + reports: + cobertura: _coverage_report/cobertura.xml + paths: + - _coverage_report/ + expire_in: 15 days diff --git a/.gitlab/ci/tezt.yml b/.gitlab/ci/tezt.yml deleted file mode 100644 index 7459c88058..0000000000 --- a/.gitlab/ci/tezt.yml +++ /dev/null @@ -1,43 +0,0 @@ -# We use the --job option to split tests into jobs of roughly the same duration. -# This is based on a file that contains timings of test results, generated with --record. -# To rebalance jobs, update this record with: -# -# make && dune exec tezt/tests/main.exe -- --record tezt/test-results.json - -tezt: - extends: - - .integration_template - - .template__coverage_files - artifacts: - reports: - junit: tezt-junit.xml - paths: - - tezt.log - - tezt-results-$CI_NODE_INDEX.json - - $BISECT_FILE - expire_in: 1 day - when: always - parallel: 9 - script: - - dune exec tezt/tests/main.exe -- --color --log-buffer-size 5000 --log-file tezt.log --global-timeout 3300 --junit tezt-junit.xml --from-record tezt/records --job $CI_NODE_INDEX/$CI_NODE_TOTAL --record tezt-results-$CI_NODE_INDEX.json - -# Note: if you reactivate this test and if you keep it manual, put it in the "manual" stage. 
-# -#tezt:manual:migration: -# extends: -# - .test_template -# - .rules_template__extended_test_pipeline -# before_script: -# - export TEZOS_CLIENT_UNSAFE_DISABLE_DISCLAIMER=Y -# - curl -s https://api.github.com/repos/Phlogi/tezos-snapshots/releases/latest | jq -r ".assets[] | select(.name) | .browser_download_url" | grep roll | xargs wget -q -# - block_hash=$(echo mainnet.roll.* | sed -r 's/mainnet\.roll\.[0-9_-]+\.(.*)\.[0-9]+\.chain\.xz/\1/g') -# - cat mainnet.roll.* | xz -d -v -T0 > mainnet.rolling -# - scripts/prepare_migration_test.sh auto mainnet.rolling "$block_hash" -# script: -# - if [ ! -f tezos-node ] || [ ! -f tezos-client ] || [ ! -f tezos-codec ] || [ ! -f tezos-sandbox ]; then make; fi -# - dune exec ./tezt/manual_tests/main.exe -- migration --color --log-buffer-size 5000 --log-file tezt-migration.log -# artifacts: -# when: always -# paths: -# - tezt-migration.log -# expire_in: 30 days diff --git a/.gitlab/ci/unittest.yml b/.gitlab/ci/unittest.yml deleted file mode 100644 index 600c4d3cbf..0000000000 --- a/.gitlab/ci/unittest.yml +++ /dev/null @@ -1,116 +0,0 @@ -test-script-gen-genesis: - extends: - - .default_settings_template - - .image_template__runtime_build_test_dependencies_template - - .rules_template__development - stage: test - needs: [] - before_script: - - cd scripts/gen-genesis - script: - - dune build gen_genesis.exe - -.test_template: - extends: - - .default_settings_template - - .image_template__runtime_build_test_dependencies_template - - .rules_template__development - - .template__coverage - stage: test - before_script: - - . 
./scripts/version.sh - retry: 2 - # avoid pushing cache for testing stage - cache: - key: "$CI_COMMIT_REF_SLUG" - policy: pull - -.unit_test_template: - extends: .test_template - variables: - ARCH: "" - MAKE_TARGETS: "" - script: - - make $MAKE_TARGETS - artifacts: - name: "$CI_JOB_NAME-$CI_COMMIT_SHA-${ARCH}" - paths: - - test_results - reports: - junit: test_results/*.xml - expire_in: 1 day - when: always - -.unit_test_template_x86_64: - extends: .unit_test_template - needs: ["build_x86_64"] - variables: - ARCH: "x86_64" - -.unit_test_template_arm64: - extends: .unit_test_template - needs: ["build_arm64"] - variables: - ARCH: "arm64" - tags: - - arm64 - -unit:011_PtHangz2: - extends: - - .unit_test_template_x86_64 - - .template__coverage_files - variables: - MAKE_TARGETS: > - src/proto_011_PtHangz2/lib_benchmark/lib_benchmark_type_inference.test_proto - src/proto_011_PtHangz2/lib_benchmark.test_proto - src/proto_011_PtHangz2/lib_client.test_proto - src/proto_011_PtHangz2/lib_protocol.test_proto - -unit:012_PsiThaCa: - extends: - - .unit_test_template_x86_64 - - .template__coverage_files - variables: - MAKE_TARGETS: > - src/proto_012_PsiThaCa/lib_benchmark/lib_benchmark_type_inference.test_proto - src/proto_012_PsiThaCa/lib_benchmark.test_proto - src/proto_012_PsiThaCa/lib_client.test_proto - src/proto_012_PsiThaCa/lib_plugin.test_proto - src/proto_012_PsiThaCa/lib_protocol.test_proto - src/proto_012_PsiThaCa/lib_delegate.test_proto - -unit:alpha: - extends: - - .unit_test_template_x86_64 - - .template__coverage_files - variables: - MAKE_TARGETS: > - src/proto_alpha/lib_benchmark/lib_benchmark_type_inference.test_proto - src/proto_alpha/lib_benchmark.test_proto - src/proto_alpha/lib_client.test_proto - src/proto_alpha/lib_plugin.test_proto - src/proto_alpha/lib_protocol.test_proto - src/proto_alpha/lib_delegate.test_proto -unit:non-proto-x86_64: - extends: - - .unit_test_template_x86_64 - - .template__coverage_files - variables: - MAKE_TARGETS: test-nonproto-unit 
- -unit:non-proto-arm64: - extends: .unit_test_template_arm64 - variables: - MAKE_TARGETS: test-nonproto-unit - -unit:js_components: - extends: .unit_test_template_x86_64 - script: - # install node - - . ./scripts/install_build_deps.js.sh - - make test-js - -unit:protocol_compiles: - extends: .unit_test_template_x86_64 - script: - - dune build @runtest_compile_protocol diff --git a/CHANGES.rst b/CHANGES.rst index 6e9542b45a..de0d9aa0c0 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,1742 +1,94 @@ -Changelog -''''''''' +Development Changelog +''''''''''''''''''''' + +**NB:** The changelog for releases can be found at: https://tezos.gitlab.io/CHANGES.html + This file lists the changes added to each version of tezos-node, -tezos-client, and the other Tezos binaries. The changes to the Tezos +tezos-client, and the other Octez executables. The changes to the economic protocol are documented in the ``docs/protocols/`` directory; in particular in ``docs/protocols/alpha.rst``. -Development Version -=================== - When you make a commit on master, you can add an item in one of the following subsections (node, client, …) to document your commit or the set of related commits. This will ensure that this change is not -forgotten in the final changelog. By having your commits update this -file you also make it easy to find the commits which are related to your -changes using ``git blame``. +forgotten in the final changelog, which can be found in ``docs/CHANGES.rst``. +By having your commits update this file you also make it easy to find the +commits which are related to your changes using ``git log -p -- CHANGES.rst``. +Relevant items are moved to ``docs/CHANGES.rst`` after each release. Only describe changes which affect users (bug fixes and new features), +or which will affect users in the future (deprecated features), not refactorings or tests. Changes to the documentation do not need to be documented here either. 
Node ---- -Client ------- - -Baker / Endorser / Accuser --------------------------- - -Proxy server ------------- - -Protocol Compiler And Environment ---------------------------------- - -Codec ------ - -Docker Images -------------- - -Miscellaneous -------------- - -Version 12.0~rc1 -================ - -Node ----- - -- UNIX errors are now displayed using human-friendly English instead of error codes. - -- Manager operations do no longer need to be executed before being - propagated over the network. This feature will be available from - protocol `I`, provided the latter is activated. The aim is to - increase the throughput of transactions gossiped over the network, - while reducing the load on the Octez node's prevalidator - (aka the mempool). - -- The following RPCs output format changed: - - 1. ``/workers/block_validator``, - 2. ``/workers/chain_validators``, - 3. ``/workers/chain_validators/``, - 4. ``/workers/chain_validator//peer_validators``, - 5. ``/workers/chain_validator//peer_validators/``, - 6. ``/workers/prevalidators``. - - The field ``backlog`` is removed. Those logs can be obtained via the - node itself. Logging can be redirected to a file via the option - ``--log-file``. External tools such as ``logrotate`` can be used to - remove entries that are too old. +- **Breaking change**: + restored the encoding of events corresponding to "completed + requests" (block validation, head switch, ...) to pre v11. They only + contains absolute timestamp. -- The node configuration format is changed. The - following paths are removed: +- Add optional query parameters ``applied``, ``refused``, ``outdated``, + ``branch_refused``, and ``branch_delayed`` to RPC + ``GET /chains/main/mempool/pending_operations``. + These new parameters indicate the classifications for which the RPC should + or shouldn't return the corresponding operations. If no option is given, all + the parameters are assumed to be ``true``, making this extension + backward-compatible (i.e. 
and all operations are returned). - 1. ``shell.chain_validator.limits.worker_backlog_size`` - 2. ``shell.chain_validator.limits.worker_backlog_level`` - 3. ``shell.peer_validator.limits.worker_backlog_size`` - 4. ``shell.peer_validator.limits.worker_backlog_level`` - 5. ``shell.prevalidator.limits.worker_backlog_size`` - 6. ``shell.prevalidator.limits.worker_backlog_level`` - 7. ``shell.block_validator.limits.worker_backlog_size`` - 8. ``shell.block_validator.limits.worker_backlog_level`` - - If those fields are present in your configuration file, they can - simply be removed. - -- Added version ``1`` to RPC ``GET chains/main/mempool/pending_operations``. - It can be used by calling the RPC with the parameter ``?version=1`` - (default version is still ``0``). - -- Added an RPC ``/config/logging`` to reconfigure the logging framework - without having to restart the node. See also the new documentation pages - related to logging. - -- Better handling of mempool cache in the `distributed_db` which - should make the `distributed_db` RAM consumption strongly - correlated to the one of the mempool. - -- Fixed RPC GET ``/chains//mempool/filter``, that did not - show fields of the filter configuration that were equal to their - default value: e.g. if the configuration was the default one, it - just returned ``{}``. Now displays all the fields by default. The - old behavior may be brought back by setting the new optional - parameter ``include_default`` to ``false``. - -- Changed the behavior of RPC POST ``/chains//mempool/filter`` - when provided an input json that does not describe a valid filter - configuration. It used to revert the filter back to the default - configuration in that case, but now it leaves it unchanged. (Note: - if the input json is valid but does not provide all the fields of - the filter configuration, then any missing field is set back to its - default value, rather than left unchanged. This is the same - behavior as the previous version of the RPC.) 
As this behavior may - be confusing, the RPC now returns the new filter configuration of - the mempool. - -- When encoded in binary, errors now have a single size field. This only - affects the binary representation of errors or values that include errors - inside. It may break the compatibility for tools that request binary-only - answers from the node and parse the errors by hand. - -- Added a new mempool's classification for the recently introduced - outdated error category of protocols in environment v4. - -- Added two optional fields, ``now`` and ``level`` as input to the - ``run_view``, ``run_code``, and ``trace_code`` RPCs (under - ``/chains//blocks//helpers/scripts/``). These - fields can be used to override the values normally returned by the - ``NOW`` and ``LEVEL`` instructions. - -- Add a new CLI & config option ``advertised-net-port``. - -- Added an optional ``show_types`` field in the input of the - ``/chains//blocks//helpers/scripts/typecheck_code`` - RPC. When this field is set to ``false``, type checking details are - omitted. This can be used to improve the performances of this RPC. - -- Fix the comparison operator of history modes to avoid considering - the default history modes as not equal to an history mode manually - set to the same default value. - -- The prevalidator (which handles operations which have been received but not - yet included in a block) was made more restrictive: it now accepts a single - manager operation from a given manager for a given block. This limitation - was already present implicitly if you were using the `tezos-client` commands. - Batches of operations can be used to get around this restriction, see the - `multiple transfers` command to learn more. In addition, operations - rejected because of this limitation are solely delayed to a future block. - -- Removed support for store versions 0.0.4 (used by Octez 9.7) or below. 
- It is no longer possible to run ``tezos-node upgrade storage`` to upgrade - from those older versions. It is also no longer possible to import - snapshots that were exported using this version. - -- Reduced the memory consumption of the snapshot import. - -- Fixed an inconsistency of the cache: the shell now reloads the cache - from scratch if the application fails because of a hash - inconsistency. - -- Removed the ``granadanet`` built-in network alias. - -- Added the ``ithacanet`` built-in network alias. +- Added optional parameter ``--media-type`` and its corresponding field + in the configuration file. It defines which format of data serialisation + must be used for RPC requests to the node. The value can be ``json``, + ``binary`` or ``any``. By default, the value is set to ``any``. - Added an option ``--listen-prometheus `` to ``tezos-node run`` to expose some metrics using the Prometheus format. -- Fixed an incorrect behaviour of the store which could cause the node - to freeze for a few seconds. +- Adds ``tezos-node storage head-commmit`` command to print the current + context head commit hash to stdout. + +- The node context storage format was upgraded. To this end, a new storage + version was introduced: 0.0.7 (previously 0.0.6). Upgrading from 0.0.6 to + 0.0.7 is done automatically by the node the first time you run it. This + upgrade is instantaneous. However, be careful that previous versions of Octez + will refuse to run on a data directory which was used with Octez 12.0. Client ------ -- Expanded the number of product ids searched with the HID API when looking for a ledger device. - -- Added an optional parameter ``media-type`` for the "accept" header for RPC requests to the node. - The media accept header indicates to the node which format of data serialisation is supported. - The value can be ``json``, ``binary`` or ``any``. +- A new ``--force`` option was added to the ``transfer`` command. 
It + makes the client inject the transaction in a node even if the + simulation of the transaction fails. -- Added two options, ``--now`` and ``--level`` to the ``run script`` - and ``run view`` commands simulating execution of Michelson - code. These options can be used to override the values normally - returned by the ``NOW`` and ``LEVEL`` instructions. +- A new ``--self-address`` option was added to the ``run script`` + command. It makes the given address be considered the address of + the contract being run. The address must actually exist in the + context. If ``--balance`` wasn't specified, the script also + inherits the given contract's balance. -- The output of ``tezos-client``'s RPC commands now uses the format specified by the ``--media-type``. - -- Added new option ``--replace`` to ``transfer`` and ``multiple transfers`` commands. - This option allows a manager to inject a transfer or a smart contract call - operation (with more fees) to replace an existing one in the node's mempool. - This option should only be used to inject in nodes whose prevalidators use - the new validation scheme of manager operations (called ``operations - precheck``) instead of fully applying the operation in a prevalidation block. - Note that there are no guarantees on which operation will possibly be - included in a block. For instance, the second operation may arrive too late to - the baker, in which case, the latter might includes the first operation and - the second one becomes invalid. - -` Baker / Endorser / Accuser -------------------------- -- Added an optional parameter ``media-type`` for the "accept" header for RPC requests to the node. - The default ``media_type`` is ``binary`` for bakers. - The media accept header indicates to the node which format of data serialisation is supported. - The value can be ``json``, ``binary`` or ``any``. - -- Removed baker, endorser and accuser for Granada. 
- -Miscellaneous -------------- - -- Made the ``file-descriptor-{path,stdout,stderr}://`` event-logging - sink more configurable (e.g. filtering per level and per section). The - environment variable ``TEZOS_NODE_HOSTNAME`` used for the output of events - was renamed to the more appropriate ``TEZOS_EVENT_HOSTNAME``. - -- Added specific documentation pages about logging for users and - developers. - -- Some RPC entry points are stricter about their inputs. Specifically, some - RPCs where only positive integers would make sense will now error when - provided negative values (instead of, e.g., returning empty results). - -- Added diffing functionality to the Micheline library. It allows to compare - Micheline expressions whose primitives are ``strings``. The difference is - returned as another Micheline expression annotated appropriately in places - where compared values differ. - -Version 11.0 -============ - -No changes compared to 11.0~rc2. - -Version 11.0~rc2 -================ - -- Included fixes from version 10.3. - -Node ----- - -- Added protocol Hangzhou2 (``PtHangz2``), which is a modified version - of Hangzhou (``PtHangzH``) with a number of critical bug fixes. - -- Added a user-activated protocol override from Hangzhou - (``PtHangzH``) to Hangzhou2 (``PtHangz2``) on Mainnet. This - means that nodes using version 11.0~rc2 will activate Hangzhou2 - instead of Hangzhou if Hangzhou was to be activated by the on-chain - governance process. - -- As the Hangzhounet test network was restarted to use ``PtHangz2`` - instead of ``PtHangzH``, the ``hangzhounet`` network alias now - contains the configuration to connect to this restarted - Hangzhounet. - -- Bumped the network version to 2. - -- Added early block advertisement based on a precheck mechanism to - improve the propagation time in the network. This mechanism is only - available for nodes with a network version of 2. 
- -- The default allocation policy for the OCaml runtime is now ``2`` - (also called ``best-fit``). The previous value was ``0``. This new - policy gives the best compromise in terms of performances and memory - consumption. This policy can be changed using the ``OCAMLRUNPARAM`` - environment variable. For example, to set back this value to ``0``, - one can do ``OCAMLRUNPARAM="a=0"``. More information on this - environment variable can be found `here `__. - -- Improved the performance of the ``raw/bytes`` RPC call. - In particular, this prevents stack overflows that could happen - because of the flattened context if Hangzhou2 is activated. - -- Improved the performance of the context flattening migration that - will happen if Hangzhou2 is activated. In particular, this reduces - how much memory is needed by this operation. - -- Fixed issue #1930: during decoding, the validity of Micheline - annotations is enforced. - -- Improved the snapshot export mechanism by reducing both the export - time and the memory footprint. - -- Added new RPCs to inspect the storage status: - - - GET ``/chains/main/levels/checkpoint``: checkpoint block hash and - level. - - GET ``/chains/main/levels/savepoint``: savepoint block hash and - level. - - GET ``/chains/main/levels/caboose``: caboose block hash and - level. - - GET ``/config/history_mode``: history mode of the node. - -- Deprecated the ``/chains/main/checkpoint`` RPC. It may be deleted - starting from v12.0. 
- -- The field ``backlog`` of the following RPCs is deprecated and may be - deleted starting from v12.0: - - - ``/workers/block_validator`` - - - ``/workers/chain_validators`` - - - ``/workers/chain_validators/`` - - - ``/workers/chain_validator//peer_validators`` - - - ``/workers/chain_validator//peer_validators/`` - - - ``/workers/prevalidators`` - -- The following paths of the node configuration format are deprecated - and may be deleted starting from v12.0: - - - ``shell.chain_validator.limits.worker_backlog_size`` - - - ``shell.chain_validator.limits.worker_backlog_level`` - - - ``shell.peer_validator.limits.worker_backlog_size`` - - - ``shell.peer_validator.limits.worker_backlog_level`` - - - ``shell.prevalidator.limits.worker_backlog_size`` - - - ``shell.prevalidator.limits.worker_backlog_level`` - - - ``shell.block_validator.limits.worker_backlog_size`` - - - ``shell.block_validator.limits.worker_backlog_level`` - -- The ``tezos-admin-client show current checkpoint`` command now only - outputs the current checkpoint. It no longer outputs the savepoint, - caboose and history mode. - -- When calling the - ``/chains//blocks//helpers/preapply`` RPC, the - preapplication is now done by the external validator process - instead of the main node process. This allows the external - validator to cache the result. If later the block is applied, this - cache is then used to optimize the application of the block. - -- Fixed an inconsistency of the cache internal counter between the - baker and the node when the cache has been emptied. - -Version 11.0~rc1 -================ - -Node ----- - -- **Breaking change**: - updated the output of the ``/stats/gc`` RPC entry point: it now also - reports the number of full major collections made by the OCaml - garbage collector. - -- **Breaking change**: - updated the encoding of chain validator events. - The output of RPC ``GET /workers/chain_validators/`` - was modified as a result. 
- -- Updated RPC ``GET /workers/prevalidators``: field ``backlog`` now - always returns an empty list. The events in this backlog can now be - obtained either via stdout, or by configuring a new sink for events - via the environment variable ``TEZOS_EVENTS_CONFIG`` (to be set - before launching the node). - -- Updated RPC ``GET /chains//mempool/monitor_operation``: - output was extended to include operation hashes (field name is - ``hash``) and errors (field name is ``error``) when the operation - is classified as ``Branch_delayed``, ``Branch_refused`` or ``Refused``. - -- Improved how the distributed database (DDB) handles the mempool cache. - This should make the DDB RAM consumption strongly correlated - to the one of the mempool. - -- Fixed wrong error message in case of P2P network address binding collision. - -- Added new RPCs to ban/unban operations locally. - - - POST ``/chains//mempool/ban_operation``: ban a given - operation hash. The operation is removed from the mempool, and - its effect is reverted if it was applied. It is also added to - the prevalidator's set of banned operations, to prevent it from - being fetched/processed/injected in the future. - - - POST ``/chains//mempool/unban_operation``: unban a given - operation hash, removing it from the prevalidator's set of banned - operations. Nothing happens if the operation was not banned. - - - POST ``/chains//mempool/unban_all_operations``: unban - all operations, i.e. clear the set of banned operations. - -- Added the possibility to use the ``~``, ``-`` and ``+`` operators - when querying blocks by their level using the - ``/chains/.../blocks/`` RPC. For instance, - ``/chains/main/blocks/41+1`` requests the block at level 42. Before - this change, these notations were only available with aliases (such - as ``head-1``). - -- Added the possibility to use the ``+`` operator when specifying the - block to export, using the ``--block`` argument of the snapshot - export command. 
Before, only ``~`` and ``-`` were allowed. - -- Fixed a bug where the mempool forgot about ``refused`` operations - on flush, leading to these operations being potentially reevaluated - in the future (e.g. if they are advertised again by a peer). - -- Removed the built-in network aliases for Edonet and Florencenet, - since Edo and Florence have been replaced by Granada. - -- Added a built-in network alias for Hangzhounet. - -Client +Signer ------ -- Disabled indentation checking by default in the ``tezos-client - convert script`` and ``tezos-client hash script`` commands. In - particular, ``tezos-client convert script