From 2012b85d821814275909036e9d55d534f67d01b7 Mon Sep 17 00:00:00 2001 From: Lezek123 Date: Tue, 11 Feb 2025 18:03:05 +0100 Subject: [PATCH 1/3] Colossus: leader:set-replication command --- .github/workflows/run-network-tests.yml | 5 +- docker-compose.yml | 83 ++-- storage-node/CHANGELOG.md | 15 +- storage-node/README.md | 104 +++-- storage-node/package.json | 2 +- .../src/command-base/ApiCommandBase.ts | 3 + .../src/commands/leader/set-replication.ts | 266 +++++++++++++ storage-node/src/commands/server.ts | 4 +- storage-node/src/services/helpers/bagTypes.ts | 31 ++ .../src/services/helpers/bagsUpdateSummary.ts | 365 ++++++++++++++++++ .../src/services/helpers/storageSize.ts | 34 ++ storage-node/src/services/runtime/api.ts | 10 +- .../src/services/runtime/extrinsics.ts | 65 +++- tests/network-tests/run-tests.sh | 1 + .../storage/GenerateChannelAssetsFixture.ts | 159 ++++++++ .../storage/SetReplicationRateFixture.ts | 126 ++++++ .../src/flows/storage/setReplicationRate.ts | 182 +++++++++ .../src/flows/storage/storageCleanup.ts | 117 +----- tests/network-tests/src/scenarios/full.ts | 16 +- tests/network-tests/src/scenarios/storage.ts | 17 + .../src/scenarios/storageSync.ts | 8 - tests/network-tests/start-storage.sh | 13 + 22 files changed, 1420 insertions(+), 206 deletions(-) create mode 100644 storage-node/src/commands/leader/set-replication.ts create mode 100644 storage-node/src/services/helpers/bagsUpdateSummary.ts create mode 100644 storage-node/src/services/helpers/storageSize.ts create mode 100644 tests/network-tests/src/fixtures/storage/GenerateChannelAssetsFixture.ts create mode 100644 tests/network-tests/src/fixtures/storage/SetReplicationRateFixture.ts create mode 100644 tests/network-tests/src/flows/storage/setReplicationRate.ts create mode 100644 tests/network-tests/src/scenarios/storage.ts delete mode 100644 tests/network-tests/src/scenarios/storageSync.ts diff --git a/.github/workflows/run-network-tests.yml b/.github/workflows/run-network-tests.yml index fef9b8be46..d2152a5c45 100644 --- a/.github/workflows/run-network-tests.yml +++ b/.github/workflows/run-network-tests.yml @@ -152,7 +152,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - scenario: ['full', 'setupNewChain', 'setupNewChainMultiStorage', 'bonding', 'storageSync'] + scenario: ['full', 'setupNewChain', 'setupNewChainMultiStorage', 'bonding', 'storage'] include: - scenario: 'full' no_storage: 'false' @@ -160,6 +160,8 @@ jobs: no_storage: 'true' - scenario: 'setupNewChainMultiStorage' no_storage: 'true' + - scenario: 'storage' + cleanup_interval: '1' steps: - uses: actions/checkout@v4 - uses: actions/setup-node@v4 @@ -182,4 +184,5 @@ jobs: run: | export RUNTIME=${{ needs.build_images.outputs.runtime }} export NO_STORAGE=${{ matrix.no_storage }} + export CLEANUP_INTERVAL=${{ matrix.cleanup_interval }} tests/network-tests/run-tests.sh ${{ matrix.scenario }} diff --git a/docker-compose.yml b/docker-compose.yml index dfaf84feb6..2641db4e87 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,8 +8,8 @@ services: - chain-data:/data environment: - CHAIN=${CHAIN} - command: "--chain ${CHAIN:-dev} --alice --validator --pruning=archive --unsafe-ws-external --unsafe-rpc-external - --rpc-methods Safe --rpc-cors=all --log runtime --base-path /data --no-hardware-benchmarks" + command: '--chain ${CHAIN:-dev} --alice --validator --pruning=archive --unsafe-ws-external --unsafe-rpc-external + --rpc-methods Safe --rpc-cors=all --log runtime --base-path /data --no-hardware-benchmarks' ports: - 9944:9944 - 9933:9933 @@ 
-35,16 +35,25 @@ services: - ACCOUNT_URI=${COLOSSUS_1_TRANSACTOR_URI} - OTEL_EXPORTER_OTLP_ENDPOINT=${TELEMETRY_ENDPOINT} - OTEL_RESOURCE_ATTRIBUTES=service.name=colossus-1,deployment.environment=production + - CLEANUP + - CLEANUP_INTERVAL + - CLEANUP_NEW_OBJECT_EXPIRATION_PERIOD + - CLEANUP_MIN_REPLICATION_THRESHOLD entrypoint: ['/joystream/entrypoints/storage.sh'] - command: [ - 'server', '--worker=${COLOSSUS_1_WORKER_ID}', '--port=3333', '--uploads=/data/uploads/', - '--sync', '--syncInterval=1', - '--storageSquidEndpoint=${COLOSSUS_STORAGE_SQUID_URL}', - '--apiUrl=${JOYSTREAM_NODE_WS}', - '--logFilePath=/logs', - '--tempFolder=/data/temp/', - '--pendingFolder=/data/pending/' - ] + command: + [ + 'server', + '--worker=${COLOSSUS_1_WORKER_ID}', + '--port=3333', + '--uploads=/data/uploads/', + '--sync', + '--syncInterval=1', + '--storageSquidEndpoint=${COLOSSUS_STORAGE_SQUID_URL}', + '--apiUrl=${JOYSTREAM_NODE_WS}', + '--logFilePath=/logs', + '--tempFolder=/data/temp/', + '--pendingFolder=/data/pending/', + ] distributor-1: image: node:18 @@ -68,7 +77,7 @@ services: environment: JOYSTREAM_DISTRIBUTOR__ID: distributor-1 JOYSTREAM_DISTRIBUTOR__ENDPOINTS__STORAGE_SQUID: ${DISTRIBUTOR_STORAGE_SQUID_URL} - JOYSTREAM_DISTRIBUTOR__KEYS: "[{\"suri\":\"${DISTRIBUTOR_1_ACCOUNT_URI}\"}]" + JOYSTREAM_DISTRIBUTOR__KEYS: '[{"suri":"${DISTRIBUTOR_1_ACCOUNT_URI}"}]' JOYSTREAM_DISTRIBUTOR__WORKER_ID: ${DISTRIBUTOR_1_WORKER_ID} JOYSTREAM_DISTRIBUTOR__PUBLIC_API__PORT: 3334 JOYSTREAM_DISTRIBUTOR__OPERATOR_API__PORT: 4334 @@ -105,22 +114,25 @@ services: environment: # ACCOUNT_URI overrides command line arg --accountUri - ACCOUNT_URI=${COLOSSUS_2_TRANSACTOR_URI} - # Env that allows testing cleanup - - CLEANUP_NEW_OBJECT_EXPIRATION_PERIOD=10 - - CLEANUP_MIN_REPLICATION_THRESHOLD=1 + - CLEANUP + - CLEANUP_INTERVAL + - CLEANUP_NEW_OBJECT_EXPIRATION_PERIOD + - CLEANUP_MIN_REPLICATION_THRESHOLD entrypoint: ['yarn', 'storage-node'] - command: [ - 'server', '--worker=${COLOSSUS_2_WORKER_ID}', '--port=3333', '--uploads=/data/uploads', - '--sync', '--syncInterval=1', - '--storageSquidEndpoint=${COLOSSUS_STORAGE_SQUID_URL}', - '--apiUrl=${JOYSTREAM_NODE_WS}', - '--logFilePath=/logs', - '--tempFolder=/data/temp/', - '--pendingFolder=/data/pending/', - # Use cleanup on colossus-2 for testing purposes - '--cleanup', - '--cleanupInterval=1' - ] + command: + [ + 'server', + '--worker=${COLOSSUS_2_WORKER_ID}', + '--port=3333', + '--uploads=/data/uploads', + '--sync', + '--syncInterval=1', + '--storageSquidEndpoint=${COLOSSUS_STORAGE_SQUID_URL}', + '--apiUrl=${JOYSTREAM_NODE_WS}', + '--logFilePath=/logs', + '--tempFolder=/data/temp/', + '--pendingFolder=/data/pending/', + ] distributor-2: image: node:18 @@ -144,7 +156,7 @@ services: environment: JOYSTREAM_DISTRIBUTOR__ID: distributor-2 JOYSTREAM_DISTRIBUTOR__ENDPOINTS__STORAGE_SQUID: ${DISTRIBUTOR_STORAGE_SQUID_URL} - JOYSTREAM_DISTRIBUTOR__KEYS: "[{\"suri\":\"${DISTRIBUTOR_2_ACCOUNT_URI}\"}]" + JOYSTREAM_DISTRIBUTOR__KEYS: '[{"suri":"${DISTRIBUTOR_2_ACCOUNT_URI}"}]' JOYSTREAM_DISTRIBUTOR__WORKER_ID: ${DISTRIBUTOR_2_WORKER_ID} JOYSTREAM_DISTRIBUTOR__PUBLIC_API__PORT: 3334 JOYSTREAM_DISTRIBUTOR__OPERATOR_API__PORT: 4334 @@ -192,8 +204,8 @@ services: - OTEL_EXPORTER_OTLP_ENDPOINT=${TELEMETRY_ENDPOINT} - OTEL_RESOURCE_ATTRIBUTES=service.name=query-node,deployment.environment=production ports: - - "${GRAPHQL_SERVER_PORT}:${GRAPHQL_SERVER_PORT}" - - "127.0.0.1:${PROCESSOR_STATE_APP_PORT}:${PROCESSOR_STATE_APP_PORT}" + - '${GRAPHQL_SERVER_PORT}:${GRAPHQL_SERVER_PORT}' + - 
'127.0.0.1:${PROCESSOR_STATE_APP_PORT}:${PROCESSOR_STATE_APP_PORT}' depends_on: - db volumes: @@ -275,7 +287,7 @@ services: - PORT=${HYDRA_INDEXER_GATEWAY_PORT} - PGSSLMODE=disable ports: - - "${HYDRA_INDEXER_GATEWAY_PORT}:${HYDRA_INDEXER_GATEWAY_PORT}" + - '${HYDRA_INDEXER_GATEWAY_PORT}:${HYDRA_INDEXER_GATEWAY_PORT}' depends_on: - db - redis @@ -285,7 +297,7 @@ services: container_name: redis restart: unless-stopped ports: - - "127.0.0.1:6379:6379" + - '127.0.0.1:6379:6379' faucet: image: joystream/faucet:carthage @@ -304,7 +316,7 @@ services: - BALANCE_CREDIT=${BALANCE_CREDIT} - BALANCE_LOCKED=${BALANCE_LOCKED} ports: - - "3002:3002" + - '3002:3002' # PostgerSQL database for Orion orion-db: @@ -437,10 +449,7 @@ services: environment: DATABASE_MAX_CONNECTIONS: 5 RUST_LOG: 'actix_web=info,actix_server=info' - command: [ - '--database-url', - 'postgres://postgres:postgres@orion_archive_db:${ARCHIVE_DB_PORT}/squid-archive', - ] + command: ['--database-url', 'postgres://postgres:postgres@orion_archive_db:${ARCHIVE_DB_PORT}/squid-archive'] ports: - '127.0.0.1:${ARCHIVE_GATEWAY_PORT}:8000' - '[::1]:${ARCHIVE_GATEWAY_PORT}:8000' diff --git a/storage-node/CHANGELOG.md b/storage-node/CHANGELOG.md index 193e9b6a88..91724fc9fd 100644 --- a/storage-node/CHANGELOG.md +++ b/storage-node/CHANGELOG.md @@ -1,10 +1,17 @@ +### 4.5.0 + +- Added the `leader:set-replication` command, which adjusts bag-to-bucket assignments to achieve a given replication rate. It also supports generating detailed summaries of the changes required to make that adjustment. +- Added the ability to set `CLEANUP` and `CLEANUP_INTERVAL` via environment variables in the `server` command. +- Added a few new utility functions (`stringifyBagId`, `cmpBagIds`, `isEvent`, `asStorageSize`, `getBatchResults`). +- Updated `updateStorageBucketsForBags` to rely on the new `getBatchResults` utility function. + ### 4.4.0 - **Optimizations:** The way data objects / data object ids are queried and processed during sync and cleanup has been optimized: - - Sync and cleanup services now process tasks in batches of configurable size (`--syncBatchSize`, `--cleanupBatchSize`) to avoid overflowing the memory. - - Synchronous operations like `sort` or `filter` on larger arrays of data objects have been optimized (for example, by replacing `.filter(Array.includes(...))` with `.filter(Set.has(...))`). - - Enforced a limit of max. results per single GraphQL query to `10,000` and max input arguments per query to `1,000`. - - Added `--cleanupWorkersNumber` flag to limit the number of concurrent async requests during cleanup. + - Sync and cleanup services now process tasks in batches of configurable size (`--syncBatchSize`, `--cleanupBatchSize`) to avoid overflowing the memory. + - Synchronous operations like `sort` or `filter` on larger arrays of data objects have been optimized (for example, by replacing `.filter(Array.includes(...))` with `.filter(Set.has(...))`). + - Enforced a limit of max. results per single GraphQL query to `10,000` and max input arguments per query to `1,000`. + - Added `--cleanupWorkersNumber` flag to limit the number of concurrent async requests during cleanup. - A safety mechanism was added to avoid removing "deleted" objects for which a related `DataObjectDeleted` event cannot be found in storage squid. - Improved logging during sync and cleanup.
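The 4.5.0 notes above reference `asStorageSize`, the byte-count formatter (added in `storage-node/src/services/helpers/storageSize.ts` further down in this patch) that the new summary helper uses when printing expected storage usage; the command itself is invoked along the lines of `yarn storage-node leader:set-replication -r 4 --dryRun -o summary.json` (see the README section below for the full flag list). A minimal sketch of the formatter's behaviour, with an assumed relative import path and made-up sample values (not part of the patch):

```ts
import { asStorageSize } from '../services/helpers/storageSize'

// A bucket voucher's sizeUsed, as a BigInt number of bytes (sample value)
const sizeUsed = 1_500_000_000n

// Picks the largest decimal unit that keeps the value below 1,000
const [value, unit] = asStorageSize(sizeUsed)
console.log(`${value} ${unit}`) // 1.5 GB

// A unit can also be forced, e.g. to keep "before" and "after" figures comparable
const [inMb] = asStorageSize(sizeUsed, 'MB')
console.log(`${inMb} MB`) // 1500 MB
```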
diff --git a/storage-node/README.md b/storage-node/README.md index e7228f6c62..bdfc5c1628 100644 --- a/storage-node/README.md +++ b/storage-node/README.md @@ -156,6 +156,7 @@ There is also an option to run Colossus as [Docker container](../colossus.Docker * [`storage-node leader:remove-operator`](#storage-node-leaderremove-operator) * [`storage-node leader:set-bucket-limits`](#storage-node-leaderset-bucket-limits) * [`storage-node leader:set-global-uploading-status`](#storage-node-leaderset-global-uploading-status) +* [`storage-node leader:set-replication`](#storage-node-leaderset-replication) * [`storage-node leader:update-bag-limit`](#storage-node-leaderupdate-bag-limit) * [`storage-node leader:update-bags`](#storage-node-leaderupdate-bags) * [`storage-node leader:update-blacklist`](#storage-node-leaderupdate-blacklist) @@ -305,7 +306,7 @@ OPTIONS [default: 4] Upload workers number (max async operations in progress). ``` -_See code: [src/commands/archive.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/archive.ts)_ +_See code: [src/commands/archive.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/archive.ts)_ ## `storage-node help [COMMAND]` @@ -349,7 +350,7 @@ OPTIONS --keyStore=keyStore Path to a folder with multiple key files to load into keystore. ``` -_See code: [src/commands/leader/cancel-invite.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/leader/cancel-invite.ts)_ +_See code: [src/commands/leader/cancel-invite.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/leader/cancel-invite.ts)_ ## `storage-node leader:create-bucket` @@ -380,7 +381,7 @@ OPTIONS --keyStore=keyStore Path to a folder with multiple key files to load into keystore. ``` -_See code: [src/commands/leader/create-bucket.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/leader/create-bucket.ts)_ +_See code: [src/commands/leader/create-bucket.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/leader/create-bucket.ts)_ ## `storage-node leader:delete-bucket` @@ -407,7 +408,7 @@ OPTIONS --keyStore=keyStore Path to a folder with multiple key files to load into keystore. ``` -_See code: [src/commands/leader/delete-bucket.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/leader/delete-bucket.ts)_ +_See code: [src/commands/leader/delete-bucket.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/leader/delete-bucket.ts)_ ## `storage-node leader:invite-operator` @@ -436,7 +437,7 @@ OPTIONS --keyStore=keyStore Path to a folder with multiple key files to load into keystore. ``` -_See code: [src/commands/leader/invite-operator.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/leader/invite-operator.ts)_ +_See code: [src/commands/leader/invite-operator.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/leader/invite-operator.ts)_ ## `storage-node leader:remove-operator` @@ -463,7 +464,7 @@ OPTIONS --keyStore=keyStore Path to a folder with multiple key files to load into keystore. ``` -_See code: [src/commands/leader/remove-operator.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/leader/remove-operator.ts)_ +_See code: [src/commands/leader/remove-operator.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/leader/remove-operator.ts)_ ## `storage-node leader:set-bucket-limits` @@ -493,7 +494,7 @@ OPTIONS --keyStore=keyStore Path to a folder with multiple key files to load into keystore. 
``` -_See code: [src/commands/leader/set-bucket-limits.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/leader/set-bucket-limits.ts)_ +_See code: [src/commands/leader/set-bucket-limits.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/leader/set-bucket-limits.ts)_ ## `storage-node leader:set-global-uploading-status` @@ -521,7 +522,47 @@ OPTIONS --keyStore=keyStore Path to a folder with multiple key files to load into keystore. ``` -_See code: [src/commands/leader/set-global-uploading-status.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/leader/set-global-uploading-status.ts)_ +_See code: [src/commands/leader/set-global-uploading-status.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/leader/set-global-uploading-status.ts)_ + +## `storage-node leader:set-replication` + +Adjusts bag-to-bucket assignments to achieve a given replication rate. + +``` +USAGE + $ storage-node leader:set-replication + +OPTIONS + -a, --activeOnly Only take active buckets into account when calculating replication rate and updating bags + -b, --batchSize=batchSize [default: 100] Number of extrinsics to send in a single utility.batch call + -h, --help show CLI help + -k, --keyFile=keyFile Path to key file to add to the keyring. + -m, --dev Use development mode + -o, --output=output Output result to a file (based on the provided path) instead of stdout + + -p, --password=password Password to unlock keyfiles. Multiple passwords can be passed, to try against all files. + If not specified a single password can be set in ACCOUNT_PWD environment variable. + + -r, --rate=rate (required) The target replication rate + + -u, --apiUrl=apiUrl [default: ws://localhost:9944] Runtime API URL. Mandatory in non-dev environment. + + -y, --accountUri=accountUri Account URI (optional). If not specified a single key can be set in ACCOUNT_URI + environment variable. + + --dryRun Assumes all transactions were successful and generates the summary + + --keyStore=keyStore Path to a folder with multiple key files to load into keystore. + + --skipBucketsSummary Whether to skip a summary of changes by each individual bucket in the final result + + --skipConfirmation Skips asking for confirmation before sending transactions + + --skipTxSummary Whether to skip a summary of changes by each individual batch transaction in the final + result +``` + +_See code: [src/commands/leader/set-replication.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/leader/set-replication.ts)_ ## `storage-node leader:update-bag-limit` @@ -548,7 +589,7 @@ OPTIONS --keyStore=keyStore Path to a folder with multiple key files to load into keystore. ``` -_See code: [src/commands/leader/update-bag-limit.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/leader/update-bag-limit.ts)_ +_See code: [src/commands/leader/update-bag-limit.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/leader/update-bag-limit.ts)_ ## `storage-node leader:update-bags` @@ -604,7 +645,7 @@ OPTIONS Path to a folder with multiple key files to load into keystore. 
``` -_See code: [src/commands/leader/update-bags.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/leader/update-bags.ts)_ +_See code: [src/commands/leader/update-bags.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/leader/update-bags.ts)_ ## `storage-node leader:update-blacklist` @@ -633,7 +674,7 @@ OPTIONS --keyStore=keyStore Path to a folder with multiple key files to load into keystore. ``` -_See code: [src/commands/leader/update-blacklist.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/leader/update-blacklist.ts)_ +_See code: [src/commands/leader/update-blacklist.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/leader/update-blacklist.ts)_ ## `storage-node leader:update-bucket-status` @@ -662,7 +703,7 @@ OPTIONS --keyStore=keyStore Path to a folder with multiple key files to load into keystore. ``` -_See code: [src/commands/leader/update-bucket-status.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/leader/update-bucket-status.ts)_ +_See code: [src/commands/leader/update-bucket-status.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/leader/update-bucket-status.ts)_ ## `storage-node leader:update-data-fee` @@ -689,7 +730,7 @@ OPTIONS --keyStore=keyStore Path to a folder with multiple key files to load into keystore. ``` -_See code: [src/commands/leader/update-data-fee.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/leader/update-data-fee.ts)_ +_See code: [src/commands/leader/update-data-fee.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/leader/update-data-fee.ts)_ ## `storage-node leader:update-data-object-bloat-bond` @@ -717,7 +758,7 @@ OPTIONS --keyStore=keyStore Path to a folder with multiple key files to load into keystore. ``` -_See code: [src/commands/leader/update-data-object-bloat-bond.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/leader/update-data-object-bloat-bond.ts)_ +_See code: [src/commands/leader/update-data-object-bloat-bond.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/leader/update-data-object-bloat-bond.ts)_ ## `storage-node leader:update-dynamic-bag-policy` @@ -747,7 +788,7 @@ OPTIONS --keyStore=keyStore Path to a folder with multiple key files to load into keystore. ``` -_See code: [src/commands/leader/update-dynamic-bag-policy.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/leader/update-dynamic-bag-policy.ts)_ +_See code: [src/commands/leader/update-dynamic-bag-policy.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/leader/update-dynamic-bag-policy.ts)_ ## `storage-node leader:update-voucher-limits` @@ -776,7 +817,7 @@ OPTIONS --keyStore=keyStore Path to a folder with multiple key files to load into keystore. ``` -_See code: [src/commands/leader/update-voucher-limits.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/leader/update-voucher-limits.ts)_ +_See code: [src/commands/leader/update-voucher-limits.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/leader/update-voucher-limits.ts)_ ## `storage-node operator:accept-invitation` @@ -809,7 +850,7 @@ OPTIONS --keyStore=keyStore Path to a folder with multiple key files to load into keystore. 
``` -_See code: [src/commands/operator/accept-invitation.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/operator/accept-invitation.ts)_ +_See code: [src/commands/operator/accept-invitation.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/operator/accept-invitation.ts)_ ## `storage-node operator:set-metadata` @@ -840,7 +881,7 @@ OPTIONS --keyStore=keyStore Path to a folder with multiple key files to load into keystore. ``` -_See code: [src/commands/operator/set-metadata.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/operator/set-metadata.ts)_ +_See code: [src/commands/operator/set-metadata.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/operator/set-metadata.ts)_ ## `storage-node server` @@ -914,6 +955,12 @@ OPTIONS -z, --logFileChangeFrequency=(yearly|monthly|daily|hourly|none) [default: daily] Log files update frequency. + --cleanupBatchSize=cleanupBatchSize [default: 10000] Maximum number of objects to process + in a single batch during cleanup. + + --cleanupWorkersNumber=cleanupWorkersNumber [default: 100] Cleanup workers number (max async + operations in progress). + --elasticSearchIndexPrefix=elasticSearchIndexPrefix Elasticsearch index prefix. Node ID will be appended to the prefix. Default: logs-colossus. Can be passed through ELASTIC_INDEX_PREFIX environment variable. @@ -937,6 +984,9 @@ OPTIONS If not specified a subfolder under the uploads directory will be used. + --syncBatchSize=syncBatchSize [default: 10000] Maximum number of objects to process + in a single batch during synchronization. + --syncRetryInterval=syncRetryInterval [default: 3] Interval before retrying failed synchronization run (in minutes) @@ -946,7 +996,7 @@ OPTIONS directory will be used. ``` -_See code: [src/commands/server.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/server.ts)_ +_See code: [src/commands/server.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/server.ts)_ ## `storage-node util:cleanup` @@ -981,10 +1031,13 @@ OPTIONS -y, --accountUri=accountUri Account URI (optional). If not specified a single key can be set in ACCOUNT_URI environment variable. + --cleanupBatchSize=cleanupBatchSize [default: 10000] Maximum number of objects to process in a single + batch during cleanup. + --keyStore=keyStore Path to a folder with multiple key files to load into keystore. ``` -_See code: [src/commands/util/cleanup.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/util/cleanup.ts)_ +_See code: [src/commands/util/cleanup.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/util/cleanup.ts)_ ## `storage-node util:fetch-bucket` @@ -1011,13 +1064,16 @@ OPTIONS -t, --syncWorkersTimeout=syncWorkersTimeout [default: 30] Asset downloading timeout for the syncronization (in minutes). + --syncBatchSize=syncBatchSize [default: 10000] Maximum number of objects to process in a single + batch. + --tempFolder=tempFolder Directory to store tempory files during sync and upload (absolute path). ,Temporary directory (absolute path). If not specified a subfolder under the uploads directory will be used. 
``` -_See code: [src/commands/util/fetch-bucket.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/util/fetch-bucket.ts)_ +_See code: [src/commands/util/fetch-bucket.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/util/fetch-bucket.ts)_ ## `storage-node util:multihash` @@ -1032,7 +1088,7 @@ OPTIONS -h, --help show CLI help ``` -_See code: [src/commands/util/multihash.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/util/multihash.ts)_ +_See code: [src/commands/util/multihash.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/util/multihash.ts)_ ## `storage-node util:search-archives` @@ -1049,7 +1105,7 @@ OPTIONS -o, --dataObjects=dataObjects (required) List of the data object ids to look for (comma-separated) ``` -_See code: [src/commands/util/search-archives.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/util/search-archives.ts)_ +_See code: [src/commands/util/search-archives.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/util/search-archives.ts)_ ## `storage-node util:verify-bag-id` @@ -1077,5 +1133,5 @@ OPTIONS - dynamic:member:4 ``` -_See code: [src/commands/util/verify-bag-id.ts](https://github.com/Joystream/joystream/blob/v4.4.0/src/commands/util/verify-bag-id.ts)_ +_See code: [src/commands/util/verify-bag-id.ts](https://github.com/Joystream/joystream/blob/v4.5.0/src/commands/util/verify-bag-id.ts)_ diff --git a/storage-node/package.json b/storage-node/package.json index fb5127f42d..c3ad4ed960 100644 --- a/storage-node/package.json +++ b/storage-node/package.json @@ -1,7 +1,7 @@ { "name": "storage-node", "description": "Joystream storage subsystem.", - "version": "4.4.0", + "version": "4.5.0", "author": "Joystream contributors", "bin": { "storage-node": "./bin/run" diff --git a/storage-node/src/command-base/ApiCommandBase.ts b/storage-node/src/command-base/ApiCommandBase.ts index 8a803069c2..caa6d108f0 100644 --- a/storage-node/src/command-base/ApiCommandBase.ts +++ b/storage-node/src/command-base/ApiCommandBase.ts @@ -61,6 +61,9 @@ export default abstract class ApiCommandBase extends Command { async finally(err: Error | undefined): Promise { // called after run and catch regardless of whether or not the command errored // We'll force exit here, in case there is no error, to prevent console.log from hanging the process + if (err && process.env.DEBUG) { + console.error(err) + } if (!err) this.exit(0) super.finally(err) } diff --git a/storage-node/src/commands/leader/set-replication.ts b/storage-node/src/commands/leader/set-replication.ts new file mode 100644 index 0000000000..37d63d23e1 --- /dev/null +++ b/storage-node/src/commands/leader/set-replication.ts @@ -0,0 +1,266 @@ +import _ from 'lodash' +import fs from 'fs/promises' +import readline from 'node:readline/promises' +import { stderr, stdin } from 'node:process' +import assert from 'node:assert' +import { SingleBar } from 'cli-progress' +import { flags } from '@oclif/command' +import { createType } from '@joystream/types' +import ExitCodes from '../../command-base/ExitCodes' +import LeaderCommandBase from '../../command-base/LeaderCommandBase' +import logger from '../../services/logger' +import { getBatchResults, ParsedBatchCallResult } from '../../services/runtime/extrinsics' +import { cmpBagIds, stringifyBagId } from '../../services/helpers/bagTypes' +import { sendAndFollowNamedTx } from '../../services/runtime/api' +import { BagUpdate, BagsUpdateSummaryCreator, UpdateableBucket } from 
'../../services/helpers/bagsUpdateSummary' + +/** + * CLI command: + * Adjusts bag-to-bucket assignments to achieve a given replication rate. + * + * @remarks + * Storage working group leader command. Requires storage WG leader priviliges. + * Shell command: "leader:set-replication" + */ +export default class LeaderSetReplication extends LeaderCommandBase { + static description = `Adjusts bag-to-bucket assignments to achieve a given replication rate.` + + static flags = { + rate: flags.integer({ + char: 'r', + required: true, + description: 'The target replication rate', + }), + activeOnly: flags.boolean({ + char: 'a', + default: true, + description: 'Only take active buckets into account when calculating replication rate and updating bags', + }), + batchSize: flags.integer({ + char: 'b', + default: 100, + description: 'Number of extrinsics to send in a single utility.batch call', + }), + dryRun: flags.boolean({ + default: false, + description: 'Assumes all transactions were successful and generates the summary', + }), + skipBucketsSummary: flags.boolean({ + default: false, + description: 'Whether to skip a summary of changes by each individual bucket in the final result', + }), + skipTxSummary: flags.boolean({ + default: false, + description: 'Whether to skip a summary of changes by each individual batch transaction in the final result', + }), + skipConfirmation: flags.boolean({ + default: false, + description: 'Skips asking for confirmation before sending transactions', + }), + output: flags.string({ + char: 'o', + description: 'Output result to a file (based on the provided path) instead of stdout', + }), + ...LeaderCommandBase.flags, + } + + async promptForConfirmation(): Promise { + const rl = readline.createInterface({ input: stdin, output: stderr }) + const confirmed = await rl.question('Are you sure you want to continue? (y/N) ') + rl.close() + + return confirmed === 'y' + } + + async run(): Promise { + const { + flags: { + rate: targetReplicationRate, + activeOnly, + batchSize, + skipBucketsSummary, + skipTxSummary, + output, + dryRun, + skipConfirmation, + }, + } = this.parse(LeaderSetReplication) + + if (output) { + try { + await fs.writeFile(output, '') + } catch (e) { + logger.error(`Cannot access "${output}" for writing: ${e instanceof Error ? e.message : String(e)}`) + this.exit(ExitCodes.FileError) + } + } + + const api = await this.getApi() + + logger.info(`Fetching${activeOnly ? ' active' : ''} storage buckets...`) + + const storageBucketsMap = new Map( + (await api.query.storage.storageBucketById.entries()) + .flatMap(([sKey, value]) => { + const bucketId = sKey.args[0].toNumber() + const storageBucket = value.unwrap() + const isActive = storageBucket.operatorStatus.isStorageWorker + if (!isActive && activeOnly) { + return [] + } + + return [[bucketId, new UpdateableBucket(bucketId, storageBucket)] as const] + }) + // Sort entries to ensure deterministic results + .sort(([idA], [idB]) => idA - idB) + ) + + logger.info(`${storageBucketsMap.size}${activeOnly ? 
' active' : ''} storage buckets found.`) + + logger.info(`Fetching storage bags...`) + + const storageBags = (await api.query.storage.bags.entries()) + .map(([sKey, value]) => { + const bagId = sKey.args[0] + const bagData = { + size: value.objectsTotalSize.toBigInt(), + objectsNum: value.objectsNumber.toBigInt(), + storedBy: Array.from(value.storedBy.values()).map((v) => v.toNumber()), + } + return [bagId, bagData] as const + }) + // Sort entries to ensure deterministic results + .sort(([idA], [idB]) => cmpBagIds(idA, idB)) + + logger.info(`${storageBags.length} storage bags found.`) + + logger.info(`Preparing storage bag updates...`) + + const bagUpdates: BagUpdate[] = [] + let avgReplicationRate = 0 + for (const [bagId, bag] of storageBags) { + const bucketsToRemove = new Set() + const bucketsToAdd = new Set() + + const storedBy = bag.storedBy + .map((bucketId) => storageBucketsMap.get(bucketId)) + .filter((bucket): bucket is UpdateableBucket => !!bucket) + + avgReplicationRate += storedBy.length / storageBags.length + + while (storedBy.length > targetReplicationRate) { + // Pick a bucket with lowest storage available (taking into account already scheduled updates) + // among buckets that store the bag + const bucket = _.minBy(storedBy, (b) => b.storage.availableAfter) + assert(bucket) + bucket.storage.change -= bag.size + bucket.objects.change -= bag.objectsNum + bucket.bagsToRemove.add(stringifyBagId(bagId)) + bucketsToRemove.add(bucket.id) + _.remove(storedBy, (b) => b.id === bucket.id) + } + + while (storedBy.length < targetReplicationRate) { + // Pick a bucket with highest storage available among buckets that DON'T store the bag + // (taking into account already scheduled updates) and: + // - have objects.availableAfter >= bag.objectsNum + // - have storage.availableAfter >= bag.size + // - have acceptingNewBags == true + const notStoredBy = _.difference( + Array.from(storageBucketsMap.keys()), + storedBy.map((b) => b.id) + ) + .map((bucketId) => storageBucketsMap.get(bucketId)) + .filter( + (bucket): bucket is UpdateableBucket => + !!bucket && + bucket.acceptingNewBags && + bucket.objects.availableAfter >= bag.objectsNum && + bucket.storage.availableAfter >= bag.size + ) + const bucket = _.maxBy(notStoredBy, (b) => b.storage.availableAfter) + assert( + bucket, + 'Storage system capacity too low. Increase some stroage bucket voucher limits or choose a lower replication rate.' 
+ ) + bucket.storage.change += bag.size + bucket.objects.change += bag.objectsNum + bucket.bagsToAdd.add(stringifyBagId(bagId)) + bucketsToAdd.add(bucket.id) + storedBy.push(bucket) + } + + if (bucketsToAdd.size || bucketsToRemove.size) { + bagUpdates.push({ + size: bag.size, + bagId, + bucketsToAdd, + bucketsToRemove, + }) + } + } + logger.info(`${bagUpdates.length} updates prepared.`) + + const summaryCreator = new BagsUpdateSummaryCreator({ + logger, + initAvgReplicationRate: avgReplicationRate, + skipBucketsSummary, + skipTxSummary, + storageBucketsMap, + targetReplicationRate, + totalBagsNum: storageBags.length, + }) + + const chunkedBagUpdates = _.chunk(bagUpdates, batchSize) + const batchTxs = chunkedBagUpdates.map((updatesBatch) => + api.tx.utility.forceBatch( + updatesBatch.map((args) => + api.tx.storage.updateStorageBucketsForBag( + args.bagId, + createType('BTreeSet', args.bucketsToAdd), + createType('BTreeSet', args.bucketsToRemove) + ) + ) + ) + ) + + logger.info( + `Will execute ${bagUpdates.length} storage bag updates in ${batchTxs.length} utility.forceBatch transactions` + ) + summaryCreator.printExpectedResults() + + const confirmed = skipConfirmation ? true : await this.promptForConfirmation() + + if (confirmed) { + const progressBar = new SingleBar({ noTTYOutput: true }) + progressBar.start(batchTxs.length, 0, { title: `Executing the transactions...` }) + for (const i in batchTxs) { + const batchTx = batchTxs[i] + const batchBagUpdates = chunkedBagUpdates[i] + const batchResults: ParsedBatchCallResult[] | void = dryRun + ? Array.from({ length: batchBagUpdates.length }, () => ({ success: true })) + : await sendAndFollowNamedTx(api, this.getAccount(), batchTx, (result) => + getBatchResults(batchTx, api, result) + ) + assert(batchResults, `Could not parse utility.forceBatch results (tx: ${batchTx.hash.toJSON()})`) + summaryCreator.updateSummaryWithBatchResults( + dryRun ? 
`tx${i}_hash` : batchTx.hash.toJSON(), + batchResults, + batchBagUpdates + ) + progressBar.update(parseInt(i) + 1) + } + progressBar.stop() + + const summaryJson = summaryCreator.getSummaryJSON() + if (output) { + logger.info(`Writing output to ${output}...`) + await fs.writeFile(output, summaryJson) + } else { + console.log(summaryJson) + } + } + + this.exit() + } +} diff --git a/storage-node/src/commands/server.ts b/storage-node/src/commands/server.ts index 62be3c75cc..6e94c51c14 100644 --- a/storage-node/src/commands/server.ts +++ b/storage-node/src/commands/server.ts @@ -85,7 +85,8 @@ export default class Server extends ApiCommandBase { cleanup: flags.boolean({ char: 'c', description: 'Enable cleanup/pruning of no-longer assigned assets.', - default: false, + // Setting `env` key doesn't work for boolean flags: https://github.com/oclif/core/issues/487 + default: process.env.CLEANUP === 'true', }), cleanupBatchSize: flags.integer({ description: 'Maximum number of objects to process in a single batch during cleanup.', @@ -95,6 +96,7 @@ export default class Server extends ApiCommandBase { char: 'i', description: 'Interval between periodic cleanup actions (in minutes)', default: 360, + env: 'CLEANUP_INTERVAL', }), cleanupWorkersNumber: flags.integer({ required: false, diff --git a/storage-node/src/services/helpers/bagTypes.ts b/storage-node/src/services/helpers/bagTypes.ts index 32eb8cf444..16226cd7bc 100644 --- a/storage-node/src/services/helpers/bagTypes.ts +++ b/storage-node/src/services/helpers/bagTypes.ts @@ -7,6 +7,7 @@ import { import { createType, keysOf } from '@joystream/types' import ExitCodes from '../../command-base/ExitCodes' import { CLIError } from '@oclif/errors' +import { hexToBigInt } from '@polkadot/util' /** * Special error type for bagId parsing. Extends the CLIError with setting @@ -51,6 +52,36 @@ export function parseBagId(bagId: string): BagId { return parser.parse() } +/** + * Converts a BagId Codec type to string + * (compatible with Storage Squid / Orion / Query Node bag id) + * + * @param bagId Bag id as Codec type + * @returns Bag id as string + */ +export function stringifyBagId(bagId: BagId): string { + if (bagId.isDynamic) { + return bagId.asDynamic.isChannel + ? `dynamic:channel:${bagId.asDynamic.asChannel.toString()}` + : `dynamic:member:${bagId.asDynamic.asMember.toString()}` + } + + return bagId.asStatic.isCouncil ? `static:council` : `static:wg:${bagId.asStatic.asWorkingGroup.type.toLowerCase()}` +} + +/** + * Compares two bag ids by converting them to a BigInt + * (useful for sorting) + * + * @param idA First bag id + * @param idB Second bag id + * @returns -1 if idA < idB, 0 if idA == idB, 1 if idA > idB + */ +export function cmpBagIds(idA: BagId, idB: BagId): number { + const diff = hexToBigInt(idA.toHex()) - hexToBigInt(idB.toHex()) + return diff < 0 ? -1 : diff > 0 ? 1 : 0 +} + /** * Class-helper for actual bag ID parsing.
*/ diff --git a/storage-node/src/services/helpers/bagsUpdateSummary.ts b/storage-node/src/services/helpers/bagsUpdateSummary.ts new file mode 100644 index 0000000000..10b427b21f --- /dev/null +++ b/storage-node/src/services/helpers/bagsUpdateSummary.ts @@ -0,0 +1,365 @@ +import _ from 'lodash' +import { PalletStorageBagIdType as BagId, PalletStorageStorageBucketRecord } from '@polkadot/types/lookup' +import { ParsedBatchCallResult } from '../runtime/extrinsics' +import { stringifyBagId } from './bagTypes' +import { Logger } from 'winston' +import { asStorageSize } from './storageSize' +import { Enum } from '@polkadot/types-codec' + +export class UpdateableLimit { + public readonly limit: bigint + public readonly usedBefore: bigint + public change = 0n + + constructor(limit: bigint, usedBefore: bigint) { + this.limit = limit + this.usedBefore = usedBefore + } + + public get usedAfter(): bigint { + return this.usedBefore + this.change + } + + public get availableBefore(): bigint { + return this.limit - this.usedBefore + } + + public get availableAfter(): bigint { + return this.limit - this.usedAfter + } +} + +export class UpdateableBucket { + public readonly id: number + public readonly storage: UpdateableLimit + public readonly objects: UpdateableLimit + public readonly acceptingNewBags: boolean + public readonly active: boolean + public readonly bagsToRemove: Set + public readonly bagsToAdd: Set + + constructor(bucketId: number, storageBucket: PalletStorageStorageBucketRecord) { + const { sizeLimit, sizeUsed, objectsLimit, objectsUsed } = storageBucket.voucher + this.id = bucketId + this.active = storageBucket.operatorStatus.isStorageWorker.valueOf() + this.acceptingNewBags = storageBucket.acceptingNewBags.valueOf() + this.storage = new UpdateableLimit(sizeLimit.toBigInt(), sizeUsed.toBigInt()) + this.objects = new UpdateableLimit(objectsLimit.toBigInt(), objectsUsed.toBigInt()) + this.bagsToRemove = new Set() + this.bagsToAdd = new Set() + } +} + +type BeforeAfterStats = { + before: T + after: T +} + +type BagsSummary = { + totalSize: bigint + bags: { id: string; size: bigint }[] +} + +export type BagUpdate = { + size: bigint + bagId: BagId + bucketsToAdd: Set + bucketsToRemove: Set +} + +type FailedBagUpdate = BagUpdate & { error: string } + +export type Serialized = T extends Record + ? { [K in keyof T]: undefined extends T[K] ? Serialized> | undefined : Serialized } + : T extends Set + ? Serialized[] + : T extends Array + ? Array> + : T extends bigint + ? string + : T extends BagId + ? 
string + : T + +type BucketUpdateSummary = { + id: number + storageUsed: BeforeAfterStats + removed: BagsSummary + added: BagsSummary + failedToRemove: BagsSummary + failedToAdd: BagsSummary +} + +type TransactionSummary = { + hash: string + totalStorageUsage: BeforeAfterStats + avgReplicationRate: BeforeAfterStats + successfulUpdates: BagUpdate[] + failedUpdates: FailedBagUpdate[] +} + +export type FinalSummary = { + totalStorageUsage: BeforeAfterStats + avgReplicationRate: BeforeAfterStats + buckets?: BucketUpdateSummary[] + transactions?: TransactionSummary[] +} + +type BagsUpdateSummaryCreatorConfig = { + logger: Logger + initAvgReplicationRate: number + totalBagsNum: number + storageBucketsMap: Map + skipBucketsSummary: boolean + skipTxSummary: boolean + targetReplicationRate: number +} + +export class BagsUpdateSummaryCreator { + private currentTxSummary?: TransactionSummary + private perBucketSummaries?: Map + private summary: FinalSummary + + constructor(private config: BagsUpdateSummaryCreatorConfig) { + this.summary = this.initSummary() + } + + private formatBeforeAfterStats(stats: BeforeAfterStats, unit = '', decimals = 2): string { + const change = stats.after - stats.before + const formatValue = (v: number, addSign = false) => + (addSign && v >= 0 ? '+' : '') + v.toFixed(decimals) + (unit ? ` ${unit}` : '') + return `${formatValue(stats.before)} => ${formatValue(stats.after)} (${formatValue(change, true)})` + } + + public printExpectedResults(includeBuckets = true): void { + const { logger, initAvgReplicationRate, targetReplicationRate, storageBucketsMap } = this.config + const [storageUsageBefore, storageUnit] = asStorageSize(this.summary.totalStorageUsage.before) + const [storageUsageAfter] = asStorageSize( + this.summary.totalStorageUsage.before + + Array.from(storageBucketsMap.values()).reduce((sum, b) => (sum += b.storage.change), 0n), + storageUnit + ) + + const replicationRateStats = { + before: initAvgReplicationRate, + after: targetReplicationRate, + } + const storageUsageStats = { + before: storageUsageBefore, + after: storageUsageAfter, + } + + let output = '\n' + output += `Avg. 
replication rate: ${this.formatBeforeAfterStats(replicationRateStats)}\n` + output += `Total storage usage: ${this.formatBeforeAfterStats(storageUsageStats, storageUnit)}\n` + output += '\n' + + if (includeBuckets) { + for (const bucket of Array.from(storageBucketsMap.values())) { + const [storageUsageBefore, storageUsageUnit] = asStorageSize(bucket.storage.usedBefore) + const [storageUsageAfter] = asStorageSize(bucket.storage.usedAfter, storageUsageUnit) + + const [storageAvailBefore, storageAvailUnit] = asStorageSize(bucket.storage.availableBefore) + const [storageAvailAfter] = asStorageSize(bucket.storage.availableAfter, storageAvailUnit) + + const storageUsageStats = { before: storageUsageBefore, after: storageUsageAfter } + const storageAvailStats = { before: storageAvailBefore, after: storageAvailAfter } + + // TODO: Add objects limit + output += `-- Bucket ${bucket.id}:\n` + output += `---- Storage usage: ${this.formatBeforeAfterStats(storageUsageStats, storageUsageUnit)}\n` + output += `---- Storage available: ${this.formatBeforeAfterStats(storageAvailStats, storageAvailUnit)}\n` + output += `---- Bags to remove: ${bucket.bagsToRemove.size}\n` + output += `---- Bags to add: ${bucket.bagsToAdd.size}\n` + } + } + + logger.info(`Expected results:\n${output}`) + } + + private initSummary(): FinalSummary { + const { initAvgReplicationRate, storageBucketsMap, skipBucketsSummary } = this.config + const initStorageUsage = Array.from(storageBucketsMap.values()).reduce((sum, b) => sum + b.storage.usedBefore, 0n) + const summary: FinalSummary = { + totalStorageUsage: { + before: initStorageUsage, + after: initStorageUsage, + }, + avgReplicationRate: { + before: initAvgReplicationRate, + after: initAvgReplicationRate, + }, + } + + if (!skipBucketsSummary) { + this.perBucketSummaries = new Map() + for (const bucket of Array.from(storageBucketsMap.values())) { + this.perBucketSummaries.set(bucket.id, { + id: bucket.id, + storageUsed: { + before: bucket.storage.usedBefore, + after: bucket.storage.usedAfter, + }, + added: { totalSize: 0n, bags: [] }, + removed: { totalSize: 0n, bags: [] }, + failedToAdd: { totalSize: 0n, bags: [] }, + failedToRemove: { totalSize: 0n, bags: [] }, + }) + } + } + + return summary + } + + private initTxSummary(txHash: string): void { + const transactionSummary: TransactionSummary = { + hash: txHash, + avgReplicationRate: { + before: this.summary.avgReplicationRate.after, + after: this.summary.avgReplicationRate.after, + }, + totalStorageUsage: { + before: this.summary.totalStorageUsage.after, + after: this.summary.totalStorageUsage.after, + }, + failedUpdates: [], + successfulUpdates: [], + } + + if (!this.summary.transactions) { + this.summary.transactions = [] + } + + this.summary.transactions.push(transactionSummary) + this.currentTxSummary = transactionSummary + } + + public handleSuccessfulBagUpdate(bagUpdate: BagUpdate): void { + this.updateStorageUsage(this.summary.totalStorageUsage, bagUpdate) + this.updateAvgReplicationRate(this.summary.avgReplicationRate, bagUpdate) + if (this.currentTxSummary) { + this.currentTxSummary.successfulUpdates.push(bagUpdate) + } + this.updatePerBucketSummaries(bagUpdate) + } + + private updateBagsSummary(bagsSummary: BagsSummary, bagUpdate: BagUpdate) { + bagsSummary.totalSize += bagUpdate.size + bagsSummary.bags.push({ id: stringifyBagId(bagUpdate.bagId), size: bagUpdate.size }) + } + + private updateBagsSummaryOfBucket( + bucketId: number, + type: 'added' | 'failedToAdd' | 'removed' | 'failedToRemove', + bagUpdate: BagUpdate + 
) { + if (this.perBucketSummaries) { + const bucketSummary = this.perBucketSummaries.get(bucketId) + if (bucketSummary) { + this.updateBagsSummary(bucketSummary[type], bagUpdate) + } + } + } + + private updatePerBucketSummaries(bagUpdate: BagUpdate | FailedBagUpdate): void { + if (this.perBucketSummaries) { + if ('error' in bagUpdate) { + for (const bucketId of bagUpdate.bucketsToAdd) { + this.updateBagsSummaryOfBucket(bucketId, 'failedToAdd', bagUpdate) + } + for (const bucketId of bagUpdate.bucketsToRemove) { + this.updateBagsSummaryOfBucket(bucketId, 'failedToRemove', bagUpdate) + } + } else { + for (const bucketId of bagUpdate.bucketsToAdd) { + this.updateBagsSummaryOfBucket(bucketId, 'added', bagUpdate) + } + for (const bucketId of bagUpdate.bucketsToRemove) { + this.updateBagsSummaryOfBucket(bucketId, 'removed', bagUpdate) + } + } + } + } + + public handleFailedBagUpdate(failedBagUpdate: FailedBagUpdate): void { + if (this.currentTxSummary) { + this.currentTxSummary.failedUpdates.push(failedBagUpdate) + } + this.updatePerBucketSummaries(failedBagUpdate) + } + + private updateAvgReplicationRate(avgReplicationRate: BeforeAfterStats, bagUpdate: BagUpdate) { + const { targetReplicationRate, totalBagsNum } = this.config + const bagPreviousReplicationRate = + targetReplicationRate + bagUpdate.bucketsToRemove.size - bagUpdate.bucketsToAdd.size + avgReplicationRate.after -= bagPreviousReplicationRate / totalBagsNum + avgReplicationRate.after += targetReplicationRate / totalBagsNum + } + + private updateStorageUsage(stats: BeforeAfterStats, bagUpdate: BagUpdate): void { + stats.after += bagUpdate.size * (BigInt(bagUpdate.bucketsToAdd.size) - BigInt(bagUpdate.bucketsToRemove.size)) + } + + public updateSummaryWithBatchResults( + txHash: string, + results: ParsedBatchCallResult[], + bagUpdates: BagUpdate[] + ): void { + if (!this.config.skipTxSummary) { + this.initTxSummary(txHash) + } + + for (const i in results) { + const result = results[i] + const bagUpdate = bagUpdates[i] + if ('error' in result) { + this.handleFailedBagUpdate({ ...bagUpdate, error: result.error }) + } else { + this.handleSuccessfulBagUpdate(bagUpdate) + } + } + + if (this.currentTxSummary) { + this.currentTxSummary.avgReplicationRate.after = this.summary.avgReplicationRate.after + this.currentTxSummary.totalStorageUsage.after = this.summary.totalStorageUsage.after + } + } + + private roundBeforeAfterStat(stat: BeforeAfterStats, precision = 2) { + stat.before = _.round(stat.before, precision) + stat.after = _.round(stat.after, precision) + } + + public getSummary(): FinalSummary { + if (this.perBucketSummaries) { + this.summary.buckets = Array.from(this.perBucketSummaries.values()) + } + this.roundBeforeAfterStat(this.summary.avgReplicationRate) + if (this.summary.transactions) { + for (const txSummary of this.summary.transactions) { + this.roundBeforeAfterStat(txSummary.avgReplicationRate) + } + } + + return this.summary + } + + public getSummaryJSON(): string { + return JSON.stringify( + this.getSummary(), + (key, value) => { + if (typeof value === 'bigint') { + return value.toString() + } + if (value instanceof Enum) { + return stringifyBagId(value as BagId) + } + if (value instanceof Set) { + return Array.from(value) + } + return value + }, + 2 + ) + } +} diff --git a/storage-node/src/services/helpers/storageSize.ts b/storage-node/src/services/helpers/storageSize.ts new file mode 100644 index 0000000000..6eeda4a972 --- /dev/null +++ b/storage-node/src/services/helpers/storageSize.ts @@ -0,0 +1,34 @@ +export const UNITS 
= { + 'B': 1, + 'KB': 1_000, + 'MB': 1_000_000, + 'GB': 1_000_000_000, + 'TB': 1_000_000_000_000, + 'PB': 1_000_000_000_000_000, // MAX_SAFE_INTEGER / 9.007 +} as const + +export type UnitType = keyof typeof UNITS + +/** + * Converts storage size in bytes (BigInt) to the most appropriate unit (MB / GB / TB etc.), + * such that the size expressed in this unit is < 1_000 and can be converted to a Number. + * Optionally the target unit can also be forced. + * + * @param bytes Number of bytes (BigInt) + * @param forcedUnit Optional: Target unit to force + * @param decimals Number of digits past the decimal point to include in the result. + * @returns [Number, Unit] tuple + */ +export function asStorageSize(bytes: bigint, forcedUnit?: UnitType, decimals = 2): [number, UnitType] { + const unitEntries = Object.entries(UNITS) + let targetUnit = unitEntries.find(([unit]) => unit === forcedUnit) + if (!targetUnit) { + let i = 0 + while (bytes / BigInt(unitEntries[i][1]) >= 1_000 && i < unitEntries.length - 1) { + i += 1 + } + targetUnit = unitEntries[i] + } + const decimalScaler = Math.min(targetUnit[1], Math.pow(10, decimals)) + return [Number(bytes / BigInt(targetUnit[1] / decimalScaler)) / decimalScaler, targetUnit[0] as UnitType] +} diff --git a/storage-node/src/services/runtime/api.ts b/storage-node/src/services/runtime/api.ts index 569bc733ea..5db9abd890 100644 --- a/storage-node/src/services/runtime/api.ts +++ b/storage-node/src/services/runtime/api.ts @@ -5,7 +5,7 @@ import { KeyringPair } from '@polkadot/keyring/types' import { TypeRegistry } from '@polkadot/types' import type { Index } from '@polkadot/types/interfaces/runtime' import { DispatchError } from '@polkadot/types/interfaces/system' -import { IEvent, ISubmittableResult } from '@polkadot/types/types' +import { Codec, IEvent, ISubmittableResult } from '@polkadot/types/types' import { formatBalance } from '@polkadot/util' import AwaitLock from 'await-lock' import stringify from 'fast-safe-stringify' @@ -246,3 +246,11 @@ export function getEvents< >(result: SubmittableResult, section: S, eventNames: M[]): EventType[] { return result.filterRecords(section, eventNames).map((e) => e.event as unknown as EventType) } + +export function isEvent< + S extends keyof ApiPromise['events'] & string, + M extends keyof ApiPromise['events'][S] & string, + EventData extends Codec[] = ApiPromise['events'][S][M] extends AugmentedEvent<'promise', infer T> ? T : Codec[] +>(event: IEvent<Codec[]>, section: S, method: M): event is IEvent<EventData> { + return event.section === section && event.method === method +} diff --git a/storage-node/src/services/runtime/extrinsics.ts b/storage-node/src/services/runtime/extrinsics.ts index 924009f4f8..2acc8346d9 100644 --- a/storage-node/src/services/runtime/extrinsics.ts +++ b/storage-node/src/services/runtime/extrinsics.ts @@ -6,7 +6,11 @@ import { timeout } from 'promise-timeout' import logger from '../../services/logger' import { parseBagId } from '../helpers/bagTypes' import { AcceptPendingDataObjectsParams } from '../sync/acceptPendingObjects' -import { formatDispatchError, getEvent, getEvents, sendAndFollowNamedTx } from './api' +import { formatDispatchError, getEvent, getEvents, isEvent, sendAndFollowNamedTx } from './api' +import { ISubmittableResult } from '@polkadot/types/types' +import { SubmittableExtrinsic } from '@polkadot/api/types' +import { Call } from '@polkadot/types/interfaces' +import { Vec } from '@polkadot/types-codec' /** * Creates storage bucket.
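The `isEvent` helper added to `api.ts` above is a type guard keyed on `event.section` and `event.method`, so `event.data` is narrowed to the event's actual payload without casting; the `getBatchResults` function introduced in the next hunk leans on it to pick out `utility.ItemFailed` and `utility.ItemCompleted` events. A small usage sketch — the surrounding function and variable names are assumptions, not part of the patch:

```ts
import { ApiPromise } from '@polkadot/api'
import { ISubmittableResult } from '@polkadot/types/types'
import { formatDispatchError, isEvent } from './api'

// Collect human-readable errors for every failed item of a utility batch result
function collectItemErrors(api: ApiPromise, result: ISubmittableResult): string[] {
  const errors: string[] = []
  for (const { event } of result.events) {
    if (isEvent(event, 'utility', 'ItemFailed')) {
      // Narrowed by the type guard: the first data item is the DispatchError
      const [dispatchError] = event.data
      errors.push(formatDispatchError(api, dispatchError))
    }
  }
  return errors
}
```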
@@ -107,29 +111,54 @@ export async function updateStorageBucketsForBags( const txBatch = batchFn(txs) failedCalls = await sendAndFollowNamedTx(api, account, txBatch, (result) => { - const [batchCompletedEvent] = getEvents(result, 'utility', ['BatchCompleted']) - if (batchCompletedEvent) { - return [] - } - - // find all the failed calls based on their index - const events = getEvents(result, 'utility', ['ItemCompleted', 'ItemFailed']) - return events - .map((e, i) => { - if (e.method === 'ItemFailed') { - return { - args: txs[i].args.toString(), - error: formatDispatchError(api, e.data[0]), - } - } - }) - .filter(Boolean) + const batchResults = getBatchResults(txBatch, api, result) + return batchResults.flatMap((r, i) => ('error' in r ? [{ args: txs[i].args.toString(), error: r.error }] : [])) }) }) return [success, failedCalls] } +export type ParsedBatchCallResult = { success: true } | { error: string } + +/** + * Extracts individual call results from an utility.(batch|forceBatch|batchAll) + * extrinsic result. + * + * @param tx The utility.(batch|forceBatch|batchAll) extrinsic + * @param api @polkadot/api instance + * @param result Extrinsic result + * + * @returns An array of parsed results + */ +export function getBatchResults( + tx: SubmittableExtrinsic<'promise'>, + api: ApiPromise, + result: ISubmittableResult +): ParsedBatchCallResult[] { + const callsNum = (tx.args[0] as Vec).length + let results: ParsedBatchCallResult[] = [] + for (const { event } of result.events) { + if (isEvent(event, 'utility', 'ItemFailed')) { + const [dispatchError] = event.data + results.push({ error: formatDispatchError(api, dispatchError) }) + } + if (isEvent(event, 'utility', 'ItemCompleted')) { + results.push({ success: true }) + } + } + if (results.length < callsNum) { + results = [ + ...results, + ...Array.from({ length: callsNum - results.length }, () => ({ + error: 'Interrupted', + })), + ] + } + + return results +} + /** * Accepts pending data objects by storage provider in batch transaction. 
* diff --git a/tests/network-tests/run-tests.sh b/tests/network-tests/run-tests.sh index e6aedbf9e6..9ff51076df 100755 --- a/tests/network-tests/run-tests.sh +++ b/tests/network-tests/run-tests.sh @@ -38,6 +38,7 @@ if [ "${NO_QN}" != true ]; then fi if [ "${NO_STORAGE}" != true ]; then + export CLEANUP_INTERVAL ./start-storage.sh fi diff --git a/tests/network-tests/src/fixtures/storage/GenerateChannelAssetsFixture.ts b/tests/network-tests/src/fixtures/storage/GenerateChannelAssetsFixture.ts new file mode 100644 index 0000000000..0f1fea4956 --- /dev/null +++ b/tests/network-tests/src/fixtures/storage/GenerateChannelAssetsFixture.ts @@ -0,0 +1,159 @@ +import assert from 'assert' +import { readFileSync } from 'fs' +import { ChannelCreationInputParameters } from '@joystream/cli/src/Types' +import { MemberId } from '@joystream/types/primitives' +import { BaseQueryNodeFixture, FixtureRunner } from '../../Fixture' +import { JoystreamCLI } from '../../cli/joystream' +import { QueryNodeApi } from '../../QueryNodeApi' +import { Api } from '../../Api' +import { BuyMembershipHappyCaseFixture } from '../membership' +import { Utils } from '../../utils' +import { ColossusApi } from '../../../ColossusApi' +import { ChannelFieldsFragment } from '../../graphql/generated/queries' + +export type GenerateAssetsFixtureParams = { + numberOfChannels: number + avatarGenerator?: (i: number) => string + coverGenerator?: (i: number) => string +} + +export type CreatedChannelData = { + id: number + coverPhotoPath: string + avatarPhotoPath: string + qnData?: ChannelFieldsFragment +} + +export type VerifyAssetsInput = { + api: ColossusApi + channelIds: number[] +} + +export class GenerateAssetsFixture extends BaseQueryNodeFixture { + private _channelsCreated: CreatedChannelData[] = [] + + constructor( + public api: Api, + public query: QueryNodeApi, + public cli: JoystreamCLI, + private params: GenerateAssetsFixtureParams + ) { + super(api, query) + } + + public get channelsCreated(): CreatedChannelData[] { + assert(this._channelsCreated.length, 'Trying to retrieve channelsCreated before any results are available') + return this._channelsCreated + } + + private async setupChannelOwner() { + const { api, query } = this + // Create a member that will create the channels + const [memberKeyPair] = await api.createKeyPairs(1) + const memberAddr = memberKeyPair.key.address + const buyMembershipFixture = new BuyMembershipHappyCaseFixture(api, query, [memberAddr]) + await new FixtureRunner(buyMembershipFixture).run() + const [memberId] = buyMembershipFixture.getCreatedMembers() + + // Give member 10 JOY per channel, to be able to pay the fees + await api.treasuryTransferBalance(memberAddr, Utils.joy(10)) + + // Import the member controller key to CLI + this.cli.importAccount(memberKeyPair.key) + + return memberId + } + + /* + Execute this Fixture. + */ + public async execute(): Promise { + this.debug('Setting up channel owner') + const memberId = await this.setupChannelOwner() + + this.debug('Creating channels') + this._channelsCreated = await this.createChannels(memberId) + } + + private defaultAvatarGenerator(i: number): string { + return this.cli.getTmpFileManager().randomImgFile(300, 300) + } + + private defaultCoverGenerator(i: number): string { + return this.cli.getTmpFileManager().randomImgFile(1920, 500) + } + + /** + Generates the channels. 
+ */ + private async createChannels(memberId: MemberId): Promise<CreatedChannelData[]> { + const { avatarGenerator: customAvatarGenerator, coverGenerator: customCoverGenerator } = this.params + const avatarGenerator = customAvatarGenerator || this.defaultAvatarGenerator.bind(this) + const coverGenerator = customCoverGenerator || this.defaultCoverGenerator.bind(this) + const channelsData: CreatedChannelData[] = [] + for (let i = 0; i < this.params.numberOfChannels; ++i) { + const avatarPhotoPath = avatarGenerator(i) + const coverPhotoPath = coverGenerator(i) + const channelInput: ChannelCreationInputParameters = { + title: `GenerateAssetsFixture channel ${i + 1}`, + avatarPhotoPath, + coverPhotoPath, + } + const channelId = await this.cli.createChannel(channelInput, [ + '--context', + 'Member', + '--useMemberId', + memberId.toString(), + ]) + this.debug(`Created channel ${i + 1} / ${this.params.numberOfChannels}`) + channelsData.push({ id: channelId, avatarPhotoPath, coverPhotoPath }) + } + return channelsData + } + + public async runQueryNodeChecks(): Promise<void> { + await super.runQueryNodeChecks() + const { query, channelsCreated } = this + await query.tryQueryWithTimeout( + () => query.channelsByIds(channelsCreated.map(({ id }) => id.toString())), + (r) => { + this.channelsCreated.forEach((channelData) => { + const qnData = r.find((c) => c.id === channelData.id.toString()) + Utils.assert(qnData, `Cannot find channel ${channelData.id} in the query node`) + Utils.assert(qnData.avatarPhoto && qnData.coverPhoto, `Missing some assets in channel ${channelData.id}`) + channelData.qnData = qnData + }) + } + ) + } + + public verifyAssets = async (inputs: VerifyAssetsInput[], retryTime = 10_000, maxRetries = 18): Promise<void> => { + await Utils.until( + `assets stored by Colossus nodes match expectations`, + async () => { + const verifyAssetsPromises = this.channelsCreated.map( + async ({ id, avatarPhotoPath, coverPhotoPath, qnData }) => { + Utils.assert(qnData && qnData.avatarPhoto && qnData.coverPhoto) + for (const { api: colossusApi, channelIds: expectedChannelIds } of inputs) { + if (expectedChannelIds.includes(id)) { + await Promise.all([ + colossusApi.fetchAndVerifyAsset(qnData.coverPhoto.id, readFileSync(coverPhotoPath), 'image/bmp'), + colossusApi.fetchAndVerifyAsset(qnData.avatarPhoto.id, readFileSync(avatarPhotoPath), 'image/bmp'), + ]) + } else { + await Promise.all([ + colossusApi.expectAssetNotFound(qnData.coverPhoto.id), + colossusApi.expectAssetNotFound(qnData.avatarPhoto.id), + ]) + } + } + } + ) + await Promise.all(verifyAssetsPromises) + return true + }, + retryTime, + maxRetries + ) + } +} diff --git a/tests/network-tests/src/fixtures/storage/SetReplicationRateFixture.ts b/tests/network-tests/src/fixtures/storage/SetReplicationRateFixture.ts new file mode 100644 index 0000000000..10ad8abbe0 --- /dev/null +++ b/tests/network-tests/src/fixtures/storage/SetReplicationRateFixture.ts @@ -0,0 +1,126 @@ +import _ from 'lodash' +import { PalletStorageStorageBucketRecord } from '@polkadot/types/lookup' +import { FinalSummary, Serialized } from 'storage-node/src/services/helpers/bagsUpdateSummary' +import { BaseQueryNodeFixture } from '../../Fixture' +import { QueryNodeApi } from '../../QueryNodeApi' +import { Api } from '../../Api' +import { assert } from 'chai' +import { StorageCLI } from '../../cli/storage' + +export type SetReplicationRateFixtureParams = { + oldRate: number + newRate: number + expectedNumUpdates: number + expectedBuckets: { + id: number + removed: { id: string; size: bigint }[] + added: { id:
string; size: bigint }[] + }[] +} + +export class SetReplicationRateFixture extends BaseQueryNodeFixture { + constructor(public api: Api, public query: QueryNodeApi, private params: SetReplicationRateFixtureParams) { + super(api, query) + } + + private async setupStorageCLI() { + const leaderKey = await this.api.getLeadRoleKey('storageWorkingGroup') + const cli = new StorageCLI(this.api.getSuri(leaderKey)) + return cli + } + + private async checkResultAgainstExpectations( + result: Serialized<FinalSummary>, + storageBucketsBefore: (readonly [number, PalletStorageStorageBucketRecord])[] + ) { + const { oldRate, newRate, expectedNumUpdates, expectedBuckets } = this.params + + const expectedStorageUsageBefore = storageBucketsBefore.reduce((a, [, b]) => a + b.voucher.sizeUsed.toBigInt(), 0n) + const expectedStorageUsageAfter = (Number(expectedStorageUsageBefore) * (newRate / oldRate)).toString() + + assert.equal(result.avgReplicationRate.before, oldRate) + assert.equal(result.avgReplicationRate.after, newRate) + assert.equal(result.totalStorageUsage.before.toString(), expectedStorageUsageBefore.toString()) + assert.equal(result.totalStorageUsage.after.toString(), expectedStorageUsageAfter.toString()) + assert(result.transactions) + // Expecting 1 batch transaction only! + assert.equal(result.transactions[0].avgReplicationRate.before, oldRate) + assert.equal(result.transactions[0].avgReplicationRate.after, newRate) + assert.equal(result.transactions[0].failedUpdates.length, 0) + assert.equal(result.transactions[0].successfulUpdates.length, expectedNumUpdates) + assert.equal(result.transactions[0].totalStorageUsage.before.toString(), expectedStorageUsageBefore.toString()) + assert.equal(result.transactions[0].totalStorageUsage.after.toString(), expectedStorageUsageAfter.toString()) + const resultBuckets = result.buckets + assert(resultBuckets) + assert.equal(resultBuckets.length, expectedBuckets.length) + for (const i of resultBuckets.keys()) { + const [, bucketBefore] = storageBucketsBefore.find(([id]) => id === resultBuckets[i].id) || [] + const expectedBucket = expectedBuckets.find((b) => b.id === resultBuckets[i].id) + assert(bucketBefore) + assert(expectedBucket) + assert.equal(resultBuckets[i].storageUsed.before.toString(), bucketBefore.voucher.sizeUsed.toString()) + assert.sameDeepMembers( + resultBuckets[i].added.bags, + expectedBucket.added.map((v) => ({ ...v, size: v.size.toString() })) + ) + assert.sameDeepMembers( + resultBuckets[i].removed.bags, + expectedBucket.removed.map((v) => ({ ...v, size: v.size.toString() })) + ) + assert.equal( + resultBuckets[i].storageUsed.after.toString(), + ( + bucketBefore.voucher.sizeUsed.toBigInt() - + expectedBucket.removed.reduce((sum, b) => sum + b.size, 0n) + + expectedBucket.added.reduce((sum, b) => sum + b.size, 0n) + ).toString() + ) + assert.equal(resultBuckets[i].failedToAdd.bags.length, 0) + assert.equal(resultBuckets[i].failedToRemove.bags.length, 0) + } + } + + private async checkResultAgainstNewChainState(result: Serialized<FinalSummary>): Promise<void> { + const storageBucketsAfter = (await this.api.query.storage.storageBucketById.entries()).map(([sKey, bucket]) => { + return [sKey.args[0].toNumber(), bucket.unwrap()] as const + }) + const storageBagsAfter = (await this.api.query.storage.bags.entries()).map(([sKey, bag]) => { + return [sKey.args[0], bag] as const + }) + + assert.closeTo( + result.avgReplicationRate.after, + _.meanBy(storageBagsAfter, ([, bag]) => bag.storedBy.size), + 1e-6 + ) + assert.equal( + result.totalStorageUsage.after,
storageBucketsAfter.reduce((sum, [, bucket]) => sum + bucket.voucher.sizeUsed.toBigInt(), 0n).toString() + ) + + for (const [bucketId, bucket] of storageBucketsAfter) { + const resultBucket = result.buckets?.find((b) => b.id === bucketId) + assert(resultBucket) + assert.equal(resultBucket.storageUsed.after, bucket.voucher.sizeUsed.toString()) + } + } + + /* + Execute this Fixture. + */ + public async execute(): Promise<void> { + const { newRate } = this.params + + const storageBucketsBefore = (await this.api.query.storage.storageBucketById.entries()).map(([sKey, bucket]) => { + return [sKey.args[0].toNumber(), bucket.unwrap()] as const + }) + + const storageCli = await this.setupStorageCLI() + const flags = ['--rate', newRate.toString(), '--skipConfirmation'] + const { out } = await storageCli.run('leader:set-replication', flags) + const result: Serialized<FinalSummary> = JSON.parse(out) + + await this.checkResultAgainstExpectations(result, storageBucketsBefore) + await this.checkResultAgainstNewChainState(result) + } +} diff --git a/tests/network-tests/src/flows/storage/setReplicationRate.ts b/tests/network-tests/src/flows/storage/setReplicationRate.ts new file mode 100644 index 0000000000..0375dc4bf5 --- /dev/null +++ b/tests/network-tests/src/flows/storage/setReplicationRate.ts @@ -0,0 +1,182 @@ +import fs from 'fs/promises' +import urljoin from 'url-join' +import _ from 'lodash' +import { assert } from 'chai' +import { stringifyBagId } from 'storage-node/src/services/helpers/bagTypes' +import { FlowProps } from '../../Flow' +import { extendDebug } from '../../Debugger' +import { FixtureRunner } from '../../Fixture' +import { Utils } from '../../utils' +import { ColossusApi } from '../../../ColossusApi' +import { doubleBucketConfig } from './initStorage' +import { createJoystreamCli } from '../utils' +import { GenerateAssetsFixture } from '../../fixtures/storage/GenerateChannelAssetsFixture' +import { SetReplicationRateFixture } from '../../fixtures/storage/SetReplicationRateFixture' + +export async function setReplicationRate({ api, query }: FlowProps): Promise<void> { + const debug = extendDebug('flow:setReplicationRate') + api.enableDebugTxLogs() + debug('Started') + + // Get storage leader key + const [, storageLeader] = await api.getLeader('storageWorkingGroup') + const storageLeaderKey = storageLeader.roleAccountId.toString() + + // Check preconditions: + const activeStorageBuckets = (await api.query.storage.storageBucketById.entries()) + .filter(([, b]) => b.unwrap().operatorStatus.isStorageWorker) + .map(([sKey, bucket]) => [sKey.args[0].toNumber(), bucket.unwrap()] as const) + const channelBagPolicies = await api.query.storage.dynamicBagCreationPolicies('Channel') + assert.equal(channelBagPolicies.numberOfStorageBuckets.toNumber(), 2) + assert.equal(activeStorageBuckets.length, 2) + debug('Preconditions OK') + + // Initialize Joystream CLI + const joystreamCli = await createJoystreamCli() + + // Generate assets + const NUM_CHANNELS = 3 + const generateAssetsFixture = new GenerateAssetsFixture(api, query, joystreamCli, { numberOfChannels: NUM_CHANNELS }) + await new FixtureRunner(generateAssetsFixture).runWithQueryNodeChecks() + const channelsData = generateAssetsFixture.channelsCreated + const channelIds = channelsData.map((c) => c.id).sort() + const singleChannelAssetsSize = + (await fs.stat(channelsData[0].avatarPhotoPath)).size + (await fs.stat(channelsData[0].coverPhotoPath)).size + + // Verify that both storage nodes store all assets of the created channels + const colossus1Endpoint =
doubleBucketConfig.buckets[0].metadata.endpoint + const colossus2Endpoint = doubleBucketConfig.buckets[1].metadata.endpoint + Utils.assert(colossus1Endpoint && colossus2Endpoint, 'Missing one of 2 colossus node endpoints!') + + const colossus1Api = new ColossusApi(urljoin(colossus1Endpoint, 'api/v1')) + const colossus2Api = new ColossusApi(urljoin(colossus2Endpoint, 'api/v1')) + + debug('Checking if both storage nodes store all assets...') + await generateAssetsFixture.verifyAssets([ + { api: colossus1Api, channelIds }, + { api: colossus2Api, channelIds }, + ]) + + // Adjust vouchers so that + // 1. bucket0 (colossus1) has `singleChannelAssetsSize - 1` available space + // 2. bucket1 (colossus2) has 0 available space + + // This would cause the channel bags to be REMOVED in the following order when setting replication rate to 1: + // from bucket1, from bucket0, from bucket1 ... + + // After the change: + // 1. bucket0 will have `singleChannelAssetsSize * (floor(NUM_CHANNELS / 2) + 1) - 1` available space + // 2. bucket1 will have `singleChannelAssetsSize * (floor(NUM_CHANNELS / 2) + NUM_CHANNELS % 2)` available space + + // Assuming NUM_CHANNELS % 2 === 1, this would cause the channel bags to be ADDED in the following order + // when setting replication rate back to 2: + // to bucket1, to bucket0, to bucket1 ... + + debug('Updating storage bucket voucher limits...') + const updateLimitTxs = [ + api.tx.storage.setStorageBucketVoucherLimits( + 0, + singleChannelAssetsSize * (NUM_CHANNELS + 1) - 1, + await api.query.storage.voucherMaxObjectsNumberLimit() + ), + api.tx.storage.setStorageBucketVoucherLimits( + 1, + singleChannelAssetsSize * NUM_CHANNELS, + await api.query.storage.voucherMaxObjectsNumberLimit() + ), + ] + await api.sendExtrinsicsAndGetResults(updateLimitTxs, storageLeaderKey) + + // Update number of storage buckets in dynamic bag creation policy to 1 + debug('Updating channel bag creation policy (1)...') + const updateDynamicBagPolicyTx = api.tx.storage.updateNumberOfStorageBucketsInDynamicBagCreationPolicy('Channel', 1) + await api.sendExtrinsicsAndGetResults([updateDynamicBagPolicyTx], storageLeaderKey) + + // Adjust the actual replication rate to match the new policy + const staticBagIds = (await api.query.storage.bags.entries()) + .map(([sKey]) => sKey.args[0]) + .filter((bagId) => bagId.isStatic) + .map(stringifyBagId) + + // Channel bags should be removed alternately starting from bucket1 + // (due to voucher configuration provided above) + const bucket0ExpectedChannelsRemoved = channelIds.filter((c, i) => i % 2 === 1) + const bucket1ExpectedChannelsRemoved = channelIds.filter((c, i) => i % 2 === 0) + + const expectedBagRemovalsByBucket = new Map([ + [ + 0, + bucket0ExpectedChannelsRemoved.map((id) => ({ + id: `dynamic:channel:${id}`, + size: BigInt(singleChannelAssetsSize), + })), + ], + [ + 1, + [ + // Because all static bags are empty, they will be removed from bucket1, + // as it initially has less storage available + ...staticBagIds.map((id) => ({ id, size: BigInt(0) })), + ...bucket1ExpectedChannelsRemoved.map((id) => ({ + id: `dynamic:channel:${id}`, + size: BigInt(singleChannelAssetsSize), + })), + ], + ], + ]) + + debug('Setting replication rate (1)...') + const setReplicationRateFixture = new SetReplicationRateFixture(api, query, { + oldRate: 2, + newRate: 1, + expectedNumUpdates: staticBagIds.length + 3, + expectedBuckets: Array.from(expectedBagRemovalsByBucket.entries()).map(([bucketId, bagRemovals]) => ({ + id: bucketId, + removed: bagRemovals, + added: [],
})), + }) + await new FixtureRunner(setReplicationRateFixture).run() + + debug('Checking if storage nodes only store expected assets...') + await generateAssetsFixture.verifyAssets([ + { api: colossus1Api, channelIds: _.difference(channelIds, bucket0ExpectedChannelsRemoved) }, + { api: colossus2Api, channelIds: _.difference(channelIds, bucket1ExpectedChannelsRemoved) }, + ]) + + // Update number of storage buckets in dynamic bag creation policy back to 2 + debug('Updating channel bag creation policy (2)...') + const updateDynamicBagPolicy2Tx = api.tx.storage.updateNumberOfStorageBucketsInDynamicBagCreationPolicy('Channel', 2) + await api.sendExtrinsicsAndGetResults([updateDynamicBagPolicy2Tx], storageLeaderKey) + + // Adjust the actual replication rate to match the new policy + debug('Setting replication rate (2)...') + const setReplicationRateFixture2 = new SetReplicationRateFixture(api, query, { + oldRate: 1, + newRate: 2, + expectedNumUpdates: staticBagIds.length + 3, + // bucket1 will initially have more storage avialable, so the order of adding bags to buckets will match + // the order in which they were removed + expectedBuckets: Array.from(expectedBagRemovalsByBucket.entries()).map(([bucketId, bagRemovals]) => ({ + id: bucketId, + removed: [], + added: bagRemovals, + })), + }) + await new FixtureRunner(setReplicationRateFixture2).run() + + debug('Checking if both storage nodes store all assets...') + await generateAssetsFixture.verifyAssets([ + { api: colossus1Api, channelIds }, + { api: colossus2Api, channelIds }, + ]) + + // Restore previous storage bucket voucher limits + debug('Restoring previous storage bucket voucher limits...') + const restoreLimitTxs = activeStorageBuckets.map(([bucketId, bucket]) => + api.tx.storage.setStorageBucketVoucherLimits(bucketId, bucket.voucher.sizeLimit, bucket.voucher.objectsLimit) + ) + await api.sendExtrinsicsAndGetResults(restoreLimitTxs, storageLeaderKey) + + debug('Done') +} diff --git a/tests/network-tests/src/flows/storage/storageCleanup.ts b/tests/network-tests/src/flows/storage/storageCleanup.ts index 75b3ddaf20..b715fd7883 100644 --- a/tests/network-tests/src/flows/storage/storageCleanup.ts +++ b/tests/network-tests/src/flows/storage/storageCleanup.ts @@ -1,74 +1,29 @@ +import urljoin from 'url-join' +import { createType } from '@joystream/types' import { FlowProps } from '../../Flow' import { extendDebug } from '../../Debugger' -import { BuyMembershipHappyCaseFixture } from '../../fixtures/membership' +import { GenerateAssetsFixture } from '../../fixtures/storage/GenerateChannelAssetsFixture' import { FixtureRunner } from '../../Fixture' -import { createType } from '@joystream/types' -import { ChannelCreationInputParameters } from '@joystream/cli/src/Types' import { Utils } from '../../utils' import { ColossusApi } from '../../../ColossusApi' import { doubleBucketConfig } from './initStorage' -import { readFileSync } from 'fs' import { createJoystreamCli } from '../utils' -import urljoin from 'url-join' export async function storageCleanup({ api, query }: FlowProps): Promise { const debug = extendDebug('flow:storageCleanup') api.enableDebugTxLogs() debug('Started') - // Get sotrage leader key + // Get storage leader key const [, storageLeader] = await api.getLeader('storageWorkingGroup') const storageLeaderKey = storageLeader.roleAccountId.toString() - // Create a member that will create the channels - const [, memberKeyPair] = await api.createKeyPairs(2) - const memberAddr = memberKeyPair.key.address - const buyMembershipFixture = new 
BuyMembershipHappyCaseFixture(api, query, [memberAddr]) - await new FixtureRunner(buyMembershipFixture).run() - const [memberId] = buyMembershipFixture.getCreatedMembers() - - // Give member 100 JOY, to be able to create a few channels through CLI - await api.treasuryTransferBalance(memberAddr, Utils.joy(100)) - - // Use JoystreamCLI to create a few channels w/ some avatarPhoto and coverPhoto objects + // Generate channel assets const joystreamCli = await createJoystreamCli() - await joystreamCli.importAccount(memberKeyPair.key) - - const numChannels = 3 - - const channelsData: { channelId: number; avatarPhotoPath: string; coverPhotoPath: string }[] = [] - for (let i = 0; i < numChannels; ++i) { - const avatarPhotoPath = joystreamCli.getTmpFileManager().randomImgFile(300, 300) - const coverPhotoPath = joystreamCli.getTmpFileManager().randomImgFile(1920, 500) - const channelInput: ChannelCreationInputParameters = { - title: `Cleanup test channel ${i + 1}`, - avatarPhotoPath, - coverPhotoPath, - description: `This is a cleanup test channel ${i + 1}`, - isPublic: true, - language: 'en', - } - const channelId = await joystreamCli.createChannel(channelInput, [ - '--context', - 'Member', - '--useMemberId', - memberId.toString(), - ]) - debug(`Created channel ${i + 1}`) - channelsData.push({ channelId, avatarPhotoPath, coverPhotoPath }) - } - const channelIds = channelsData.map((c) => c.channelId) - - // Wait until QN processes the channels - debug('Waiting for QN to process the channels...') - const channels = await query.tryQueryWithTimeout( - () => query.channelsByIds(channelIds.map((id) => id.toString())), - (r) => Utils.assert(r.length === numChannels, `Expected ${numChannels} channels, found: ${r.length}`) - ) - - // Give colossus nodes some time to sync - debug('Giving colossus nodes 120 seconds to sync...') - await Utils.wait(120_000) + const generateAssetsFixture = new GenerateAssetsFixture(api, query, joystreamCli, { numberOfChannels: 3 }) + await new FixtureRunner(generateAssetsFixture).runWithQueryNodeChecks() + const channelsData = generateAssetsFixture.channelsCreated + const channelIds = channelsData.map((c) => c.id) // Verify that both storage nodes store all the assets of created channels const colossus1Endpoint = doubleBucketConfig.buckets[0].metadata.endpoint @@ -78,39 +33,10 @@ export async function storageCleanup({ api, query }: FlowProps): Promise { const colossus1Api = new ColossusApi(urljoin(colossus1Endpoint, 'api/v1')) const colossus2Api = new ColossusApi(urljoin(colossus2Endpoint, 'api/v1')) - const verifyAssets = async (colossus1StoredChannelIds: number[], colossus2StoredChannelIds: number[]) => { - const verifyAssetsPromises = channelsData.map(async ({ channelId, avatarPhotoPath, coverPhotoPath }) => { - const channel = channels.find((c) => c.id === channelId.toString()) - Utils.assert(channel, `Channel ${channelId} missing in QN result`) - Utils.assert(channel.coverPhoto && channel.avatarPhoto, `Channel assets missing in QN result`) - if (colossus1StoredChannelIds.includes(channelId)) { - await Promise.all([ - colossus1Api.fetchAndVerifyAsset(channel.coverPhoto.id, readFileSync(coverPhotoPath), 'image/bmp'), - colossus1Api.fetchAndVerifyAsset(channel.avatarPhoto.id, readFileSync(avatarPhotoPath), 'image/bmp'), - ]) - } else { - await Promise.all([ - colossus1Api.expectAssetNotFound(channel.coverPhoto.id), - colossus1Api.expectAssetNotFound(channel.avatarPhoto.id), - ]) - } - if (colossus2StoredChannelIds.includes(channelId)) { - await Promise.all([ - 
colossus2Api.fetchAndVerifyAsset(channel.coverPhoto.id, readFileSync(coverPhotoPath), 'image/bmp'), - colossus2Api.fetchAndVerifyAsset(channel.avatarPhoto.id, readFileSync(avatarPhotoPath), 'image/bmp'), - ]) - } else { - await Promise.all([ - colossus2Api.expectAssetNotFound(channel.coverPhoto.id), - colossus2Api.expectAssetNotFound(channel.avatarPhoto.id), - ]) - } - }) - await Promise.all(verifyAssetsPromises) - } - - // At this point we expect both nodes to store all assets - await verifyAssets(channelIds, channelIds) + await generateAssetsFixture.verifyAssets([ + { api: colossus1Api, channelIds }, + { api: colossus2Api, channelIds }, + ]) debug('All assets correctly stored!') // Delete the 1st channel @@ -126,23 +52,22 @@ export async function storageCleanup({ api, query }: FlowProps): Promise { api.tx.storage.updateStorageBucketsForBag( bag1Id, createType('BTreeSet', []), - createType('BTreeSet', [1]) // Remove 1st bucket (colossu2) + createType('BTreeSet', [1]) // Remove 1st bucket (colossus2) ), api.tx.storage.updateStorageBucketsForBag( bag2Id, createType('BTreeSet', []), - createType('BTreeSet', [0]) // Remove 0th bucket (colossu1) + createType('BTreeSet', [0]) // Remove 0th bucket (colossus1) ), ] await api.sendExtrinsicsAndGetResults(updateTxs, storageLeaderKey) - // Wait 2 minutes to make sure cleanup is executed - debug('Giving nodes 120 seconds to cleanup...') - await Utils.wait(120_000) - - // Verify that Colossus2 (w/ auto cleanup) no longer stores 1st and 2nd channel assets, - // while Colossus1 still stores all assets - await verifyAssets(channelIds, channelIds.slice(2)) + // Verify that colossus1 only stores 2nd channel assets, + // while colossus2 only stores 3rd channel assets + await generateAssetsFixture.verifyAssets([ + { api: colossus1Api, channelIds: [channelIds[1]] }, + { api: colossus2Api, channelIds: [channelIds[2]] }, + ]) debug('Cleanup correctly executed!') debug('Done') diff --git a/tests/network-tests/src/scenarios/full.ts b/tests/network-tests/src/scenarios/full.ts index ae372f5262..9daaec620c 100644 --- a/tests/network-tests/src/scenarios/full.ts +++ b/tests/network-tests/src/scenarios/full.ts @@ -22,8 +22,6 @@ import failToElect from '../flows/council/failToElect' import exactExecutionBlock from '../flows/proposals/exactExecutionBlock' import expireProposal from '../flows/proposals/expireProposal' import proposalsDiscussion from '../flows/proposalsDiscussion' -import initDistributionBucket from '../flows/clis/initDistributionBucket' -import initStorageBucket from '../flows/clis/initStorageBucket' import channelsAndVideos from '../flows/clis/channelsAndVideos' import { scenario } from '../Scenario' import activeVideoCounters from '../flows/content/activeVideoCounters' @@ -42,7 +40,6 @@ import { updateApp } from '../flows/content/updateApp' import curatorModerationActions from '../flows/content/curatorModerationActions' import collaboratorAndCuratorPermissions from '../flows/content/collaboratorAndCuratorPermissions' import updateValidatorVerificationStatus from '../flows/membership/updateValidatorVerifications' -import { storageCleanup } from '../flows/storage/storageCleanup' // eslint-disable-next-line @typescript-eslint/no-floating-promises scenario('Full', async ({ job }) => { @@ -113,21 +110,10 @@ scenario('Full', async ({ job }) => { 'curators and collaborators permissions', collaboratorAndCuratorPermissions ).after(curatorModerationActionsJob) - const directChannelPaymentJob = job('direct channel payment by members', directChannelPayment).after( - 
collaboratorAndCuratorPermissionsJob - ) + job('direct channel payment by members', directChannelPayment).after(collaboratorAndCuratorPermissionsJob) // Apps job('create app', createApp).after(hireLeads) job('update app', updateApp).after(hireLeads) job('create app actions', createAppActions).after(hireLeads) - - const contentDirectoryJob = directChannelPaymentJob // keep updated to last job above - - // Storage cleanup - const storageCleanupJob = job('storage cleanup', storageCleanup).after(contentDirectoryJob) - // Storage & distribution CLIs - job('init storage and distribution buckets via CLI', [initDistributionBucket, initStorageBucket]).after( - storageCleanupJob - ) }) diff --git a/tests/network-tests/src/scenarios/storage.ts b/tests/network-tests/src/scenarios/storage.ts new file mode 100644 index 0000000000..27ba496559 --- /dev/null +++ b/tests/network-tests/src/scenarios/storage.ts @@ -0,0 +1,17 @@ +import { scenario } from '../Scenario' +import initDistributionBucket from '../flows/clis/initDistributionBucket' +import initStorageBucket from '../flows/clis/initStorageBucket' +import { storageSync } from '../flows/storage/storageSync' +import { storageCleanup } from '../flows/storage/storageCleanup' +import { setReplicationRate } from '../flows/storage/setReplicationRate' + +// eslint-disable-next-line @typescript-eslint/no-floating-promises +scenario('Storage', async ({ job }) => { + const setReplicationRateJob = job('set replication rate', setReplicationRate) + const storageSyncJob = job('storage sync', storageSync).after(setReplicationRateJob) + const storageCleanupJob = job('storage cleanup', storageCleanup).after(storageSyncJob) + // Storage & distribution CLIs + job('init storage and distribution buckets via CLI', [initDistributionBucket, initStorageBucket]).after( + storageCleanupJob + ) +}) diff --git a/tests/network-tests/src/scenarios/storageSync.ts b/tests/network-tests/src/scenarios/storageSync.ts deleted file mode 100644 index 2973de2fe8..0000000000 --- a/tests/network-tests/src/scenarios/storageSync.ts +++ /dev/null @@ -1,8 +0,0 @@ -import { scenario } from '../Scenario' -import { storageSync } from '../flows/storage/storageSync' - -// eslint-disable-next-line @typescript-eslint/no-floating-promises -scenario('Storage sync', async ({ job }) => { - // DEPENDS OF STORGE BEEING INITIALIZED WITH AT LEAST 2 BUCKETS! - job('test storage node sync', storageSync) -}) diff --git a/tests/network-tests/start-storage.sh b/tests/network-tests/start-storage.sh index 6acb285a73..caca30e0d5 100755 --- a/tests/network-tests/start-storage.sh +++ b/tests/network-tests/start-storage.sh @@ -13,6 +13,19 @@ export COLOSSUS_1_URL="http://${HOST_IP}:3333" export DISTRIBUTOR_1_URL="http://${HOST_IP}:3334" export COLOSSUS_2_URL="http://${HOST_IP}:3335" export DISTRIBUTOR_2_URL="http://${HOST_IP}:3336" + +if [ ! -z "$CLEANUP_INTERVAL" ]; then + # Cleanup testing configuration + export CLEANUP="true" + export CLEANUP_INTERVAL + export CLEANUP_NEW_OBJECT_EXPIRATION_PERIOD=10 # 10 seconds + export CLEANUP_MIN_REPLICATION_THRESHOLD=1 + echo "Cleanup enabled!" + echo "Cleanup interval: ${CLEANUP_INTERVAL}m" + echo "New object expiration period: ${CLEANUP_NEW_OBJECT_EXPIRATION_PERIOD}s" + echo "Min. 
replication threshold: ${CLEANUP_MIN_REPLICATION_THRESHOLD}" +fi + $THIS_DIR/run-test-scenario.sh initStorageAndDistribution # give QN time to catch up so nodes can get their initial state From 61c1ec4756259659fb260c7dccfbaf6424bef05a Mon Sep 17 00:00:00 2001 From: Lezek123 Date: Tue, 11 Feb 2025 19:01:28 +0100 Subject: [PATCH 2/3] GitHub workflows: Migrate actions/(upload|download)-artifact to v4 --- .github/workflows/create-release.yml | 4 ++-- .github/workflows/deploy-node-network.yml | 2 +- .github/workflows/deploy-playground.yml | 4 ++-- .github/workflows/run-network-tests.yml | 4 ++-- devops/extrinsic-ordering/tx-ordering.yml | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/create-release.yml b/.github/workflows/create-release.yml index 2f18f24993..a555e8c012 100644 --- a/.github/workflows/create-release.yml +++ b/.github/workflows/create-release.yml @@ -39,7 +39,7 @@ jobs: tar czvf joystream-node-macos.tar.gz -C ./target/release joystream-node - name: Temporarily save node binary - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: joystream-node-macos-${{ steps.compute_shasum.outputs.shasum }} path: joystream-node-macos.tar.gz @@ -80,7 +80,7 @@ jobs: tar -czvf joystream-node-$VERSION_AND_COMMIT-arm64-linux-gnu.tar.gz joystream-node - name: Retrieve saved MacOS binary - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: joystream-node-macos-${{ steps.compute_shasum.outputs.shasum }} diff --git a/.github/workflows/deploy-node-network.yml b/.github/workflows/deploy-node-network.yml index 5d51135494..23f02eb537 100644 --- a/.github/workflows/deploy-node-network.yml +++ b/.github/workflows/deploy-node-network.yml @@ -168,7 +168,7 @@ jobs: 7z a -p${{ steps.network_config.outputs.encryptionKey }} chain-data.7z deploy_artifacts/* - name: Save the output as an artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: data-chainspec-auth path: devops/ansible/chain-data.7z diff --git a/.github/workflows/deploy-playground.yml b/.github/workflows/deploy-playground.yml index 2c5abe6813..f15e40cf1e 100644 --- a/.github/workflows/deploy-playground.yml +++ b/.github/workflows/deploy-playground.yml @@ -34,7 +34,7 @@ on: description: 'SURI of treasury account' required: false default: '//Alice' - initialBalances: + initialBalances: description: 'JSON string or http URL to override initial balances and vesting config' default: '' required: false @@ -112,7 +112,7 @@ jobs: --verbose - name: Save the endpoints file as an artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: endpoints path: devops/ansible/endpoints.json diff --git a/.github/workflows/run-network-tests.yml b/.github/workflows/run-network-tests.yml index fef9b8be46..d4ab57e4dc 100644 --- a/.github/workflows/run-network-tests.yml +++ b/.github/workflows/run-network-tests.yml @@ -141,7 +141,7 @@ jobs: if: steps.check_files.outputs.files_exists == 'false' - name: Save joystream/node image to Artifacts - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ${{ steps.compute_shasum.outputs.shasum }}-joystream-node-docker-image.tar.gz path: joystream-node-docker-image.tar.gz @@ -166,7 +166,7 @@ jobs: with: node-version: '18.x' - name: Get artifacts - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: ${{ needs.build_images.outputs.use_artifact }} - name: Install artifacts diff --git 
a/devops/extrinsic-ordering/tx-ordering.yml b/devops/extrinsic-ordering/tx-ordering.yml index 0613ed04d3..fbe514174e 100644 --- a/devops/extrinsic-ordering/tx-ordering.yml +++ b/devops/extrinsic-ordering/tx-ordering.yml @@ -74,7 +74,7 @@ jobs: run: pkill polkadot - name: Save output as artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ${{ env.CHAIN }} path: | From 014dc7ea089704af7373bf90bfca140435fdaa9b Mon Sep 17 00:00:00 2001 From: Lezek123 Date: Tue, 11 Feb 2025 20:12:59 +0100 Subject: [PATCH 3/3] Colossus: Add missing dependencies --- storage-node/package.json | 2 ++ yarn.lock | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/storage-node/package.json b/storage-node/package.json index c3ad4ed960..6d56165830 100644 --- a/storage-node/package.json +++ b/storage-node/package.json @@ -20,6 +20,7 @@ "@oclif/plugin-help": "^3", "@polkadot/api": "10.7.1", "@types/base64url": "^2.0.0", + "@types/cli-progress": "^3.11.6", "@types/express": "4.17.13", "@types/file-type": "^10.9.1", "@types/lodash": "^4.14.171", @@ -39,6 +40,7 @@ "base64url": "^3.0.1", "blake3-wasm": "^2.1.5", "chokidar": "4.0.1", + "cli-progress": "^3.12.0", "cors": "^2.8.5", "cross-fetch": "^3.1.4", "express": "4.17.1", diff --git a/yarn.lock b/yarn.lock index 6e9cdd6401..8762846c2a 100644 --- a/yarn.lock +++ b/yarn.lock @@ -5618,6 +5618,13 @@ resolved "https://registry.npmjs.org/@types/chai/-/chai-4.3.1.tgz" integrity sha512-/zPMqDkzSZ8t3VtxOa4KPq7uzzW978M9Tvh+j7GHKuo6k6GTLxPJ4J5gE5cjfJ26pnXst0N5Hax8Sr0T2Mi9zQ== +"@types/cli-progress@^3.11.6": + version "3.11.6" + resolved "https://registry.yarnpkg.com/@types/cli-progress/-/cli-progress-3.11.6.tgz#94b334ebe4190f710e51c1bf9b4fedb681fa9e45" + integrity sha512-cE3+jb9WRlu+uOSAugewNpITJDt1VF8dHOopPO4IABFc3SXYL5WE/+PTz/FCdZRRfIujiWW3n3aMbv1eIGVRWA== + dependencies: + "@types/node" "*" + "@types/cli-progress@^3.9.1": version "3.11.0" resolved "https://registry.npmjs.org/@types/cli-progress/-/cli-progress-3.11.0.tgz" @@ -8780,6 +8787,13 @@ cli-highlight@^2.1.11: parse5-htmlparser2-tree-adapter "^6.0.0" yargs "^16.0.0" +cli-progress@^3.12.0: + version "3.12.0" + resolved "https://registry.yarnpkg.com/cli-progress/-/cli-progress-3.12.0.tgz#807ee14b66bcc086258e444ad0f19e7d42577942" + integrity sha512-tRkV3HJ1ASwm19THiiLIXLO7Im7wlTuKnvkYaTkyoAPefqjNg7W7DHKUlGRxy9vxDvbyCYQkQozvptuMkGCg8A== + dependencies: + string-width "^4.2.3" + cli-progress@^3.4.0, cli-progress@^3.9.0: version "3.11.0" resolved "https://registry.npmjs.org/cli-progress/-/cli-progress-3.11.0.tgz"