From 22cbb8d24417983d038c6f0531777734497efd59 Mon Sep 17 00:00:00 2001 From: Samika Kashyap Date: Mon, 24 Jun 2024 14:14:39 -0700 Subject: [PATCH 01/11] feat: load generator --- k8s/operator/kustomization.yaml | 6 +- k8s/operator/manifests/operator.yaml | 4 +- keramik/src/simulation.md | 23 +- operator/src/simulation/controller.rs | 19 +- runner/Cargo.toml | 1 + runner/src/scenario/ceramic/mod.rs | 3 +- runner/src/scenario/mod.rs | 1 + runner/src/scenario/stability_test_utils.rs | 141 +++++++++++ runner/src/simulate.rs | 260 ++++++++++++++++++-- 9 files changed, 404 insertions(+), 54 deletions(-) create mode 100644 runner/src/scenario/stability_test_utils.rs diff --git a/k8s/operator/kustomization.yaml b/k8s/operator/kustomization.yaml index 673d8d9a..67e1bcb4 100644 --- a/k8s/operator/kustomization.yaml +++ b/k8s/operator/kustomization.yaml @@ -15,9 +15,9 @@ resources: # Additionally ensure that ./k8s/operator/manifests/operator.yaml uses Always as the imagePullPolicy # images: - - name: keramik/operator - newName: public.ecr.aws/r5b3e0r5/3box/keramik-operator - newTag: latest + - name: samika98/operator + newName: samika98/operator + newTag: lgen # Uncomment for development # diff --git a/k8s/operator/manifests/operator.yaml b/k8s/operator/manifests/operator.yaml index 1fb41be8..0feae55f 100644 --- a/k8s/operator/manifests/operator.yaml +++ b/k8s/operator/manifests/operator.yaml @@ -111,8 +111,8 @@ spec: {} containers: - name: keramik-operator - image: "keramik/operator" - imagePullPolicy: Always # Should be IfNotPresent when using imageTag: dev, but Always if using imageTag: latest + image: "samika98/operator" + imagePullPolicy: IfNotPresent # Should be IfNotPresent when using imageTag: dev, but Always if using imageTag: latest command: - "/usr/bin/keramik-operator" - "daemon" diff --git a/keramik/src/simulation.md b/keramik/src/simulation.md index 63f3eff2..d2cd8ec3 100644 --- a/keramik/src/simulation.md +++ b/keramik/src/simulation.md @@ -10,6 +10,8 @@ To run 
a simulation, first define a simulation. Available simulation types are - `recon-event-sync` - A simulation that creates events for Recon to sync at a fixed rate (~300/s by default). Designed for a 2 node network but should work on any. - `cas-benchmark` - A simulation that benchmarks the CAS network. - `cas-anchoring-benchmark` - A simulation that benchmarks the Ceramic with anchoring enabled. +- `cas-benchmark` - A simulation that benchmarks the CAS network. +- `cas-anchoring-benchmark` - A simulation that benchmarks the Ceramic with anchoring enabled. Using one of these scenarios, we can then define the configuration for that scenario: @@ -152,15 +154,14 @@ spec: ``` ```shell -kubectl apply -f custom-cas-api.yaml +kubectl apply -f custom-ipfs.yaml ``` ### Example Custom Simulation for Ceramic Anchoring Benchmark Use this example to run a simulation which uses the CAS Api defined in the network spec. -`anchorWaitTime`: Wait time in seconds for how long we want to wait after streams have been created to check when they have been anchored. This should be a high number like 30-40 minutes. -`throttleRequests`: Number of requests to send per second. - +anchorWaitTime : Wait time in seconds for how long we want to wait after streams have been created to check when they have been anchored. This should be a high number like 30-40 minutes. +throttleRequests: Number of requests to send per second. ```yaml # ceramic-anchoring-benchamrk.yaml --- @@ -176,8 +177,6 @@ spec: runTime: 60 throttleRequests: 100 anchorWaitTime: 2400 -``` - ```shell kubectl apply -f ceramic-anchoring-benchamrk.yaml @@ -185,13 +184,10 @@ kubectl apply -f ceramic-anchoring-benchamrk.yaml ### Example Custom Simulation for cas-benchmark -Use this example to run a simulation you can pass in the the cas-api-url, the network-type, and the private secret key in the spec. -By default the casNetwork and casController are set to run against cas-dev-direct Api. 
- -`casNetwork`: The url of the CAS network to run the simulation against. - -`casController`: The private key of the controller DID to use for the simulation. - +Use this example to run a simulation you can pass in the the cas-api-url, the network-type, and the private secret ket as the controller. +By default the casNetwork is set to "https://cas-dev-direct.3boxlabs.com" and the casController is set to the private key of the controller DID. +casNetwork: The url of the CAS network to run the simulation against. +casController: The private key of the controller DID to use for the simulation. ```yaml # cas-benchmark.yaml --- @@ -208,7 +204,6 @@ spec: throttleRequests: 100 casNetwork: "https://cas-dev-direct.3boxlabs.com" casController: "did:key:" -``` ```shell kubectl apply -f cas-benchmark.yaml diff --git a/operator/src/simulation/controller.rs b/operator/src/simulation/controller.rs index 23587498..f145324f 100644 --- a/operator/src/simulation/controller.rs +++ b/operator/src/simulation/controller.rs @@ -196,15 +196,16 @@ async fn reconcile_( if manager_ready > 0 { //for loop n peers - apply_n_workers( - cx.clone(), - &ns, - num_peers, - &status, - simulation.clone(), - job_image_config.clone(), - ) - .await?; + + // apply_n_workers( + // cx.clone(), + // &ns, + // num_peers, + // &status, + // simulation.clone(), + // job_image_config.clone(), + // ) + // .await?; } let simulations: Api = Api::namespaced(cx.k_client.clone(), &ns); diff --git a/runner/Cargo.toml b/runner/Cargo.toml index cb5dbb99..415a1860 100644 --- a/runner/Cargo.toml +++ b/runner/Cargo.toml @@ -14,6 +14,7 @@ ceramic-http-client = { git = "https://github.com/3box/ceramic-http-client-rs.gi #ceramic-http-client = { path = "../../ceramic-http-client-rs", default-features = false } clap.workspace = true did-method-key = "0.2" +futures = "0.3" goose = { version = "0.16", features = ["gaggle"] } hex.workspace = true keramik-common = { workspace = true, features = ["telemetry", "tokio-console"] } diff 
--git a/runner/src/scenario/ceramic/mod.rs b/runner/src/scenario/ceramic/mod.rs index 0aab8cb8..0947d4d3 100644 --- a/runner/src/scenario/ceramic/mod.rs +++ b/runner/src/scenario/ceramic/mod.rs @@ -1,11 +1,10 @@ pub mod anchor; pub mod model_instance; -mod models; +pub mod models; pub mod new_streams; pub mod query; pub mod simple; pub mod write_only; - use ceramic_core::ssi::did::{DIDMethod, Document, DocumentBuilder, Source}; use ceramic_core::ssi::jwk::{self, Base64urlUInt, Params, JWK}; use ceramic_http_client::ceramic_event::JwkSigner; diff --git a/runner/src/scenario/mod.rs b/runner/src/scenario/mod.rs index ee00d7aa..83c661bb 100644 --- a/runner/src/scenario/mod.rs +++ b/runner/src/scenario/mod.rs @@ -4,6 +4,7 @@ use goose::GooseError; pub mod ceramic; pub mod ipfs_block_fetch; pub mod recon_sync; +pub mod stability_test_utils; pub mod util; static FIRST_USER: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(true); diff --git a/runner/src/scenario/stability_test_utils.rs b/runner/src/scenario/stability_test_utils.rs new file mode 100644 index 00000000..0b096430 --- /dev/null +++ b/runner/src/scenario/stability_test_utils.rs @@ -0,0 +1,141 @@ +use anyhow::Result; +use ceramic_http_client::{ + api::{self}, + ceramic_event::StreamId, + ModelAccountRelation, ModelDefinition, +}; +use reqwest::Client; +use std::time::Duration; +// use goose::{metrics::GooseRequestMetric, prelude::*}; +// use redis::AsyncCommands; +use super::ceramic::models::RandomModelInstance; +use crate::scenario::ceramic::models::SmallModel; +use crate::scenario::ceramic::CeramicClient; +use tracing::info; + +// Define the StableLoadUser struct with an HTTP client and a throttle rate. 
+ +#[derive(Clone)] +pub struct StableLoadUser { + pub ceramic_client: CeramicClient, + pub http_client: Client, + pub throttle_rate: Duration, + pub base_url: Option, +} + +// Methods associated with StableLoadUser +impl StableLoadUser { + + async fn index_model(&self, model_id: &StreamId) -> Result<()> { + let url = self.ceramic_client.admin_code_endpoint(); + let response = self.http_client.get(url).send().await?; + let resp: api::AdminCodeResponse = response.json().await?; + let req = self + .ceramic_client + .create_index_model_request(model_id, &resp.code) + .await + .unwrap(); + let resp = self.http_client.post(url).json(&req).send().await?; + + let admin_resp: api::AdminCodeResponse = resp.json().await?; + let url = self.ceramic_client.index_endpoint(); + let create_index_req = self + .ceramic_client + .create_index_model_request(model_id, &admin_resp.code) + .await + .unwrap(); + let resp = self.http_client.post(url).json(&create_index_req).send().await?; + if resp.status().is_success() { + Ok(()) + } else { + Err(anyhow::anyhow!("Failed to index model")) + } + } + + pub async fn generate_random_model(&self) -> Result { + let small_model = + ModelDefinition::new::("load_test_small_model", ModelAccountRelation::List) + .unwrap(); + self.setup_model(small_model).await + } + + async fn setup_model(&self, model: ModelDefinition) -> Result { + // I want the url to look something like https://localhost: + models endpoint + the url should have a base + let url = self + .build_url(&self.ceramic_client.streams_endpoint()) + .await + .unwrap(); + info!("URL: {}", url); + let req = self.ceramic_client.create_model_request(&model).await.unwrap(); + let req = self.http_client.post(url).json(&req); + let resp: reqwest::Response = req.send().await?; + if resp.status() == reqwest::StatusCode::OK { + let streams_response: api::StreamsResponse = resp.json().await?; + info!("Stream ID: {:?}", streams_response.stream_id); + Ok(streams_response.stream_id) + } else { + 
Err(anyhow::anyhow!( + "Failed to setup model: status {:?} , resp_text {:?}", + resp.status(), + resp.text().await + )) + } + } + // TODO : Write a setup function which creates the struct by accepting a targetPeerAddress and ceramicClient and returns a StabilityTestUtils + pub async fn setup_stability_test( + ceramic_client: CeramicClient, + base_url: Option, + ) -> StableLoadUser { + let http_client = Client::new(); + return StableLoadUser { + ceramic_client, + http_client, + throttle_rate: Duration::from_millis(100), + base_url, + }; + } + + pub async fn create_random_mid(&self, model: &StreamId) -> Result { + let data = SmallModel::random(); + return self.create_mid(model, &data).await; + } + + async fn create_mid(&self, model: &StreamId, data: &SmallModel) -> Result { + let url = self + .build_url(&self.ceramic_client.streams_endpoint()) + .await + .unwrap(); + let req = self + .ceramic_client + .create_list_instance_request(model, data) + .await + .unwrap(); + let req = self.http_client.post(url).json(&req); + let resp: reqwest::Response = req.send().await?; + if resp.status() == reqwest::StatusCode::OK { + let parsed_resp: api::StreamsResponse = resp.json().await?; + Ok(parsed_resp.stream_id) + } else { + Err(anyhow::anyhow!( + "Failed to create model: status {:?} , resp_text {:?}", + resp.status(), + resp.text().await + )) + } + } + + async fn build_url(&self, path: &str) -> Result { + let base = self + .base_url + .as_ref() + .ok_or_else(|| anyhow::anyhow!("Base URL is not set"))?; + let separator = if path.starts_with('/') || base.ends_with('/') { + "" + } else { + "/" + }; + let full_url = format!("{}{}{}", base, separator, path); + Ok(full_url) + } +} + diff --git a/runner/src/simulate.rs b/runner/src/simulate.rs index e9b58339..eaf2d50c 100644 --- a/runner/src/simulate.rs +++ b/runner/src/simulate.rs @@ -5,7 +5,9 @@ use std::{ }; use anyhow::{anyhow, bail, Result}; +use ceramic_core::StreamId; use clap::{Args, ValueEnum}; +use futures::future; use 
goose::{config::GooseConfiguration, prelude::GooseMetrics, GooseAttack}; use keramik_common::peer_info::Peer; use opentelemetry::{ @@ -14,12 +16,21 @@ use opentelemetry::{ KeyValue, }; use reqwest::Url; +use std::sync::atomic::{AtomicU64, Ordering}; +use tokio::sync::Semaphore; use tokio::time::sleep; +use tokio::time::Instant; use tracing::{error, info, warn}; +// Use the stability test utils +use crate::scenario::stability_test_utils; use crate::{ scenario::{ - ceramic::{self, new_streams}, + ceramic::{ + self, + model_instance::{CeramicModelInstanceTestUser, EnvBasedConfig}, + new_streams, + }, get_redis_client, ipfs_block_fetch, recon_sync, }, utils::{calculate_sample_size, parse_peers_info, select_sample_set_ids}, @@ -1045,7 +1056,42 @@ impl ScenarioState { #[tracing::instrument] pub async fn simulate(opts: Opts) -> Result { - let mut metrics = Metrics::init(&opts)?; + // let mut metrics = Metrics::init(&opts)?; + + // let metrics_collector = Box::new(PromMetricCollector { + // client: reqwest::Client::builder() + // .connect_timeout(Duration::from_secs(3)) + // .timeout(Duration::from_secs(10)) + // .build()?, + // }); + // let mut state: ScenarioState = ScenarioState::try_from_opts(opts, metrics_collector, None).await?; + // let scenario = state.build_goose_scenario().await?; + + // let start = std::time::Instant::now(); + // let goose_metrics = match GooseAttack::initialize_with_config(config)? 
+ // .register_scenario(scenario) + // .execute() + // .await + // { + // Ok(m) => m, + // Err(e) => { + // error!("{:#?}", e); + // return Err(e.into()); + // } + // }; + // let elapsed = start.elapsed(); + + // let (success, peer_rps) = state + // .validate_scenario_success(&goose_metrics, elapsed) + // .await; + // metrics.record(goose_metrics, peer_rps); + + // TODO: Implement load generator that lasts for a week + simulate_for_week(opts).await +} + +pub async fn simulate_for_week(opts: Opts) -> Result { + let mut metrics: Metrics = Metrics::init(&opts)?; let metrics_collector = Box::new(PromMetricCollector { client: reqwest::Client::builder() @@ -1053,30 +1099,196 @@ pub async fn simulate(opts: Opts) -> Result { .timeout(Duration::from_secs(10)) .build()?, }); - let mut state = ScenarioState::try_from_opts(opts, metrics_collector, None).await?; - let scenario = state.build_goose_scenario().await?; - let config: GooseConfiguration = state.goose_config()?; - - let start = std::time::Instant::now(); - let goose_metrics = match GooseAttack::initialize_with_config(config)? 
- .register_scenario(scenario) - .execute() - .await - { - Ok(m) => m, - Err(e) => { - error!("{:#?}", e); - return Err(e.into()); + let state: ScenarioState = ScenarioState::try_from_opts(opts, metrics_collector, None).await?; + let config: EnvBasedConfig = + CeramicModelInstanceTestUser::prep_scenario(state.scenario.into()).await?; + + // TODO : Set up a ceramic client which is authenticated, or get it from somewhere + // TODO : Get address of the ceramic peer from somewhere + let peer_addr = state.target_peer_addr()?; + info!("Peer address: {:?}", peer_addr); + // TODO : Fet the address of the peer + // let peer_addr = peer.ceramic_addr().expect("Peer does not have a ceramic address"); + + let stable_load_user = stability_test_utils::StableLoadUser::setup_stability_test( + config.admin_cli, + Some(peer_addr), + ) + .await; + + // Create a model by calling generate random model + let model = stable_load_user.generate_random_model().await?; + let run_time: u64 = state.run_time.parse().expect("Failed to parse run_time as u64"); + + info!("Model: {:?}", model); + // create a model instance document on loop at 100 rps, by calling create random mid in a loop + let model_instance_creation_result = + create_model_instances_continuously(stable_load_user, model, run_time).await; + info!( + "Model instance creation result: {:?}", + model_instance_creation_result + ); + Ok(CommandResult::Success) +} + +pub async fn create_model_instances_continuously( + stable_load_user: stability_test_utils::StableLoadUser, + model: StreamId, + duration_in_minutes: u64, +) -> Result<()> { + let start_time = Instant::now(); + // Convert hours to seconds for the total duration + // let duration = Duration::from_secs(duration_in_hours * 3600); + let duration = Duration::from_secs(duration_in_minutes * 60); + let mut count = 0; + let mut errors = 0; + + // TODO : Do things with the channel to implement throttling + // TODO : Remove logs after 3 hour test + let (tx, mut rx) = 
tokio::sync::mpsc::channel(10000); + let mut tasks = tokio::task::JoinSet::new(); + for i in 0..5 { + let user_clone = stable_load_user.clone(); + let model = model.clone(); + let tx = tx.clone(); + // Wrap the future in a tokio timeout + tasks.spawn(async move { + loop { + if start_time.elapsed() > duration { + info!("loop {i} Duration expired"); + break; + } + match tokio::time::timeout( + Duration::from_millis(500), + user_clone.create_random_mid(&model), + ) + .await + { + Ok(Ok(mid)) => { + match tx.send(Ok(mid.to_string())).await { + Ok(_) => {} + Err(e) => { + error!("Failed to send MID: {}", e); + } + } + } + Ok(Err(e)) => { + match tx.send(Err(e.to_string())).await { + Ok(_) => {} + Err(e) => { + error!("Failed to send error: {}", e); + } + } + } + Err(e) => { + match tx.send(Err(e.to_string())).await { + Ok(_) => {} + Err(e) => { + error!("Failed to send error: {}", e); + } + } + } + } + } + }); + } + drop(tx); + loop { + let mut mid_vec: Vec> = Vec::new(); + if rx.recv_many(&mut mid_vec, 10).await > 0 { + for mid in mid_vec { + match mid { + Ok(_) => { + count += 1; + } + Err(_) => { + errors += 1; + } + } + } + } + if start_time.elapsed() > duration { + tasks.abort_all(); + break; } - }; - let elapsed = start.elapsed(); + } + info!("Created {} MIDs in {} minutes", count, duration_in_minutes); + info!( + "Failed to create {} MIDs in {} minutes", + errors, duration_in_minutes + ); + Ok(()) +} - let (success, peer_rps) = state - .validate_scenario_success(&goose_metrics, elapsed) - .await; - metrics.record(goose_metrics, peer_rps); +pub async fn create_model_instances_continuously_no_channel( + stable_load_user: stability_test_utils::StableLoadUser, + model: StreamId, + duration_in_hours: u64, + requests_per_second: u64, +) -> Result<()> { + let start_time = Instant::now(); + let duration = Duration::from_secs(duration_in_hours * 360); + let interval = Duration::from_micros(1_000_000 / requests_per_second); + + let count = Arc::new(AtomicU64::new(0)); + 
let errors = Arc::new(AtomicU64::new(0)); + let timeouts = Arc::new(AtomicU64::new(0)); + + let semaphore = Arc::new(Semaphore::new(requests_per_second as usize * 2)); // Allow some bursting + + while start_time.elapsed() < duration { + let permit = semaphore.clone().acquire_owned().await.unwrap(); + let user_clone = stable_load_user.clone(); + let model = model.clone(); + let count_clone = count.clone(); + let errors_clone = errors.clone(); + let timeouts_clone = timeouts.clone(); + + tokio::spawn(async move { + let _permit = permit; // Keep the permit until the task is done + match tokio::time::timeout( + Duration::from_secs(10), + user_clone.create_random_mid(&model), + ) + .await + { + Ok(Ok(mid)) => { + count_clone.fetch_add(1, Ordering::Relaxed); + info!("Created MID: {}", mid); + } + Ok(Err(e)) => { + errors_clone.fetch_add(1, Ordering::Relaxed); + error!("Failed to create MID: {}", e); + } + Err(_) => { + timeouts_clone.fetch_add(1, Ordering::Relaxed); + error!("Request timed out"); + } + } + }); + + tokio::time::sleep(interval).await; + } - Ok(success) + let total_count = count.load(Ordering::Relaxed); + let total_errors = errors.load(Ordering::Relaxed); + let total_timeouts = timeouts.load(Ordering::Relaxed); + let total_requests = total_count + total_errors + total_timeouts; + let elapsed = start_time.elapsed().as_secs_f64(); + + info!( + "Completed creating MIDs continuously for {} hours", + duration_in_hours + ); + info!("Created {} MIDs in {:.2} seconds", total_count, elapsed); + info!("Failed to create {} MIDs", total_errors); + info!("Request timed out {} times", total_timeouts); + info!( + "Actual request rate: {:.2} req/s", + total_requests as f64 / elapsed + ); + + Ok(()) } struct Metrics { @@ -1464,7 +1676,7 @@ mod test { let mut state = ScenarioState::try_from_opts(opts, Box::new(metrics_collector), Some(peers)) .await - .unwrap(); + .unwrap(); state.collect_before_metrics().await.unwrap(); 
state.validate_recon_scenario_success(run_time).await.0 From 9508b89ee660160b5f4ec65e644c108ecfc3e02d Mon Sep 17 00:00:00 2001 From: Mohsin Zaidi <2236875+smrz2001@users.noreply.github.com> Date: Mon, 24 Jun 2024 13:42:54 -0400 Subject: [PATCH 02/11] chore: reduce anchor batch size to 1 (#188) --- operator/src/network/cas.rs | 2 +- operator/src/network/testdata/default_stubs/cas_stateful_set | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/operator/src/network/cas.rs b/operator/src/network/cas.rs index b84c5cc6..c5e40cbe 100644 --- a/operator/src/network/cas.rs +++ b/operator/src/network/cas.rs @@ -481,7 +481,7 @@ pub fn cas_stateful_set_spec( }, EnvVar { name: "ANCHOR_BATCH_SIZE".to_owned(), - value: Some("20".to_owned()), + value: Some("1".to_owned()), ..Default::default() }, EnvVar { diff --git a/operator/src/network/testdata/default_stubs/cas_stateful_set b/operator/src/network/testdata/default_stubs/cas_stateful_set index d3d31411..03553c56 100644 --- a/operator/src/network/testdata/default_stubs/cas_stateful_set +++ b/operator/src/network/testdata/default_stubs/cas_stateful_set @@ -342,7 +342,7 @@ Request { }, { "name": "ANCHOR_BATCH_SIZE", - "value": "20" + "value": "1" }, { "name": "ANCHOR_BATCH_LINGER", From deb07eb2f3dfe48ecebdb745e625f8d1e05356d3 Mon Sep 17 00:00:00 2001 From: Nathaniel Cook Date: Tue, 25 Jun 2024 09:39:51 -0600 Subject: [PATCH 03/11] feat: make anchoring fast by default (#191) * feat: make anchoring fast by default When keramik is used to deploy CAS it is only in the context of a hermetic testing env. Therefore we can assume that anchoring should be as fast as possible in order to all for faster tests etc. Makes two changes: 1. Reduces worker delay to 1s instead of 1m after completing a batch. 2. Configures ganache to instantly mine any transaction. 
* tests --- operator/src/network/cas.rs | 7 ++++++- operator/src/network/controller.rs | 12 ++++++------ .../network/testdata/default_stubs/cas_stateful_set | 4 ++++ .../testdata/default_stubs/ganache_stateful_set | 2 +- 4 files changed, 17 insertions(+), 8 deletions(-) diff --git a/operator/src/network/cas.rs b/operator/src/network/cas.rs index c5e40cbe..4b9181bb 100644 --- a/operator/src/network/cas.rs +++ b/operator/src/network/cas.rs @@ -454,6 +454,11 @@ pub fn cas_stateful_set_spec( value: Some("false".to_owned()), ..Default::default() }, + EnvVar { + name: "SCHEDULER_INTERVAL_MS".to_owned(), + value: Some("1000".to_owned()), + ..Default::default() + }, ], ] .concat(), @@ -648,7 +653,7 @@ pub fn ganache_stateful_set_spec( command: Some([ "node", "/app/dist/node/cli.js", - "--miner.blockTime=5", + "--miner.blockTime=0", "--mnemonic='move sense much taxi wave hurry recall stairs thank brother nut woman'", "--networkId=5777", "-l=80000000", diff --git a/operator/src/network/controller.rs b/operator/src/network/controller.rs index 84eda162..53067ed0 100644 --- a/operator/src/network/controller.rs +++ b/operator/src/network/controller.rs @@ -2858,8 +2858,8 @@ mod tests { "name": "cas-api", "ports": [ { - @@ -272,8 +272,8 @@ - "value": "false" + @@ -276,8 +276,8 @@ + "value": "1000" } ], - "image": "ceramicnetwork/ceramic-anchor-service:latest", @@ -2869,7 +2869,7 @@ mod tests { "name": "cas-worker", "resources": { "limits": { - @@ -442,8 +442,8 @@ + @@ -446,8 +446,8 @@ "value": "dev" } ], @@ -2976,7 +2976,7 @@ mod tests { "ephemeral-storage": "1Gi", "memory": "1Gi" } - @@ -277,12 +277,12 @@ + @@ -281,12 +281,12 @@ "name": "cas-worker", "resources": { "limits": { @@ -2991,7 +2991,7 @@ mod tests { "ephemeral-storage": "1Gi", "memory": "1Gi" } - @@ -365,12 +365,12 @@ + @@ -369,12 +369,12 @@ "name": "cas-scheduler", "resources": { "limits": { @@ -3006,7 +3006,7 @@ mod tests { "ephemeral-storage": "1Gi", "memory": "1Gi" } - @@ -470,7 +470,7 @@ + @@ -474,7 +474,7 @@ ], 
"resources": { "requests": { diff --git a/operator/src/network/testdata/default_stubs/cas_stateful_set b/operator/src/network/testdata/default_stubs/cas_stateful_set index 03553c56..cbe4cf4b 100644 --- a/operator/src/network/testdata/default_stubs/cas_stateful_set +++ b/operator/src/network/testdata/default_stubs/cas_stateful_set @@ -270,6 +270,10 @@ Request { { "name": "SCHEDULER_STOP_AFTER_NO_OP", "value": "false" + }, + { + "name": "SCHEDULER_INTERVAL_MS", + "value": "1000" } ], "image": "ceramicnetwork/ceramic-anchor-service:latest", diff --git a/operator/src/network/testdata/default_stubs/ganache_stateful_set b/operator/src/network/testdata/default_stubs/ganache_stateful_set index e0c7ea0a..58ded89e 100644 --- a/operator/src/network/testdata/default_stubs/ganache_stateful_set +++ b/operator/src/network/testdata/default_stubs/ganache_stateful_set @@ -35,7 +35,7 @@ Request { "command": [ "node", "/app/dist/node/cli.js", - "--miner.blockTime=5", + "--miner.blockTime=0", "--mnemonic='move sense much taxi wave hurry recall stairs thank brother nut woman'", "--networkId=5777", "-l=80000000", From 8e9a4841107fd013d48113a576969d0ead583ca3 Mon Sep 17 00:00:00 2001 From: Mohsin Zaidi <2236875+smrz2001@users.noreply.github.com> Date: Tue, 25 Jun 2024 11:42:22 -0400 Subject: [PATCH 04/11] feat: expose c1 migration cmds (#186) * feat: expose c1 migration cmds * chore: build fixes * fix: use list of args for migration_cmd * fix: tests --------- Co-authored-by: Nathaniel Cook --- keramik/src/ipfs.md | 43 ++++++ operator/src/network/cas.rs | 1 + operator/src/network/ceramic.rs | 61 ++++---- operator/src/network/controller.rs | 145 +++++++++++++++++- operator/src/network/ipfs.rs | 30 +++- operator/src/network/spec.rs | 2 + runner/src/scenario/ceramic/model_instance.rs | 2 +- 7 files changed, 250 insertions(+), 34 deletions(-) diff --git a/keramik/src/ipfs.md b/keramik/src/ipfs.md index 7a3db292..f706a11b 100644 --- a/keramik/src/ipfs.md +++ b/keramik/src/ipfs.md @@ -169,3 
+169,46 @@ spec: commands: - ipfs config --json Swarm.RelayClient.Enabled false ``` + +## Migration from Kubo to Ceramic One + +A Kubo blockstore can be migrated to Ceramic One by specifying the migration command in the IPFS configuration. + +Example [network config](./setup_network.md) that uses Go based IPFS (i.e. Kubo) with its defaults for Ceramic (including a default +blockstore path of `/data/ipfs`) and the Ceramic network set to `dev-unstable`. + +```yaml +apiVersion: "keramik.3box.io/v1alpha1" +kind: Network +metadata: + name: basic-network +spec: + replicas: 5 + ceramic: + - ipfs: + go: {} + networkType: dev-unstable +``` + +Example [network config](./setup_network.md) that uses Ceramic One and specifies what migration command to run before +starting up the node. + +```yaml +apiVersion: "keramik.3box.io/v1alpha1" +kind: Network +metadata: + name: basic-network +spec: + replicas: 5 + ceramic: + - ipfs: + rust: + migrationCmd: + - from-ipfs + - -i + - /data/ipfs/blocks + - -o + - /data/ipfs/ + - --network + - dev-unstable +``` diff --git a/operator/src/network/cas.rs b/operator/src/network/cas.rs index 4b9181bb..617e546b 100644 --- a/operator/src/network/cas.rs +++ b/operator/src/network/cas.rs @@ -600,6 +600,7 @@ pub fn cas_ipfs_stateful_set_spec( ..Default::default() }), spec: Some(PodSpec { + init_containers: config.ipfs.init_container(net_config).map(|c| vec![c]), containers: vec![config.ipfs.container(ipfs_info, net_config)], volumes: Some(volumes), ..Default::default() diff --git a/operator/src/network/ceramic.rs b/operator/src/network/ceramic.rs index 8f8487e8..43b9390e 100644 --- a/operator/src/network/ceramic.rs +++ b/operator/src/network/ceramic.rs @@ -664,35 +664,40 @@ pub fn stateful_set_spec(ns: &str, bundle: &CeramicBundle<'_>) -> StatefulSetSpe .ipfs .container(&bundle.info, bundle.net_config), ], - init_containers: Some(vec![Container { - command: Some(vec![ - "/bin/bash".to_owned(), - "-c".to_owned(), - 
"/ceramic-init/ceramic-init.sh".to_owned(), - ]), - env: Some(init_env), - image: Some(bundle.config.init_image_name.to_owned()), - image_pull_policy: Some(bundle.config.image_pull_policy.to_owned()), - name: "init-ceramic-config".to_owned(), - resources: Some(ResourceRequirements { - limits: Some(bundle.config.resource_limits.clone().into()), - requests: Some(bundle.config.resource_limits.clone().into()), - ..Default::default() - }), - volume_mounts: Some(vec![ - VolumeMount { - mount_path: "/config".to_owned(), - name: "config-volume".to_owned(), - ..Default::default() - }, - VolumeMount { - mount_path: "/ceramic-init".to_owned(), - name: "ceramic-init".to_owned(), + init_containers: Some( + vec![Container { + command: Some(vec![ + "/bin/bash".to_owned(), + "-c".to_owned(), + "/ceramic-init/ceramic-init.sh".to_owned(), + ]), + env: Some(init_env), + image: Some(bundle.config.init_image_name.to_owned()), + image_pull_policy: Some(bundle.config.image_pull_policy.to_owned()), + name: "init-ceramic-config".to_owned(), + resources: Some(ResourceRequirements { + limits: Some(bundle.config.resource_limits.clone().into()), + requests: Some(bundle.config.resource_limits.clone().into()), ..Default::default() - }, - ]), - ..Default::default() - }]), + }), + volume_mounts: Some(vec![ + VolumeMount { + mount_path: "/config".to_owned(), + name: "config-volume".to_owned(), + ..Default::default() + }, + VolumeMount { + mount_path: "/ceramic-init".to_owned(), + name: "ceramic-init".to_owned(), + ..Default::default() + }, + ]), + ..Default::default() + }] + .into_iter() + .chain(bundle.config.ipfs.init_container(bundle.net_config)) + .collect(), + ), volumes: Some(volumes), security_context: Some(PodSecurityContext { fs_group: Some(70), diff --git a/operator/src/network/controller.rs b/operator/src/network/controller.rs index 53067ed0..444fb051 100644 --- a/operator/src/network/controller.rs +++ b/operator/src/network/controller.rs @@ -861,8 +861,8 @@ async fn update_peer_status( 
for ceramic in ceramics { for i in 0..ceramic.info.replicas { let pod_name = ceramic.info.pod_name(i); - let pod = pods.get_status(&pod_name).await?; - if !is_pod_ready(&pod) { + let pod = pods.get_status(&pod_name).await; + if pod.map(|pod| !is_pod_ready(&pod)).unwrap_or(true) { debug!(pod_name, "peer is not ready skipping"); continue; } @@ -4700,4 +4700,145 @@ mod tests { .expect("reconciler"); timeout_after_1s(mocksrv).await; } + #[tokio::test] + #[traced_test] + async fn migration_cmd() { + // Setup network spec and status + let network = Network::test().with_spec(NetworkSpec { + ceramic: Some(vec![CeramicSpec { + ipfs: Some(IpfsSpec::Rust(RustIpfsSpec { + migration_cmd: Some( + vec![ + "from-ipfs", + "-i", + "/data/ipfs/blocks", + "-o", + "/data/ipfs/", + "--network", + "dev-unstable", + ] + .into_iter() + .map(ToOwned::to_owned) + .collect(), + ), + ..Default::default() + })), + ..Default::default() + }]), + ..Default::default() + }); + let mock_rpc_client = default_ipfs_rpc_mock(); + let mut stub = Stub::default().with_network(network.clone()); + stub.ceramics[0].stateful_set.patch(expect![[r#" + --- original + +++ modified + @@ -397,6 +397,95 @@ + "name": "ceramic-init" + } + ] + + }, + + { + + "command": [ + + "/usr/bin/ceramic-one", + + "migrations", + + "from-ipfs", + + "-i", + + "/data/ipfs/blocks", + + "-o", + + "/data/ipfs/", + + "--network", + + "dev-unstable" + + ], + + "env": [ + + { + + "name": "CERAMIC_ONE_BIND_ADDRESS", + + "value": "0.0.0.0:5001" + + }, + + { + + "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", + + "value": "1" + + }, + + { + + "name": "CERAMIC_ONE_KADEMLIA_REPLICATION", + + "value": "6" + + }, + + { + + "name": "CERAMIC_ONE_LOCAL_NETWORK_ID", + + "value": "0" + + }, + + { + + "name": "CERAMIC_ONE_METRICS_BIND_ADDRESS", + + "value": "0.0.0.0:9465" + + }, + + { + + "name": "CERAMIC_ONE_NETWORK", + + "value": "local" + + }, + + { + + "name": "CERAMIC_ONE_STORE_DIR", + + "value": "/data/ipfs" + + }, + + { + + "name": 
"CERAMIC_ONE_SWARM_ADDRESSES", + + "value": "/ip4/0.0.0.0/tcp/4001" + + }, + + { + + "name": "RUST_LOG", + + "value": "info,ceramic_one=debug,multipart=error" + + } + + ], + + "image": "public.ecr.aws/r5b3e0r5/3box/ceramic-one:latest", + + "imagePullPolicy": "Always", + + "name": "ipfs-migration", + + "ports": [ + + { + + "containerPort": 4001, + + "name": "swarm-tcp", + + "protocol": "TCP" + + }, + + { + + "containerPort": 5001, + + "name": "rpc", + + "protocol": "TCP" + + }, + + { + + "containerPort": 9465, + + "name": "metrics", + + "protocol": "TCP" + + } + + ], + + "resources": { + + "limits": { + + "cpu": "1", + + "ephemeral-storage": "1Gi", + + "memory": "1Gi" + + }, + + "requests": { + + "cpu": "1", + + "ephemeral-storage": "1Gi", + + "memory": "1Gi" + + } + + }, + + "volumeMounts": [ + + { + + "mountPath": "/data/ipfs", + + "name": "ipfs-data" + + } + + ] + } + ], + "securityContext": { + "#]]); + stub.cas_ipfs_stateful_set.patch(expect![[r#" + --- original + +++ modified + "#]]); + let (testctx, api_handle) = Context::test(mock_rpc_client); + let fakeserver = ApiServerVerifier::new(api_handle); + let mocksrv = stub.run(fakeserver); + reconcile(Arc::new(network), testctx) + .await + .expect("reconciler"); + timeout_after_1s(mocksrv).await; + } } diff --git a/operator/src/network/ipfs.rs b/operator/src/network/ipfs.rs index a3562a95..7c0c6228 100644 --- a/operator/src/network/ipfs.rs +++ b/operator/src/network/ipfs.rs @@ -9,6 +9,7 @@ use k8s_openapi::{ }; const IPFS_CONTAINER_NAME: &str = "ipfs"; +const IPFS_STORE_DIR: &str = "/data/ipfs"; pub const IPFS_DATA_PV_CLAIM: &str = "ipfs-data"; const IPFS_SERVICE_PORT: i32 = 5001; @@ -69,6 +70,12 @@ impl IpfsConfig { IpfsConfig::Go(config) => config.config_maps(&info), } } + pub fn init_container(&self, net_config: &NetworkConfig) -> Option { + match self { + IpfsConfig::Rust(config) => config.init_container(net_config), + _ => None, + } + } pub fn container(&self, info: impl Into, net_config: &NetworkConfig) -> 
Container { let info = info.into(); match self { @@ -98,6 +105,7 @@ pub struct RustIpfsConfig { storage: PersistentStorageConfig, rust_log: String, env: Option>, + migration_cmd: Option>, } impl RustIpfsConfig { @@ -129,6 +137,7 @@ impl Default for RustIpfsConfig { }, rust_log: "info,ceramic_one=debug,multipart=error".to_owned(), env: None, + migration_cmd: None, } } } @@ -149,6 +158,7 @@ impl From for RustIpfsConfig { storage: PersistentStorageConfig::from_spec(value.storage, default.storage), rust_log: value.rust_log.unwrap_or(default.rust_log), env: value.env, + migration_cmd: value.migration_cmd, } } } @@ -177,7 +187,7 @@ impl RustIpfsConfig { }, EnvVar { name: "CERAMIC_ONE_STORE_DIR".to_owned(), - value: Some("/data/ipfs".to_owned()), + value: Some(IPFS_STORE_DIR.to_owned()), ..Default::default() }, EnvVar { @@ -253,7 +263,7 @@ impl RustIpfsConfig { ..Default::default() }), volume_mounts: Some(vec![VolumeMount { - mount_path: "/data/ipfs".to_owned(), + mount_path: IPFS_STORE_DIR.to_owned(), name: IPFS_DATA_PV_CLAIM.to_owned(), ..Default::default() }]), @@ -261,6 +271,20 @@ impl RustIpfsConfig { ..Default::default() } } + + fn init_container(&self, net_config: &NetworkConfig) -> Option { + self.migration_cmd.as_ref().map(|cmd| Container { + name: "ipfs-migration".to_string(), + command: Some( + vec!["/usr/bin/ceramic-one", "migrations"] + .into_iter() + .chain(cmd.iter().map(String::as_str)) + .map(ToOwned::to_owned) + .collect(), + ), + ..self.container(net_config) + }) + } } pub struct GoIpfsConfig { @@ -364,7 +388,7 @@ ipfs config --json Swarm.ResourceMgr.MaxFileDescriptors 500000 fn container(&self, info: &IpfsInfo) -> Container { let mut volume_mounts = vec![ VolumeMount { - mount_path: "/data/ipfs".to_owned(), + mount_path: IPFS_STORE_DIR.to_owned(), name: IPFS_DATA_PV_CLAIM.to_owned(), ..Default::default() }, diff --git a/operator/src/network/spec.rs b/operator/src/network/spec.rs index 66b68852..f19a4e79 100644 --- a/operator/src/network/spec.rs +++ 
b/operator/src/network/spec.rs @@ -204,6 +204,8 @@ pub struct RustIpfsSpec { /// Extra env values to pass to the image. /// CAUTION: Any env vars specified in this set will override any predefined values. pub env: Option>, + /// Migration command that should run before a node comes up + pub migration_cmd: Option>, } /// Describes how the Go IPFS node for a peer should behave. diff --git a/runner/src/scenario/ceramic/model_instance.rs b/runner/src/scenario/ceramic/model_instance.rs index 292a37c0..220d4fd3 100644 --- a/runner/src/scenario/ceramic/model_instance.rs +++ b/runner/src/scenario/ceramic/model_instance.rs @@ -258,8 +258,8 @@ impl CeramicModelInstanceTestUser { config, user_info: GooseUserInfo { lead_user, - global_leader, lead_worker: is_goose_lead_worker(), + global_leader, }, small_model_id, small_model_instance_ids, From 78b972b604c1b09c42c6a0ef4abc8a6bca0b6c58 Mon Sep 17 00:00:00 2001 From: Nathaniel Cook Date: Wed, 26 Jun 2024 08:58:17 -0600 Subject: [PATCH 05/11] fix: use smallest time for block time (#193) Using the instamine option of ganache confuses the CAS worker and it waits forever to see the transaction has been mined. This change uses a block time of 1 second, however in practice this appears to mean a block every 15 seconds. Faster than once a minute but still not very fast. 
--- operator/src/network/cas.rs | 4 ++-- .../src/network/testdata/default_stubs/ganache_stateful_set | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/operator/src/network/cas.rs b/operator/src/network/cas.rs index 617e546b..d378c250 100644 --- a/operator/src/network/cas.rs +++ b/operator/src/network/cas.rs @@ -654,11 +654,11 @@ pub fn ganache_stateful_set_spec( command: Some([ "node", "/app/dist/node/cli.js", - "--miner.blockTime=0", + "--miner.blockTime=1", "--mnemonic='move sense much taxi wave hurry recall stairs thank brother nut woman'", "--networkId=5777", "-l=80000000", - "--quiet", + "-v", ].map(String::from).to_vec()), image: Some("trufflesuite/ganache".to_owned()), image_pull_policy: Some("IfNotPresent".to_owned()), diff --git a/operator/src/network/testdata/default_stubs/ganache_stateful_set b/operator/src/network/testdata/default_stubs/ganache_stateful_set index 58ded89e..4f4c87be 100644 --- a/operator/src/network/testdata/default_stubs/ganache_stateful_set +++ b/operator/src/network/testdata/default_stubs/ganache_stateful_set @@ -35,11 +35,11 @@ Request { "command": [ "node", "/app/dist/node/cli.js", - "--miner.blockTime=0", + "--miner.blockTime=1", "--mnemonic='move sense much taxi wave hurry recall stairs thank brother nut woman'", "--networkId=5777", "-l=80000000", - "--quiet" + "-v" ], "image": "trufflesuite/ganache", "imagePullPolicy": "IfNotPresent", From 27fab9bdf339ca2c8a5a02874def216b7e8eea97 Mon Sep 17 00:00:00 2001 From: Nathaniel Cook Date: Wed, 26 Jun 2024 08:58:31 -0600 Subject: [PATCH 06/11] chore: update semantic commits to be consistent with other repos (#192) --- .github/workflows/pr.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 652c34b7..934f8026 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -8,3 +8,8 @@ on: jobs: semantic: uses: influxdata/validate-semantic-github-messages/.github/workflows/semantic.yml@main + with: 
+ # When true: + # If there is one commit, only validate its commit message (and not the PR title). + # Else validate PR title only (and skip commit messages). + CHECK_PR_TITLE_OR_ONE_COMMIT: true From cee25d96ef18fa3c5f0d542526b78d8af4b2af81 Mon Sep 17 00:00:00 2001 From: David Estes <5317198+dav1do@users.noreply.github.com> Date: Thu, 27 Jun 2024 09:51:04 -0600 Subject: [PATCH 07/11] chore: update ceramic http client and fix recon sync test (#189) --- Cargo.lock | 22 +++++++----- runner/Cargo.toml | 3 +- runner/src/scenario/ceramic/anchor.rs | 18 ++++++---- runner/src/scenario/ceramic/mod.rs | 2 +- runner/src/scenario/ceramic/model_instance.rs | 1 - runner/src/scenario/recon_sync.rs | 11 +++--- runner/src/scenario/util.rs | 36 +++++++------------ 7 files changed, 44 insertions(+), 49 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 319ecef3..66b61d59 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -703,11 +703,10 @@ dependencies = [ [[package]] name = "ceramic-core" -version = "0.18.0" -source = "git+https://github.com/ceramicnetwork/rust-ceramic.git?branch=main#c803ac49ec7b136a3aca42927c5696649238841a" +version = "0.24.0" +source = "git+https://github.com/ceramicnetwork/rust-ceramic.git?branch=main#8333cc9021f54bab1b941712fce5e2fdae60787a" dependencies = [ "anyhow", - "async-trait", "base64 0.21.7", "cid 0.11.1", "did-method-key", @@ -731,23 +730,28 @@ dependencies = [ [[package]] name = "ceramic-event" -version = "0.18.0" -source = "git+https://github.com/ceramicnetwork/rust-ceramic.git?branch=main#c803ac49ec7b136a3aca42927c5696649238841a" +version = "0.24.0" +source = "git+https://github.com/ceramicnetwork/rust-ceramic.git?branch=main#8333cc9021f54bab1b941712fce5e2fdae60787a" dependencies = [ "anyhow", - "async-trait", + "base64 0.21.7", "ceramic-core", "cid 0.11.1", "ipld-core", + "iroh-car", "multihash-codetable", "serde", + "serde_ipld_dagcbor", "serde_json", + "ssi", + "tokio", + "tracing", ] [[package]] name = "ceramic-http-client" version = "0.1.0" 
-source = "git+https://github.com/3box/ceramic-http-client-rs.git?branch=main#ffa148277f686b694100430f0125e65169c9f548" +source = "git+https://github.com/3box/ceramic-http-client-rs.git?branch=main#536203fb3c71ab2cc1f73e70bb838b372ef12453" dependencies = [ "anyhow", "ceramic-event", @@ -2456,8 +2460,8 @@ dependencies = [ [[package]] name = "iroh-car" -version = "0.18.0" -source = "git+https://github.com/ceramicnetwork/rust-ceramic.git?branch=main#c803ac49ec7b136a3aca42927c5696649238841a" +version = "0.24.0" +source = "git+https://github.com/ceramicnetwork/rust-ceramic.git?branch=main#8333cc9021f54bab1b941712fce5e2fdae60787a" dependencies = [ "cid 0.11.1", "futures", diff --git a/runner/Cargo.toml b/runner/Cargo.toml index 415a1860..f129dc65 100644 --- a/runner/Cargo.toml +++ b/runner/Cargo.toml @@ -11,7 +11,6 @@ async-trait.workspace = true ceramic-core.workspace = true iroh-car.workspace = true ceramic-http-client = { git = "https://github.com/3box/ceramic-http-client-rs.git", branch = "main", default-features = false } -#ceramic-http-client = { path = "../../ceramic-http-client-rs", default-features = false } clap.workspace = true did-method-key = "0.2" futures = "0.3" @@ -26,7 +25,7 @@ opentelemetry.workspace = true rand = "0.8.5" redis = { version = "0.24", features = ["tokio-comp"] } reqwest.workspace = true -serde = { version = "1.0", features = ["derive"] } +serde = { version = "1.0", features = ["derive"] } serde_ipld_dagcbor = "0.6" serde_ipld_dagjson = "0.2" schemars.workspace = true diff --git a/runner/src/scenario/ceramic/anchor.rs b/runner/src/scenario/ceramic/anchor.rs index 4c83a335..ead72b1a 100644 --- a/runner/src/scenario/ceramic/anchor.rs +++ b/runner/src/scenario/ceramic/anchor.rs @@ -1,6 +1,9 @@ use anyhow::Result; use ceramic_core::{Cid, DagCborEncoded}; -use ceramic_http_client::ceramic_event::{DidDocument, JwkSigner, Jws, StreamId}; +use ceramic_http_client::{ + ceramic_event::{unvalidated, DidDocument, StreamId}, + JwsBuilder, +}; use 
chrono::Utc; use goose::prelude::*; use ipld_core::ipld; @@ -32,10 +35,13 @@ async fn auth_header(url: String, controller: String, digest: Cid) -> Result Result<(Cid, Vec)> { let root_block = ipld!({ "timestamp": Utc::now().to_rfc3339(), - "streamId": stream_id.to_vec()?, + "streamId": stream_id.to_vec(), "tip": genesis_cid, }); diff --git a/runner/src/scenario/ceramic/mod.rs b/runner/src/scenario/ceramic/mod.rs index 0947d4d3..fced131c 100644 --- a/runner/src/scenario/ceramic/mod.rs +++ b/runner/src/scenario/ceramic/mod.rs @@ -7,7 +7,7 @@ pub mod simple; pub mod write_only; use ceramic_core::ssi::did::{DIDMethod, Document, DocumentBuilder, Source}; use ceramic_core::ssi::jwk::{self, Base64urlUInt, Params, JWK}; -use ceramic_http_client::ceramic_event::JwkSigner; +use ceramic_http_client::ceramic_event::unvalidated::signed::JwkSigner; use ceramic_http_client::CeramicHttpClient; use models::RandomModelInstance; diff --git a/runner/src/scenario/ceramic/model_instance.rs b/runner/src/scenario/ceramic/model_instance.rs index 220d4fd3..dbd39a8b 100644 --- a/runner/src/scenario/ceramic/model_instance.rs +++ b/runner/src/scenario/ceramic/model_instance.rs @@ -688,7 +688,6 @@ impl ModelInstanceRequests { let name = format!("create_index_model_{}", tx_name); let req = cli .create_index_model_request(model_id, &resp.code) - .await .unwrap(); let mut goose = user .request( diff --git a/runner/src/scenario/recon_sync.rs b/runner/src/scenario/recon_sync.rs index 9630696e..49801800 100644 --- a/runner/src/scenario/recon_sync.rs +++ b/runner/src/scenario/recon_sync.rs @@ -120,17 +120,17 @@ async fn create_new_event(user: &mut GooseUser) -> TransactionResult { let user_data: &ReconCeramicModelInstanceTestUser = user .get_session_data() .expect("we are missing sync_event_id user data"); - let data = random_init_event_car( - SORT_KEY, - user_data.model_id.to_vec().unwrap(), + let event = random_init_event_car( + user_data.model_id.to_vec(), Some(TEST_CONTROLLER.to_string()), ) 
.await .unwrap(); - let event_key_body = serde_json::json!({"data": data}); // eventId needs to be a multibase encoded string for the API to accept it - let cnt = NEW_EVENT_CNT.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + let event_key_body = serde_json::json!({ + "data": event, + }); if cnt == 0 || cnt % 1000 == 0 { tracing::trace!("new sync_event_id body: {:?}", event_key_body); @@ -150,7 +150,6 @@ async fn create_new_event(user: &mut GooseUser) -> TransactionResult { } } -const SORT_KEY: &str = "model"; // hard code test controller in case we want to find/prune later const TEST_CONTROLLER: &str = "did:key:z6MkoFUppcKEVYTS8oVidrja94UoJTatNhnhxJRKF7NYPScS"; diff --git a/runner/src/scenario/util.rs b/runner/src/scenario/util.rs index a603356d..21fee1a4 100644 --- a/runner/src/scenario/util.rs +++ b/runner/src/scenario/util.rs @@ -1,8 +1,8 @@ use std::io::Write; -use ceramic_core::{Cid, DagCborEncoded}; +use ceramic_core::{Cid, DagCborEncoded, MultiBase32String}; +use ceramic_http_client::ceramic_event::unvalidated; use goose::GooseError; -use ipld_core::ipld; use multihash_codetable::{Code, MultihashDigest}; use rand::{distributions::Alphanumeric, thread_rng, Rng}; use unsigned_varint::encode; @@ -43,7 +43,7 @@ pub fn create_stream() -> anyhow::Result<( .map(char::from) .collect(); - let genesis_commit = ipld!({ + let genesis_commit = ipld_core::ipld!({ "header": { "unique": gen_rand_bytes::<12>().as_slice(), "controllers": [controller] @@ -73,10 +73,9 @@ pub fn write_stream_bytes(cid: &Cid) -> anyhow::Result> { } pub(crate) async fn random_init_event_car( - sort_key: &str, model: Vec, controller: Option, -) -> anyhow::Result { +) -> Result { let controller = if let Some(owner) = controller { owner } else { @@ -88,25 +87,14 @@ pub(crate) async fn random_init_event_car( }; let unique = gen_rand_bytes::<12>(); - let init = ipld!({ - "header": { - "controllers": [controller], - "model": model, - "sep": sort_key, - "unique": unique.as_slice(), - } - }); - - 
let block = DagCborEncoded::new(&init)?; - let cid = Cid::new_v1(DAG_CBOR_CODEC, Code::Sha2_256.digest(block.as_ref())); - - let mut buf = Vec::new(); - let roots = vec![cid]; - let mut writer = iroh_car::CarWriter::new(iroh_car::CarHeader::V1(roots.into()), &mut buf); - writer.write(cid, block).await?; - writer.finish().await.unwrap(); - - Ok(multibase::encode(multibase::Base::Base36Lower, buf)) + let res = unvalidated::Builder::init() + .with_controller(controller) + .with_sep("model".to_string(), model) + .with_unique(unique.to_vec()) + .with_data(ipld_core::ipld!({"a": 1, "b": 2})) + .build(); + let car = res.encode_car().await?; + Ok(MultiBase32String::from(car)) } fn gen_rand_bytes() -> [u8; SIZE] { From d85f856535763492c0e0d1d9ba9e643f8761defe Mon Sep 17 00:00:00 2001 From: David Estes <5317198+dav1do@users.noreply.github.com> Date: Tue, 2 Jul 2024 12:58:23 -0600 Subject: [PATCH 08/11] chore: update ceramic one swarm port to 4101 and rpc port to 5101 (#194) * chore: update ceramic one swarm port to 4101 and rpc port to 5101 * chore: update CAS ipfs ports as well also renamed some constants * chore: fmt --- operator/src/network/cas.rs | 8 ++-- operator/src/network/ceramic.rs | 17 ++++---- operator/src/network/controller.rs | 37 ++++++++-------- operator/src/network/ipfs.rs | 20 +++++---- operator/src/network/testdata/ceramic_go_ss_1 | 8 ++-- .../src/network/testdata/ceramic_go_svc_1 | 4 +- operator/src/network/testdata/ceramic_ss_1 | 12 +++--- .../network/testdata/ceramic_ss_weighted_0 | 12 +++--- .../network/testdata/ceramic_ss_weighted_1 | 12 +++--- .../network/testdata/ceramic_ss_weighted_2 | 12 +++--- .../network/testdata/ceramic_ss_weighted_3 | 12 +++--- .../network/testdata/ceramic_ss_weighted_4 | 12 +++--- .../network/testdata/ceramic_ss_weighted_5 | 12 +++--- .../network/testdata/ceramic_ss_weighted_6 | 12 +++--- .../network/testdata/ceramic_ss_weighted_7 | 12 +++--- .../network/testdata/ceramic_ss_weighted_8 | 12 +++--- 
.../network/testdata/ceramic_ss_weighted_9 | 12 +++--- operator/src/network/testdata/ceramic_svc_1 | 4 +- .../network/testdata/ceramic_svc_weighted_0 | 4 +- .../network/testdata/ceramic_svc_weighted_1 | 4 +- .../network/testdata/ceramic_svc_weighted_2 | 4 +- .../network/testdata/ceramic_svc_weighted_3 | 4 +- .../network/testdata/ceramic_svc_weighted_4 | 4 +- .../network/testdata/ceramic_svc_weighted_5 | 4 +- .../network/testdata/ceramic_svc_weighted_6 | 4 +- .../network/testdata/ceramic_svc_weighted_7 | 4 +- .../network/testdata/ceramic_svc_weighted_8 | 4 +- .../network/testdata/ceramic_svc_weighted_9 | 4 +- .../network/testdata/ceramic_weighted_peers | 2 +- .../network/testdata/ceramics_weighted_status | 42 +++++++++---------- .../testdata/default_stubs/cas_ipfs_service | 4 +- .../default_stubs/cas_ipfs_stateful_set | 8 ++-- .../testdata/default_stubs/cas_stateful_set | 2 +- .../testdata/default_stubs/ceramic_service | 4 +- .../default_stubs/ceramic_stateful_set | 12 +++--- .../src/network/testdata/go_ipfs_configmap | 2 +- .../src/network/testdata/go_ipfs_configmap_1 | 2 +- .../testdata/go_ipfs_configmap_commands | 2 +- 38 files changed, 178 insertions(+), 172 deletions(-) diff --git a/operator/src/network/cas.rs b/operator/src/network/cas.rs index d378c250..c3a6d08a 100644 --- a/operator/src/network/cas.rs +++ b/operator/src/network/cas.rs @@ -20,7 +20,7 @@ use crate::{ network::{ ceramic::NetworkConfig, controller::{ - CAS_APP, CAS_IPFS_APP, CAS_IPFS_SERVICE_NAME, CAS_POSTGRES_APP, + CAS_APP, CAS_IPFS_APP, CAS_IPFS_SERVICE_NAME, CAS_IPFS_SERVICE_PORT, CAS_POSTGRES_APP, CAS_POSTGRES_SECRET_NAME, CAS_POSTGRES_SERVICE_NAME, CAS_SERVICE_NAME, DEFAULT_METRICS_PORT, GANACHE_APP, GANACHE_SERVICE_NAME, LOCALSTACK_APP, LOCALSTACK_SERVICE_NAME, NETWORK_DEV_MODE_RESOURCES, @@ -426,7 +426,7 @@ pub fn cas_stateful_set_spec( }, EnvVar { name: "IPFS_API_URL".to_owned(), - value: Some(format!("http://{CAS_IPFS_SERVICE_NAME}:5001")), + value: 
Some(format!("http://{CAS_IPFS_SERVICE_NAME}:{CAS_IPFS_SERVICE_PORT}")), ..Default::default() }, EnvVar { @@ -621,9 +621,9 @@ pub fn cas_ipfs_service_spec() -> ServiceSpec { ServiceSpec { ports: Some(vec![ServicePort { name: Some("cas-ipfs".to_owned()), - port: 5001, + port: CAS_IPFS_SERVICE_PORT, protocol: Some("TCP".to_owned()), - target_port: Some(IntOrString::Int(5001)), + target_port: Some(IntOrString::Int(CAS_IPFS_SERVICE_PORT)), ..Default::default() }]), selector: selector_labels(CAS_IPFS_APP), diff --git a/operator/src/network/ceramic.rs b/operator/src/network/ceramic.rs index 43b9390e..be8f8bdd 100644 --- a/operator/src/network/ceramic.rs +++ b/operator/src/network/ceramic.rs @@ -20,8 +20,8 @@ use crate::{ labels::{managed_labels, selector_labels}, network::{ controller::{ - CAS_SERVICE_NAME, CERAMIC_APP, CERAMIC_POSTGRES_SECRET_NAME, CERAMIC_SERVICE_API_PORT, - CERAMIC_SERVICE_IPFS_PORT, DEFAULT_METRICS_PORT, GANACHE_SERVICE_NAME, + CAS_SERVICE_NAME, CERAMIC_APP, CERAMIC_ONE_IPFS_PORT, CERAMIC_POSTGRES_SECRET_NAME, + CERAMIC_SERVICE_API_PORT, DEFAULT_METRICS_PORT, GANACHE_SERVICE_NAME, INIT_CONFIG_MAP_NAME, NETWORK_DEV_MODE_RESOURCES, NODE_INSPECTION_PORT, }, datadog::DataDogConfig, @@ -33,7 +33,10 @@ use crate::{ utils::override_and_sort_env_vars, }; -use super::{debug_mode_security_context, storage::PersistentStorageConfig}; +use super::{ + controller::CERAMIC_ONE_SWARM_PORT, debug_mode_security_context, + storage::PersistentStorageConfig, +}; pub fn config_maps( info: &CeramicInfo, @@ -115,13 +118,13 @@ pub fn service_spec() -> ServiceSpec { ..Default::default() }, ServicePort { - port: CERAMIC_SERVICE_IPFS_PORT, + port: CERAMIC_ONE_IPFS_PORT, name: Some("ipfs".to_owned()), protocol: Some("TCP".to_owned()), ..Default::default() }, ServicePort { - port: 4001, + port: CERAMIC_ONE_SWARM_PORT, name: Some("swarm-tcp".to_owned()), protocol: Some("TCP".to_owned()), ..Default::default() @@ -236,7 +239,7 @@ impl CeramicInfo { /// Determine the IPFS RPC address 
of a Ceramic peer pub fn ipfs_rpc_addr(&self, ns: &str, peer: i32) -> String { format!( - "http://{}-{peer}.{}.{ns}.svc.cluster.local:{CERAMIC_SERVICE_IPFS_PORT}", + "http://{}-{peer}.{}.{ns}.svc.cluster.local:{CERAMIC_ONE_IPFS_PORT}", self.stateful_set, self.service ) } @@ -360,7 +363,7 @@ pub fn stateful_set_spec(ns: &str, bundle: &CeramicBundle<'_>) -> StatefulSetSpe }, EnvVar { name: "CERAMIC_IPFS_HOST".to_owned(), - value: Some(format!("http://localhost:{CERAMIC_SERVICE_IPFS_PORT}")), + value: Some(format!("http://localhost:{CERAMIC_ONE_IPFS_PORT}")), ..Default::default() }, EnvVar { diff --git a/operator/src/network/controller.rs b/operator/src/network/controller.rs index 444fb051..067fbd0a 100644 --- a/operator/src/network/controller.rs +++ b/operator/src/network/controller.rs @@ -71,7 +71,8 @@ use crate::{ /// network. pub const PEERS_CONFIG_MAP_NAME: &str = "keramik-peers"; -pub const CERAMIC_SERVICE_IPFS_PORT: i32 = 5001; +pub const CERAMIC_ONE_SWARM_PORT: i32 = 4101; +pub const CERAMIC_ONE_IPFS_PORT: i32 = 5101; pub const CERAMIC_SERVICE_API_PORT: i32 = 7007; pub const CERAMIC_POSTGRES_SECRET_NAME: &str = "ceramic-postgres-auth"; @@ -80,7 +81,7 @@ pub const ADMIN_SECRET_NAME: &str = "ceramic-admin"; pub const CAS_SERVICE_NAME: &str = "cas"; pub const CAS_IPFS_SERVICE_NAME: &str = "cas-ipfs"; -pub const CAS_SERVICE_IPFS_PORT: i32 = 5001; +pub const CAS_IPFS_SERVICE_PORT: i32 = 5101; pub const CAS_POSTGRES_SERVICE_NAME: &str = "cas-postgres"; pub const CAS_POSTGRES_SECRET_NAME: &str = "postgres-auth"; pub const GANACHE_SERVICE_NAME: &str = "ganache"; @@ -890,7 +891,7 @@ async fn update_peer_status( let network_config: NetworkConfig = network.spec().into(); if network_config.network_type == NetworkType::Local { // CAS IPFS peer - let ipfs_rpc_addr = format!("http://{CAS_IPFS_SERVICE_NAME}-0.{CAS_IPFS_SERVICE_NAME}.{ns}.svc.cluster.local:{CAS_SERVICE_IPFS_PORT}"); + let ipfs_rpc_addr = 
format!("http://{CAS_IPFS_SERVICE_NAME}-0.{CAS_IPFS_SERVICE_NAME}.{ns}.svc.cluster.local:{CAS_IPFS_SERVICE_PORT}"); match cx.rpc_client.peer_info(&ipfs_rpc_addr).await { Ok(info) => { status.peers.push(Peer::Ipfs(info)); @@ -2275,7 +2276,7 @@ mod tests { - "env": [ - { - "name": "CERAMIC_ONE_BIND_ADDRESS", - - "value": "0.0.0.0:5001" + - "value": "0.0.0.0:5101" - }, - { - "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -2303,7 +2304,7 @@ mod tests { - }, - { - "name": "CERAMIC_ONE_SWARM_ADDRESSES", - - "value": "/ip4/0.0.0.0/tcp/4001" + - "value": "/ip4/0.0.0.0/tcp/4101" - }, - { - "name": "RUST_LOG", @@ -2427,7 +2428,7 @@ mod tests { - "env": [ - { - "name": "CERAMIC_ONE_BIND_ADDRESS", - - "value": "0.0.0.0:5001" + - "value": "0.0.0.0:5101" - }, - { - "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -2455,7 +2456,7 @@ mod tests { - }, - { - "name": "CERAMIC_ONE_SWARM_ADDRESSES", - - "value": "/ip4/0.0.0.0/tcp/4001" + - "value": "/ip4/0.0.0.0/tcp/4101" - }, - { - "name": "RUST_LOG", @@ -2584,7 +2585,7 @@ mod tests { - "env": [ - { - "name": "CERAMIC_ONE_BIND_ADDRESS", - - "value": "0.0.0.0:5001" + - "value": "0.0.0.0:5101" - }, - { - "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -2612,7 +2613,7 @@ mod tests { - }, - { - "name": "CERAMIC_ONE_SWARM_ADDRESSES", - - "value": "/ip4/0.0.0.0/tcp/4001" + - "value": "/ip4/0.0.0.0/tcp/4101" - }, - { - "name": "RUST_LOG", @@ -2749,7 +2750,7 @@ mod tests { "value": "0.0.0.0:9465" }, @@ -252,11 +256,19 @@ - "value": "/ip4/0.0.0.0/tcp/4001" + "value": "/ip4/0.0.0.0/tcp/4101" }, { + "name": "ENV_KEY_A", @@ -4185,7 +4186,7 @@ mod tests { - "env": [ - { - "name": "CERAMIC_ONE_BIND_ADDRESS", - - "value": "0.0.0.0:5001" + - "value": "0.0.0.0:5101" - }, - { - "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -4213,7 +4214,7 @@ mod tests { - }, - { - "name": "CERAMIC_ONE_SWARM_ADDRESSES", - - "value": "/ip4/0.0.0.0/tcp/4001" + - "value": "/ip4/0.0.0.0/tcp/4101" - }, - { - "name": "RUST_LOG", @@ -4547,7 +4548,7 @@ mod tests { { "mountPath": 
"/config", @@ -252,6 +268,10 @@ - "value": "/ip4/0.0.0.0/tcp/4001" + "value": "/ip4/0.0.0.0/tcp/4101" }, { + "name": "CERAMIC_ONE_TOKIO_CONSOLE", @@ -4599,7 +4600,7 @@ mod tests { --- original +++ modified @@ -69,6 +69,10 @@ - "value": "/ip4/0.0.0.0/tcp/4001" + "value": "/ip4/0.0.0.0/tcp/4101" }, { + "name": "CERAMIC_ONE_TOKIO_CONSOLE", @@ -4752,7 +4753,7 @@ mod tests { + "env": [ + { + "name": "CERAMIC_ONE_BIND_ADDRESS", - + "value": "0.0.0.0:5001" + + "value": "0.0.0.0:5101" + }, + { + "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -4780,7 +4781,7 @@ mod tests { + }, + { + "name": "CERAMIC_ONE_SWARM_ADDRESSES", - + "value": "/ip4/0.0.0.0/tcp/4001" + + "value": "/ip4/0.0.0.0/tcp/4101" + }, + { + "name": "RUST_LOG", @@ -4792,12 +4793,12 @@ mod tests { + "name": "ipfs-migration", + "ports": [ + { - + "containerPort": 4001, + + "containerPort": 4101, + "name": "swarm-tcp", + "protocol": "TCP" + }, + { - + "containerPort": 5001, + + "containerPort": 5101, + "name": "rpc", + "protocol": "TCP" + }, diff --git a/operator/src/network/ipfs.rs b/operator/src/network/ipfs.rs index 7c0c6228..b263a85f 100644 --- a/operator/src/network/ipfs.rs +++ b/operator/src/network/ipfs.rs @@ -11,7 +11,6 @@ use k8s_openapi::{ const IPFS_CONTAINER_NAME: &str = "ipfs"; const IPFS_STORE_DIR: &str = "/data/ipfs"; pub const IPFS_DATA_PV_CLAIM: &str = "ipfs-data"; -const IPFS_SERVICE_PORT: i32 = 5001; use crate::{ network::{ @@ -22,7 +21,10 @@ use crate::{ utils::override_and_sort_env_vars, }; -use super::debug_mode_security_context; +use super::{ + controller::{CERAMIC_ONE_IPFS_PORT, CERAMIC_ONE_SWARM_PORT}, + debug_mode_security_context, +}; /// Unique identifying information about this IPFS spec. 
#[derive(Debug, Clone)] @@ -172,7 +174,7 @@ impl RustIpfsConfig { }, EnvVar { name: "CERAMIC_ONE_BIND_ADDRESS".to_owned(), - value: Some(format!("0.0.0.0:{IPFS_SERVICE_PORT}")), + value: Some(format!("0.0.0.0:{CERAMIC_ONE_IPFS_PORT}")), ..Default::default() }, EnvVar { @@ -182,7 +184,7 @@ impl RustIpfsConfig { }, EnvVar { name: "CERAMIC_ONE_SWARM_ADDRESSES".to_owned(), - value: Some("/ip4/0.0.0.0/tcp/4001".to_owned()), + value: Some(format!("/ip4/0.0.0.0/tcp/{CERAMIC_ONE_SWARM_PORT}")), ..Default::default() }, EnvVar { @@ -225,13 +227,13 @@ impl RustIpfsConfig { // Construct the set of ports let mut ports = vec![ ContainerPort { - container_port: 4001, + container_port: CERAMIC_ONE_SWARM_PORT, name: Some("swarm-tcp".to_owned()), protocol: Some("TCP".to_owned()), ..Default::default() }, ContainerPort { - container_port: IPFS_SERVICE_PORT, + container_port: CERAMIC_ONE_IPFS_PORT, name: Some("rpc".to_owned()), protocol: Some("TCP".to_owned()), ..Default::default() @@ -361,7 +363,7 @@ ipfs config --json Addresses.Gateway '[]' # Enable pubsub ipfs config --json PubSub.Enabled true # Only listen on specific tcp address as nothing else is exposed -ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4001"]' +ipfs config --json Addresses.Swarm '["/ip4/0.0.0.0/tcp/4101"]' # Set explicit resource manager limits as Kubo computes them based off # the k8s node resources and not the pods limits. 
ipfs config Swarm.ResourceMgr.MaxMemory '400 MB' @@ -415,13 +417,13 @@ ipfs config --json Swarm.ResourceMgr.MaxFileDescriptors 500000 name: IPFS_CONTAINER_NAME.to_owned(), ports: Some(vec![ ContainerPort { - container_port: 4001, + container_port: CERAMIC_ONE_SWARM_PORT, name: Some("swarm-tcp".to_owned()), protocol: Some("TCP".to_owned()), ..Default::default() }, ContainerPort { - container_port: IPFS_SERVICE_PORT, + container_port: CERAMIC_ONE_IPFS_PORT, name: Some("rpc".to_owned()), protocol: Some("TCP".to_owned()), ..Default::default() diff --git a/operator/src/network/testdata/ceramic_go_ss_1 b/operator/src/network/testdata/ceramic_go_ss_1 index a1cb5df1..ccf5d916 100644 --- a/operator/src/network/testdata/ceramic_go_ss_1 +++ b/operator/src/network/testdata/ceramic_go_ss_1 @@ -55,7 +55,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", @@ -223,12 +223,12 @@ Request { "name": "ipfs", "ports": [ { - "containerPort": 4001, + "containerPort": 4101, "name": "swarm-tcp", "protocol": "TCP" }, { - "containerPort": 5001, + "containerPort": 5101, "name": "rpc", "protocol": "TCP" }, @@ -290,7 +290,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", diff --git a/operator/src/network/testdata/ceramic_go_svc_1 b/operator/src/network/testdata/ceramic_go_svc_1 index 19f96918..d4d5fe5c 100644 --- a/operator/src/network/testdata/ceramic_go_svc_1 +++ b/operator/src/network/testdata/ceramic_go_svc_1 @@ -26,12 +26,12 @@ Request { }, { "name": "ipfs", - "port": 5001, + "port": 5101, "protocol": "TCP" }, { "name": "swarm-tcp", - "port": 4001, + "port": 4101, "protocol": "TCP" } ], diff --git a/operator/src/network/testdata/ceramic_ss_1 b/operator/src/network/testdata/ceramic_ss_1 index 38ff9a10..aa29ec1c 100644 --- a/operator/src/network/testdata/ceramic_ss_1 +++ 
b/operator/src/network/testdata/ceramic_ss_1 @@ -55,7 +55,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", @@ -221,7 +221,7 @@ Request { "env": [ { "name": "CERAMIC_ONE_BIND_ADDRESS", - "value": "0.0.0.0:5001" + "value": "0.0.0.0:5101" }, { "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -249,7 +249,7 @@ Request { }, { "name": "CERAMIC_ONE_SWARM_ADDRESSES", - "value": "/ip4/0.0.0.0/tcp/4001" + "value": "/ip4/0.0.0.0/tcp/4101" }, { "name": "RUST_LOG", @@ -261,12 +261,12 @@ Request { "name": "ipfs", "ports": [ { - "containerPort": 4001, + "containerPort": 4101, "name": "swarm-tcp", "protocol": "TCP" }, { - "containerPort": 5001, + "containerPort": 5101, "name": "rpc", "protocol": "TCP" }, @@ -323,7 +323,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", diff --git a/operator/src/network/testdata/ceramic_ss_weighted_0 b/operator/src/network/testdata/ceramic_ss_weighted_0 index e1bb73ae..8cc42df9 100644 --- a/operator/src/network/testdata/ceramic_ss_weighted_0 +++ b/operator/src/network/testdata/ceramic_ss_weighted_0 @@ -55,7 +55,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", @@ -221,7 +221,7 @@ Request { "env": [ { "name": "CERAMIC_ONE_BIND_ADDRESS", - "value": "0.0.0.0:5001" + "value": "0.0.0.0:5101" }, { "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -249,7 +249,7 @@ Request { }, { "name": "CERAMIC_ONE_SWARM_ADDRESSES", - "value": "/ip4/0.0.0.0/tcp/4001" + "value": "/ip4/0.0.0.0/tcp/4101" }, { "name": "RUST_LOG", @@ -261,12 +261,12 @@ Request { "name": "ipfs", "ports": [ { - "containerPort": 4001, + "containerPort": 4101, "name": "swarm-tcp", "protocol": "TCP" }, { - "containerPort": 5001, + "containerPort": 5101, "name": "rpc", "protocol": "TCP" }, @@ -323,7 
+323,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", diff --git a/operator/src/network/testdata/ceramic_ss_weighted_1 b/operator/src/network/testdata/ceramic_ss_weighted_1 index 6bde9635..9ba79863 100644 --- a/operator/src/network/testdata/ceramic_ss_weighted_1 +++ b/operator/src/network/testdata/ceramic_ss_weighted_1 @@ -55,7 +55,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", @@ -221,7 +221,7 @@ Request { "env": [ { "name": "CERAMIC_ONE_BIND_ADDRESS", - "value": "0.0.0.0:5001" + "value": "0.0.0.0:5101" }, { "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -249,7 +249,7 @@ Request { }, { "name": "CERAMIC_ONE_SWARM_ADDRESSES", - "value": "/ip4/0.0.0.0/tcp/4001" + "value": "/ip4/0.0.0.0/tcp/4101" }, { "name": "RUST_LOG", @@ -261,12 +261,12 @@ Request { "name": "ipfs", "ports": [ { - "containerPort": 4001, + "containerPort": 4101, "name": "swarm-tcp", "protocol": "TCP" }, { - "containerPort": 5001, + "containerPort": 5101, "name": "rpc", "protocol": "TCP" }, @@ -323,7 +323,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", diff --git a/operator/src/network/testdata/ceramic_ss_weighted_2 b/operator/src/network/testdata/ceramic_ss_weighted_2 index 379b45c7..9fda600b 100644 --- a/operator/src/network/testdata/ceramic_ss_weighted_2 +++ b/operator/src/network/testdata/ceramic_ss_weighted_2 @@ -55,7 +55,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", @@ -221,7 +221,7 @@ Request { "env": [ { "name": "CERAMIC_ONE_BIND_ADDRESS", - "value": "0.0.0.0:5001" + "value": "0.0.0.0:5101" }, { "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -249,7 +249,7 @@ Request { }, { 
"name": "CERAMIC_ONE_SWARM_ADDRESSES", - "value": "/ip4/0.0.0.0/tcp/4001" + "value": "/ip4/0.0.0.0/tcp/4101" }, { "name": "RUST_LOG", @@ -261,12 +261,12 @@ Request { "name": "ipfs", "ports": [ { - "containerPort": 4001, + "containerPort": 4101, "name": "swarm-tcp", "protocol": "TCP" }, { - "containerPort": 5001, + "containerPort": 5101, "name": "rpc", "protocol": "TCP" }, @@ -323,7 +323,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", diff --git a/operator/src/network/testdata/ceramic_ss_weighted_3 b/operator/src/network/testdata/ceramic_ss_weighted_3 index f0a25ff8..688cc72b 100644 --- a/operator/src/network/testdata/ceramic_ss_weighted_3 +++ b/operator/src/network/testdata/ceramic_ss_weighted_3 @@ -55,7 +55,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", @@ -221,7 +221,7 @@ Request { "env": [ { "name": "CERAMIC_ONE_BIND_ADDRESS", - "value": "0.0.0.0:5001" + "value": "0.0.0.0:5101" }, { "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -249,7 +249,7 @@ Request { }, { "name": "CERAMIC_ONE_SWARM_ADDRESSES", - "value": "/ip4/0.0.0.0/tcp/4001" + "value": "/ip4/0.0.0.0/tcp/4101" }, { "name": "RUST_LOG", @@ -261,12 +261,12 @@ Request { "name": "ipfs", "ports": [ { - "containerPort": 4001, + "containerPort": 4101, "name": "swarm-tcp", "protocol": "TCP" }, { - "containerPort": 5001, + "containerPort": 5101, "name": "rpc", "protocol": "TCP" }, @@ -323,7 +323,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", diff --git a/operator/src/network/testdata/ceramic_ss_weighted_4 b/operator/src/network/testdata/ceramic_ss_weighted_4 index b9b2620d..6e2ff272 100644 --- a/operator/src/network/testdata/ceramic_ss_weighted_4 +++ b/operator/src/network/testdata/ceramic_ss_weighted_4 
@@ -55,7 +55,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", @@ -221,7 +221,7 @@ Request { "env": [ { "name": "CERAMIC_ONE_BIND_ADDRESS", - "value": "0.0.0.0:5001" + "value": "0.0.0.0:5101" }, { "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -249,7 +249,7 @@ Request { }, { "name": "CERAMIC_ONE_SWARM_ADDRESSES", - "value": "/ip4/0.0.0.0/tcp/4001" + "value": "/ip4/0.0.0.0/tcp/4101" }, { "name": "RUST_LOG", @@ -261,12 +261,12 @@ Request { "name": "ipfs", "ports": [ { - "containerPort": 4001, + "containerPort": 4101, "name": "swarm-tcp", "protocol": "TCP" }, { - "containerPort": 5001, + "containerPort": 5101, "name": "rpc", "protocol": "TCP" }, @@ -323,7 +323,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", diff --git a/operator/src/network/testdata/ceramic_ss_weighted_5 b/operator/src/network/testdata/ceramic_ss_weighted_5 index 7174964e..3fa2e011 100644 --- a/operator/src/network/testdata/ceramic_ss_weighted_5 +++ b/operator/src/network/testdata/ceramic_ss_weighted_5 @@ -55,7 +55,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", @@ -221,7 +221,7 @@ Request { "env": [ { "name": "CERAMIC_ONE_BIND_ADDRESS", - "value": "0.0.0.0:5001" + "value": "0.0.0.0:5101" }, { "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -249,7 +249,7 @@ Request { }, { "name": "CERAMIC_ONE_SWARM_ADDRESSES", - "value": "/ip4/0.0.0.0/tcp/4001" + "value": "/ip4/0.0.0.0/tcp/4101" }, { "name": "RUST_LOG", @@ -261,12 +261,12 @@ Request { "name": "ipfs", "ports": [ { - "containerPort": 4001, + "containerPort": 4101, "name": "swarm-tcp", "protocol": "TCP" }, { - "containerPort": 5001, + "containerPort": 5101, "name": "rpc", "protocol": "TCP" }, @@ -323,7 +323,7 @@ Request { }, { "name": 
"CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", diff --git a/operator/src/network/testdata/ceramic_ss_weighted_6 b/operator/src/network/testdata/ceramic_ss_weighted_6 index 72714642..12fbd68c 100644 --- a/operator/src/network/testdata/ceramic_ss_weighted_6 +++ b/operator/src/network/testdata/ceramic_ss_weighted_6 @@ -55,7 +55,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", @@ -221,7 +221,7 @@ Request { "env": [ { "name": "CERAMIC_ONE_BIND_ADDRESS", - "value": "0.0.0.0:5001" + "value": "0.0.0.0:5101" }, { "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -249,7 +249,7 @@ Request { }, { "name": "CERAMIC_ONE_SWARM_ADDRESSES", - "value": "/ip4/0.0.0.0/tcp/4001" + "value": "/ip4/0.0.0.0/tcp/4101" }, { "name": "RUST_LOG", @@ -261,12 +261,12 @@ Request { "name": "ipfs", "ports": [ { - "containerPort": 4001, + "containerPort": 4101, "name": "swarm-tcp", "protocol": "TCP" }, { - "containerPort": 5001, + "containerPort": 5101, "name": "rpc", "protocol": "TCP" }, @@ -323,7 +323,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", diff --git a/operator/src/network/testdata/ceramic_ss_weighted_7 b/operator/src/network/testdata/ceramic_ss_weighted_7 index 6beee08d..1bd6720b 100644 --- a/operator/src/network/testdata/ceramic_ss_weighted_7 +++ b/operator/src/network/testdata/ceramic_ss_weighted_7 @@ -55,7 +55,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", @@ -221,7 +221,7 @@ Request { "env": [ { "name": "CERAMIC_ONE_BIND_ADDRESS", - "value": "0.0.0.0:5001" + "value": "0.0.0.0:5101" }, { "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -249,7 +249,7 @@ Request { }, { "name": "CERAMIC_ONE_SWARM_ADDRESSES", 
- "value": "/ip4/0.0.0.0/tcp/4001" + "value": "/ip4/0.0.0.0/tcp/4101" }, { "name": "RUST_LOG", @@ -261,12 +261,12 @@ Request { "name": "ipfs", "ports": [ { - "containerPort": 4001, + "containerPort": 4101, "name": "swarm-tcp", "protocol": "TCP" }, { - "containerPort": 5001, + "containerPort": 5101, "name": "rpc", "protocol": "TCP" }, @@ -323,7 +323,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", diff --git a/operator/src/network/testdata/ceramic_ss_weighted_8 b/operator/src/network/testdata/ceramic_ss_weighted_8 index 3daa5bac..50973496 100644 --- a/operator/src/network/testdata/ceramic_ss_weighted_8 +++ b/operator/src/network/testdata/ceramic_ss_weighted_8 @@ -55,7 +55,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", @@ -221,7 +221,7 @@ Request { "env": [ { "name": "CERAMIC_ONE_BIND_ADDRESS", - "value": "0.0.0.0:5001" + "value": "0.0.0.0:5101" }, { "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -249,7 +249,7 @@ Request { }, { "name": "CERAMIC_ONE_SWARM_ADDRESSES", - "value": "/ip4/0.0.0.0/tcp/4001" + "value": "/ip4/0.0.0.0/tcp/4101" }, { "name": "RUST_LOG", @@ -261,12 +261,12 @@ Request { "name": "ipfs", "ports": [ { - "containerPort": 4001, + "containerPort": 4101, "name": "swarm-tcp", "protocol": "TCP" }, { - "containerPort": 5001, + "containerPort": 5101, "name": "rpc", "protocol": "TCP" }, @@ -323,7 +323,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", diff --git a/operator/src/network/testdata/ceramic_ss_weighted_9 b/operator/src/network/testdata/ceramic_ss_weighted_9 index 446a5c48..52b5fb12 100644 --- a/operator/src/network/testdata/ceramic_ss_weighted_9 +++ b/operator/src/network/testdata/ceramic_ss_weighted_9 @@ -55,7 +55,7 @@ Request { }, { 
"name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", @@ -221,7 +221,7 @@ Request { "env": [ { "name": "CERAMIC_ONE_BIND_ADDRESS", - "value": "0.0.0.0:5001" + "value": "0.0.0.0:5101" }, { "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -249,7 +249,7 @@ Request { }, { "name": "CERAMIC_ONE_SWARM_ADDRESSES", - "value": "/ip4/0.0.0.0/tcp/4001" + "value": "/ip4/0.0.0.0/tcp/4101" }, { "name": "RUST_LOG", @@ -261,12 +261,12 @@ Request { "name": "ipfs", "ports": [ { - "containerPort": 4001, + "containerPort": 4101, "name": "swarm-tcp", "protocol": "TCP" }, { - "containerPort": 5001, + "containerPort": 5101, "name": "rpc", "protocol": "TCP" }, @@ -323,7 +323,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", diff --git a/operator/src/network/testdata/ceramic_svc_1 b/operator/src/network/testdata/ceramic_svc_1 index 19f96918..d4d5fe5c 100644 --- a/operator/src/network/testdata/ceramic_svc_1 +++ b/operator/src/network/testdata/ceramic_svc_1 @@ -26,12 +26,12 @@ Request { }, { "name": "ipfs", - "port": 5001, + "port": 5101, "protocol": "TCP" }, { "name": "swarm-tcp", - "port": 4001, + "port": 4101, "protocol": "TCP" } ], diff --git a/operator/src/network/testdata/ceramic_svc_weighted_0 b/operator/src/network/testdata/ceramic_svc_weighted_0 index 275f8222..93a5168a 100644 --- a/operator/src/network/testdata/ceramic_svc_weighted_0 +++ b/operator/src/network/testdata/ceramic_svc_weighted_0 @@ -26,12 +26,12 @@ Request { }, { "name": "ipfs", - "port": 5001, + "port": 5101, "protocol": "TCP" }, { "name": "swarm-tcp", - "port": 4001, + "port": 4101, "protocol": "TCP" } ], diff --git a/operator/src/network/testdata/ceramic_svc_weighted_1 b/operator/src/network/testdata/ceramic_svc_weighted_1 index 19f96918..d4d5fe5c 100644 --- a/operator/src/network/testdata/ceramic_svc_weighted_1 +++ 
b/operator/src/network/testdata/ceramic_svc_weighted_1 @@ -26,12 +26,12 @@ Request { }, { "name": "ipfs", - "port": 5001, + "port": 5101, "protocol": "TCP" }, { "name": "swarm-tcp", - "port": 4001, + "port": 4101, "protocol": "TCP" } ], diff --git a/operator/src/network/testdata/ceramic_svc_weighted_2 b/operator/src/network/testdata/ceramic_svc_weighted_2 index 5a649e8b..2710ca46 100644 --- a/operator/src/network/testdata/ceramic_svc_weighted_2 +++ b/operator/src/network/testdata/ceramic_svc_weighted_2 @@ -26,12 +26,12 @@ Request { }, { "name": "ipfs", - "port": 5001, + "port": 5101, "protocol": "TCP" }, { "name": "swarm-tcp", - "port": 4001, + "port": 4101, "protocol": "TCP" } ], diff --git a/operator/src/network/testdata/ceramic_svc_weighted_3 b/operator/src/network/testdata/ceramic_svc_weighted_3 index 43a1a970..1056510e 100644 --- a/operator/src/network/testdata/ceramic_svc_weighted_3 +++ b/operator/src/network/testdata/ceramic_svc_weighted_3 @@ -26,12 +26,12 @@ Request { }, { "name": "ipfs", - "port": 5001, + "port": 5101, "protocol": "TCP" }, { "name": "swarm-tcp", - "port": 4001, + "port": 4101, "protocol": "TCP" } ], diff --git a/operator/src/network/testdata/ceramic_svc_weighted_4 b/operator/src/network/testdata/ceramic_svc_weighted_4 index 0065263e..b0eb9231 100644 --- a/operator/src/network/testdata/ceramic_svc_weighted_4 +++ b/operator/src/network/testdata/ceramic_svc_weighted_4 @@ -26,12 +26,12 @@ Request { }, { "name": "ipfs", - "port": 5001, + "port": 5101, "protocol": "TCP" }, { "name": "swarm-tcp", - "port": 4001, + "port": 4101, "protocol": "TCP" } ], diff --git a/operator/src/network/testdata/ceramic_svc_weighted_5 b/operator/src/network/testdata/ceramic_svc_weighted_5 index c6f0c4fb..7ec4a387 100644 --- a/operator/src/network/testdata/ceramic_svc_weighted_5 +++ b/operator/src/network/testdata/ceramic_svc_weighted_5 @@ -26,12 +26,12 @@ Request { }, { "name": "ipfs", - "port": 5001, + "port": 5101, "protocol": "TCP" }, { "name": "swarm-tcp", - 
"port": 4001, + "port": 4101, "protocol": "TCP" } ], diff --git a/operator/src/network/testdata/ceramic_svc_weighted_6 b/operator/src/network/testdata/ceramic_svc_weighted_6 index 791da656..450d02b3 100644 --- a/operator/src/network/testdata/ceramic_svc_weighted_6 +++ b/operator/src/network/testdata/ceramic_svc_weighted_6 @@ -26,12 +26,12 @@ Request { }, { "name": "ipfs", - "port": 5001, + "port": 5101, "protocol": "TCP" }, { "name": "swarm-tcp", - "port": 4001, + "port": 4101, "protocol": "TCP" } ], diff --git a/operator/src/network/testdata/ceramic_svc_weighted_7 b/operator/src/network/testdata/ceramic_svc_weighted_7 index cf7835b4..7eafdf50 100644 --- a/operator/src/network/testdata/ceramic_svc_weighted_7 +++ b/operator/src/network/testdata/ceramic_svc_weighted_7 @@ -26,12 +26,12 @@ Request { }, { "name": "ipfs", - "port": 5001, + "port": 5101, "protocol": "TCP" }, { "name": "swarm-tcp", - "port": 4001, + "port": 4101, "protocol": "TCP" } ], diff --git a/operator/src/network/testdata/ceramic_svc_weighted_8 b/operator/src/network/testdata/ceramic_svc_weighted_8 index 55d9dc85..ab6c8d5c 100644 --- a/operator/src/network/testdata/ceramic_svc_weighted_8 +++ b/operator/src/network/testdata/ceramic_svc_weighted_8 @@ -26,12 +26,12 @@ Request { }, { "name": "ipfs", - "port": 5001, + "port": 5101, "protocol": "TCP" }, { "name": "swarm-tcp", - "port": 4001, + "port": 4101, "protocol": "TCP" } ], diff --git a/operator/src/network/testdata/ceramic_svc_weighted_9 b/operator/src/network/testdata/ceramic_svc_weighted_9 index 292afd74..78972610 100644 --- a/operator/src/network/testdata/ceramic_svc_weighted_9 +++ b/operator/src/network/testdata/ceramic_svc_weighted_9 @@ -26,12 +26,12 @@ Request { }, { "name": "ipfs", - "port": 5001, + "port": 5101, "protocol": "TCP" }, { "name": "swarm-tcp", - "port": 4001, + "port": 4101, "protocol": "TCP" } ], diff --git a/operator/src/network/testdata/ceramic_weighted_peers b/operator/src/network/testdata/ceramic_weighted_peers index 
1c8e7566..e07bbc64 100644 --- a/operator/src/network/testdata/ceramic_weighted_peers +++ b/operator/src/network/testdata/ceramic_weighted_peers @@ -8,7 +8,7 @@ Request { body: { "apiVersion": "v1", "data": { - "peers.json": "[{\"ceramic\":{\"peerId\":\"peer_id_0\",\"ipfsRpcAddr\":\"http://ceramic-0-0.ceramic-0.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-0-0.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_1\",\"ipfsRpcAddr\":\"http://ceramic-0-1.ceramic-0.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-0-1.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_2\",\"ipfsRpcAddr\":\"http://ceramic-0-2.ceramic-0.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-0-2.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_3\",\"ipfsRpcAddr\":\"http://ceramic-0-3.ceramic-0.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-0-3.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_4\",\"ipfsRpcAddr\":\"http://ceramic-0-4.ceramic-0.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-0-4.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_5\",\"ipfsRpcAddr\":\"http://ceramic-0-5.ceramic-0.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-0-5.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_6\",\"ipfsRpcAddr\":\"http://ceramic-0-6.ceramic-0.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-0-6.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_7\",\"ipfsRpcAddr\":\"http://ceramic-0-7.ceramic-0.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-0-7.ceramic-0.keramik-test.s
vc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_8\",\"ipfsRpcAddr\":\"http://ceramic-0-8.ceramic-0.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-0-8.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_9\",\"ipfsRpcAddr\":\"http://ceramic-0-9.ceramic-0.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-0-9.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_10\",\"ipfsRpcAddr\":\"http://ceramic-1-0.ceramic-1.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-1-0.ceramic-1.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_11\",\"ipfsRpcAddr\":\"http://ceramic-1-1.ceramic-1.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-1-1.ceramic-1.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_12\",\"ipfsRpcAddr\":\"http://ceramic-2-0.ceramic-2.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-2-0.ceramic-2.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_13\",\"ipfsRpcAddr\":\"http://ceramic-3-0.ceramic-3.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-3-0.ceramic-3.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_14\",\"ipfsRpcAddr\":\"http://ceramic-4-0.ceramic-4.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-4-0.ceramic-4.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_15\",\"ipfsRpcAddr\":\"http://ceramic-5-0.ceramic-5.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-5-0.ceramic-5.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_16\",\"ipfsRpcAddr\":\"http://ceramic-6-0.ceramic-6.keramik-test.svc.cluster.local:5001\",\"ceramicAd
dr\":\"http://ceramic-6-0.ceramic-6.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_17\",\"ipfsRpcAddr\":\"http://ceramic-7-0.ceramic-7.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-7-0.ceramic-7.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_18\",\"ipfsRpcAddr\":\"http://ceramic-8-0.ceramic-8.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-8-0.ceramic-8.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_19\",\"ipfsRpcAddr\":\"http://ceramic-9-0.ceramic-9.keramik-test.svc.cluster.local:5001\",\"ceramicAddr\":\"http://ceramic-9-0.ceramic-9.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ipfs\":{\"peerId\":\"cas_peer_id\",\"ipfsRpcAddr\":\"http://cas-ipfs-0.cas-ipfs.keramik-test.svc.cluster.local:5001\",\"p2pAddrs\":[]}}]" + "peers.json": "[{\"ceramic\":{\"peerId\":\"peer_id_0\",\"ipfsRpcAddr\":\"http://ceramic-0-0.ceramic-0.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-0-0.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_1\",\"ipfsRpcAddr\":\"http://ceramic-0-1.ceramic-0.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-0-1.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_2\",\"ipfsRpcAddr\":\"http://ceramic-0-2.ceramic-0.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-0-2.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_3\",\"ipfsRpcAddr\":\"http://ceramic-0-3.ceramic-0.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-0-3.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_4\",\"ipfsRpcAddr\":\"http://ceramic-0-4.ceramic-0.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-
0-4.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_5\",\"ipfsRpcAddr\":\"http://ceramic-0-5.ceramic-0.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-0-5.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_6\",\"ipfsRpcAddr\":\"http://ceramic-0-6.ceramic-0.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-0-6.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_7\",\"ipfsRpcAddr\":\"http://ceramic-0-7.ceramic-0.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-0-7.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_8\",\"ipfsRpcAddr\":\"http://ceramic-0-8.ceramic-0.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-0-8.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_9\",\"ipfsRpcAddr\":\"http://ceramic-0-9.ceramic-0.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-0-9.ceramic-0.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_10\",\"ipfsRpcAddr\":\"http://ceramic-1-0.ceramic-1.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-1-0.ceramic-1.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_11\",\"ipfsRpcAddr\":\"http://ceramic-1-1.ceramic-1.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-1-1.ceramic-1.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_12\",\"ipfsRpcAddr\":\"http://ceramic-2-0.ceramic-2.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-2-0.ceramic-2.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_13\",\"ipfsRpcAddr\":\"http://ceramic-3-0.ceramic-3.keramik-test.svc.cluster
.local:5101\",\"ceramicAddr\":\"http://ceramic-3-0.ceramic-3.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_14\",\"ipfsRpcAddr\":\"http://ceramic-4-0.ceramic-4.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-4-0.ceramic-4.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_15\",\"ipfsRpcAddr\":\"http://ceramic-5-0.ceramic-5.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-5-0.ceramic-5.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_16\",\"ipfsRpcAddr\":\"http://ceramic-6-0.ceramic-6.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-6-0.ceramic-6.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_17\",\"ipfsRpcAddr\":\"http://ceramic-7-0.ceramic-7.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-7-0.ceramic-7.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_18\",\"ipfsRpcAddr\":\"http://ceramic-8-0.ceramic-8.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-8-0.ceramic-8.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ceramic\":{\"peerId\":\"peer_id_19\",\"ipfsRpcAddr\":\"http://ceramic-9-0.ceramic-9.keramik-test.svc.cluster.local:5101\",\"ceramicAddr\":\"http://ceramic-9-0.ceramic-9.keramik-test.svc.cluster.local:7007\",\"p2pAddrs\":[]}},{\"ipfs\":{\"peerId\":\"cas_peer_id\",\"ipfsRpcAddr\":\"http://cas-ipfs-0.cas-ipfs.keramik-test.svc.cluster.local:5101\",\"p2pAddrs\":[]}}]" }, "kind": "ConfigMap", "metadata": { diff --git a/operator/src/network/testdata/ceramics_weighted_status b/operator/src/network/testdata/ceramics_weighted_status index 2b97be40..de30b02f 100644 --- a/operator/src/network/testdata/ceramics_weighted_status +++ b/operator/src/network/testdata/ceramics_weighted_status @@ -13,7 +13,7 @@ Request { { "ceramic": { 
"ceramicAddr": "http://ceramic-0-0.ceramic-0.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-0-0.ceramic-0.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-0-0.ceramic-0.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_0" } @@ -21,7 +21,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-0-1.ceramic-0.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-0-1.ceramic-0.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-0-1.ceramic-0.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_1" } @@ -29,7 +29,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-0-2.ceramic-0.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-0-2.ceramic-0.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-0-2.ceramic-0.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_2" } @@ -37,7 +37,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-0-3.ceramic-0.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-0-3.ceramic-0.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-0-3.ceramic-0.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_3" } @@ -45,7 +45,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-0-4.ceramic-0.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-0-4.ceramic-0.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-0-4.ceramic-0.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_4" } @@ -53,7 +53,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-0-5.ceramic-0.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-0-5.ceramic-0.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-0-5.ceramic-0.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_5" } @@ -61,7 +61,7 @@ Request 
{ { "ceramic": { "ceramicAddr": "http://ceramic-0-6.ceramic-0.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-0-6.ceramic-0.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-0-6.ceramic-0.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_6" } @@ -69,7 +69,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-0-7.ceramic-0.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-0-7.ceramic-0.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-0-7.ceramic-0.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_7" } @@ -77,7 +77,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-0-8.ceramic-0.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-0-8.ceramic-0.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-0-8.ceramic-0.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_8" } @@ -85,7 +85,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-0-9.ceramic-0.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-0-9.ceramic-0.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-0-9.ceramic-0.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_9" } @@ -93,7 +93,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-1-0.ceramic-1.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-1-0.ceramic-1.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-1-0.ceramic-1.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_10" } @@ -101,7 +101,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-1-1.ceramic-1.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-1-1.ceramic-1.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-1-1.ceramic-1.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_11" } @@ 
-109,7 +109,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-2-0.ceramic-2.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-2-0.ceramic-2.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-2-0.ceramic-2.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_12" } @@ -117,7 +117,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-3-0.ceramic-3.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-3-0.ceramic-3.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-3-0.ceramic-3.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_13" } @@ -125,7 +125,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-4-0.ceramic-4.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-4-0.ceramic-4.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-4-0.ceramic-4.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_14" } @@ -133,7 +133,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-5-0.ceramic-5.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-5-0.ceramic-5.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-5-0.ceramic-5.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_15" } @@ -141,7 +141,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-6-0.ceramic-6.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-6-0.ceramic-6.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-6-0.ceramic-6.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_16" } @@ -149,7 +149,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-7-0.ceramic-7.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-7-0.ceramic-7.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-7-0.ceramic-7.keramik-test.svc.cluster.local:5101", 
"p2pAddrs": [], "peerId": "peer_id_17" } @@ -157,7 +157,7 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-8-0.ceramic-8.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-8-0.ceramic-8.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-8-0.ceramic-8.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_18" } @@ -165,14 +165,14 @@ Request { { "ceramic": { "ceramicAddr": "http://ceramic-9-0.ceramic-9.keramik-test.svc.cluster.local:7007", - "ipfsRpcAddr": "http://ceramic-9-0.ceramic-9.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://ceramic-9-0.ceramic-9.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "peer_id_19" } }, { "ipfs": { - "ipfsRpcAddr": "http://cas-ipfs-0.cas-ipfs.keramik-test.svc.cluster.local:5001", + "ipfsRpcAddr": "http://cas-ipfs-0.cas-ipfs.keramik-test.svc.cluster.local:5101", "p2pAddrs": [], "peerId": "cas_peer_id" } diff --git a/operator/src/network/testdata/default_stubs/cas_ipfs_service b/operator/src/network/testdata/default_stubs/cas_ipfs_service index 9a682483..3a71ad4e 100644 --- a/operator/src/network/testdata/default_stubs/cas_ipfs_service +++ b/operator/src/network/testdata/default_stubs/cas_ipfs_service @@ -20,9 +20,9 @@ Request { "ports": [ { "name": "cas-ipfs", - "port": 5001, + "port": 5101, "protocol": "TCP", - "targetPort": 5001 + "targetPort": 5101 } ], "selector": { diff --git a/operator/src/network/testdata/default_stubs/cas_ipfs_stateful_set b/operator/src/network/testdata/default_stubs/cas_ipfs_stateful_set index 574e51be..c70dbf3b 100644 --- a/operator/src/network/testdata/default_stubs/cas_ipfs_stateful_set +++ b/operator/src/network/testdata/default_stubs/cas_ipfs_stateful_set @@ -38,7 +38,7 @@ Request { "env": [ { "name": "CERAMIC_ONE_BIND_ADDRESS", - "value": "0.0.0.0:5001" + "value": "0.0.0.0:5101" }, { "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -66,7 +66,7 @@ Request { }, { "name": "CERAMIC_ONE_SWARM_ADDRESSES", - 
"value": "/ip4/0.0.0.0/tcp/4001" + "value": "/ip4/0.0.0.0/tcp/4101" }, { "name": "RUST_LOG", @@ -78,12 +78,12 @@ Request { "name": "ipfs", "ports": [ { - "containerPort": 4001, + "containerPort": 4101, "name": "swarm-tcp", "protocol": "TCP" }, { - "containerPort": 5001, + "containerPort": 5101, "name": "rpc", "protocol": "TCP" }, diff --git a/operator/src/network/testdata/default_stubs/cas_stateful_set b/operator/src/network/testdata/default_stubs/cas_stateful_set index cbe4cf4b..72f4f9c6 100644 --- a/operator/src/network/testdata/default_stubs/cas_stateful_set +++ b/operator/src/network/testdata/default_stubs/cas_stateful_set @@ -249,7 +249,7 @@ Request { }, { "name": "IPFS_API_URL", - "value": "http://cas-ipfs:5001" + "value": "http://cas-ipfs:5101" }, { "name": "IPFS_API_TIMEOUT", diff --git a/operator/src/network/testdata/default_stubs/ceramic_service b/operator/src/network/testdata/default_stubs/ceramic_service index 275f8222..93a5168a 100644 --- a/operator/src/network/testdata/default_stubs/ceramic_service +++ b/operator/src/network/testdata/default_stubs/ceramic_service @@ -26,12 +26,12 @@ Request { }, { "name": "ipfs", - "port": 5001, + "port": 5101, "protocol": "TCP" }, { "name": "swarm-tcp", - "port": 4001, + "port": 4101, "protocol": "TCP" } ], diff --git a/operator/src/network/testdata/default_stubs/ceramic_stateful_set b/operator/src/network/testdata/default_stubs/ceramic_stateful_set index 89eaae8d..a312d467 100644 --- a/operator/src/network/testdata/default_stubs/ceramic_stateful_set +++ b/operator/src/network/testdata/default_stubs/ceramic_stateful_set @@ -55,7 +55,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", @@ -221,7 +221,7 @@ Request { "env": [ { "name": "CERAMIC_ONE_BIND_ADDRESS", - "value": "0.0.0.0:5001" + "value": "0.0.0.0:5101" }, { "name": "CERAMIC_ONE_KADEMLIA_PARALLELISM", @@ -249,7 +249,7 @@ Request { }, { "name": 
"CERAMIC_ONE_SWARM_ADDRESSES", - "value": "/ip4/0.0.0.0/tcp/4001" + "value": "/ip4/0.0.0.0/tcp/4101" }, { "name": "RUST_LOG", @@ -261,12 +261,12 @@ Request { "name": "ipfs", "ports": [ { - "containerPort": 4001, + "containerPort": 4101, "name": "swarm-tcp", "protocol": "TCP" }, { - "containerPort": 5001, + "containerPort": 5101, "name": "rpc", "protocol": "TCP" }, @@ -323,7 +323,7 @@ Request { }, { "name": "CERAMIC_IPFS_HOST", - "value": "http://localhost:5001" + "value": "http://localhost:5101" }, { "name": "CERAMIC_LOG_LEVEL", diff --git a/operator/src/network/testdata/go_ipfs_configmap b/operator/src/network/testdata/go_ipfs_configmap index 7f91af2d..0c1faabe 100644 --- a/operator/src/network/testdata/go_ipfs_configmap +++ b/operator/src/network/testdata/go_ipfs_configmap @@ -8,7 +8,7 @@ Request { body: { "apiVersion": "v1", "data": { - "001-config.sh": "#!/bin/sh\nset -ex\n# Do not bootstrap against public nodes\nipfs bootstrap rm all\n# Do not sticky peer with ceramic specific peers\n# We want an isolated network\nipfs config --json Peering.Peers '[]'\n# Disable the gateway\nipfs config --json Addresses.Gateway '[]'\n# Enable pubsub\nipfs config --json PubSub.Enabled true\n# Only listen on specific tcp address as nothing else is exposed\nipfs config --json Addresses.Swarm '[\"/ip4/0.0.0.0/tcp/4001\"]'\n# Set explicit resource manager limits as Kubo computes them based off\n# the k8s node resources and not the pods limits.\nipfs config Swarm.ResourceMgr.MaxMemory '400 MB'\nipfs config --json Swarm.ResourceMgr.MaxFileDescriptors 500000\n" + "001-config.sh": "#!/bin/sh\nset -ex\n# Do not bootstrap against public nodes\nipfs bootstrap rm all\n# Do not sticky peer with ceramic specific peers\n# We want an isolated network\nipfs config --json Peering.Peers '[]'\n# Disable the gateway\nipfs config --json Addresses.Gateway '[]'\n# Enable pubsub\nipfs config --json PubSub.Enabled true\n# Only listen on specific tcp address as nothing else is exposed\nipfs config --json 
Addresses.Swarm '[\"/ip4/0.0.0.0/tcp/4101\"]'\n# Set explicit resource manager limits as Kubo computes them based off\n# the k8s node resources and not the pods limits.\nipfs config Swarm.ResourceMgr.MaxMemory '400 MB'\nipfs config --json Swarm.ResourceMgr.MaxFileDescriptors 500000\n" }, "kind": "ConfigMap", "metadata": { diff --git a/operator/src/network/testdata/go_ipfs_configmap_1 b/operator/src/network/testdata/go_ipfs_configmap_1 index 90bc66ed..b9d4cdec 100644 --- a/operator/src/network/testdata/go_ipfs_configmap_1 +++ b/operator/src/network/testdata/go_ipfs_configmap_1 @@ -8,7 +8,7 @@ Request { body: { "apiVersion": "v1", "data": { - "001-config.sh": "#!/bin/sh\nset -ex\n# Do not bootstrap against public nodes\nipfs bootstrap rm all\n# Do not sticky peer with ceramic specific peers\n# We want an isolated network\nipfs config --json Peering.Peers '[]'\n# Disable the gateway\nipfs config --json Addresses.Gateway '[]'\n# Enable pubsub\nipfs config --json PubSub.Enabled true\n# Only listen on specific tcp address as nothing else is exposed\nipfs config --json Addresses.Swarm '[\"/ip4/0.0.0.0/tcp/4001\"]'\n# Set explicit resource manager limits as Kubo computes them based off\n# the k8s node resources and not the pods limits.\nipfs config Swarm.ResourceMgr.MaxMemory '400 MB'\nipfs config --json Swarm.ResourceMgr.MaxFileDescriptors 500000\n" + "001-config.sh": "#!/bin/sh\nset -ex\n# Do not bootstrap against public nodes\nipfs bootstrap rm all\n# Do not sticky peer with ceramic specific peers\n# We want an isolated network\nipfs config --json Peering.Peers '[]'\n# Disable the gateway\nipfs config --json Addresses.Gateway '[]'\n# Enable pubsub\nipfs config --json PubSub.Enabled true\n# Only listen on specific tcp address as nothing else is exposed\nipfs config --json Addresses.Swarm '[\"/ip4/0.0.0.0/tcp/4101\"]'\n# Set explicit resource manager limits as Kubo computes them based off\n# the k8s node resources and not the pods limits.\nipfs config 
Swarm.ResourceMgr.MaxMemory '400 MB'\nipfs config --json Swarm.ResourceMgr.MaxFileDescriptors 500000\n" }, "kind": "ConfigMap", "metadata": { diff --git a/operator/src/network/testdata/go_ipfs_configmap_commands b/operator/src/network/testdata/go_ipfs_configmap_commands index f12e4180..4b0465dc 100644 --- a/operator/src/network/testdata/go_ipfs_configmap_commands +++ b/operator/src/network/testdata/go_ipfs_configmap_commands @@ -8,7 +8,7 @@ Request { body: { "apiVersion": "v1", "data": { - "001-config.sh": "#!/bin/sh\nset -ex\n# Do not bootstrap against public nodes\nipfs bootstrap rm all\n# Do not sticky peer with ceramic specific peers\n# We want an isolated network\nipfs config --json Peering.Peers '[]'\n# Disable the gateway\nipfs config --json Addresses.Gateway '[]'\n# Enable pubsub\nipfs config --json PubSub.Enabled true\n# Only listen on specific tcp address as nothing else is exposed\nipfs config --json Addresses.Swarm '[\"/ip4/0.0.0.0/tcp/4001\"]'\n# Set explicit resource manager limits as Kubo computes them based off\n# the k8s node resources and not the pods limits.\nipfs config Swarm.ResourceMgr.MaxMemory '400 MB'\nipfs config --json Swarm.ResourceMgr.MaxFileDescriptors 500000\n", + "001-config.sh": "#!/bin/sh\nset -ex\n# Do not bootstrap against public nodes\nipfs bootstrap rm all\n# Do not sticky peer with ceramic specific peers\n# We want an isolated network\nipfs config --json Peering.Peers '[]'\n# Disable the gateway\nipfs config --json Addresses.Gateway '[]'\n# Enable pubsub\nipfs config --json PubSub.Enabled true\n# Only listen on specific tcp address as nothing else is exposed\nipfs config --json Addresses.Swarm '[\"/ip4/0.0.0.0/tcp/4101\"]'\n# Set explicit resource manager limits as Kubo computes them based off\n# the k8s node resources and not the pods limits.\nipfs config Swarm.ResourceMgr.MaxMemory '400 MB'\nipfs config --json Swarm.ResourceMgr.MaxFileDescriptors 500000\n", "002-config.sh": "#!/bin/sh\nset -ex\nipfs config 
Pubsub.SeenMessagesTTL 10m\nipfs config --json Swarm.RelayClient.Enabled false" }, "kind": "ConfigMap", From bb4c0bffb587a78b9acc4cf74b093a689f60bc5f Mon Sep 17 00:00:00 2001 From: Mohsin Zaidi <2236875+smrz2001@users.noreply.github.com> Date: Tue, 2 Jul 2024 15:42:05 -0400 Subject: [PATCH 09/11] fix: disable js-ceramic node metrics (#195) --- operator/src/network/ceramic.rs | 3 ++- .../src/network/testdata/default_stubs/ceramic_init_configmap | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/operator/src/network/ceramic.rs b/operator/src/network/ceramic.rs index be8f8bdd..4ef8f34d 100644 --- a/operator/src/network/ceramic.rs +++ b/operator/src/network/ceramic.rs @@ -89,7 +89,8 @@ r#"{ "pubsub-topic": "${CERAMIC_NETWORK_TOPIC}" }, "node": { - "privateSeedUrl": "inplace:ed25519#${CERAMIC_ADMIN_PRIVATE_KEY}" + "privateSeedUrl": "inplace:ed25519#${CERAMIC_ADMIN_PRIVATE_KEY}", + "metrics-publisher-enabled": false }, "state-store": { "mode": "fs", diff --git a/operator/src/network/testdata/default_stubs/ceramic_init_configmap b/operator/src/network/testdata/default_stubs/ceramic_init_configmap index a4017138..edec30aa 100644 --- a/operator/src/network/testdata/default_stubs/ceramic_init_configmap +++ b/operator/src/network/testdata/default_stubs/ceramic_init_configmap @@ -9,7 +9,7 @@ Request { "apiVersion": "v1", "data": { "ceramic-init.sh": "#!/bin/bash\n\nset -eo pipefail\n\nexport CERAMIC_ADMIN_DID=$(composedb did:from-private-key ${CERAMIC_ADMIN_PRIVATE_KEY})\n\nCERAMIC_ADMIN_DID=$CERAMIC_ADMIN_DID envsubst < /ceramic-init/daemon-config.json > /config/daemon-config.json\n", - "daemon-config.json": "{\n \"anchor\": {\n \"auth-method\": \"did\",\n \"anchor-service-url\": \"${CAS_API_URL}\",\n \"ethereum-rpc-url\": \"${ETH_RPC_URL}\"\n },\n \"http-api\": {\n \"cors-allowed-origins\": [\n \"${CERAMIC_CORS_ALLOWED_ORIGINS}\"\n ],\n \"admin-dids\": [\n \"${CERAMIC_ADMIN_DID}\"\n ]\n },\n \"ipfs\": {\n \"mode\": \"remote\",\n \"host\": 
\"${CERAMIC_IPFS_HOST}\"\n },\n \"logger\": {\n \"log-level\": ${CERAMIC_LOG_LEVEL},\n \"log-to-files\": false\n },\n \"metrics\": {\n \"metrics-exporter-enabled\": false,\n \"prometheus-exporter-enabled\": true,\n \"prometheus-exporter-port\": 9464\n },\n \"network\": {\n \"name\": \"${CERAMIC_NETWORK}\",\n \"pubsub-topic\": \"${CERAMIC_NETWORK_TOPIC}\"\n },\n \"node\": {\n \"privateSeedUrl\": \"inplace:ed25519#${CERAMIC_ADMIN_PRIVATE_KEY}\"\n },\n \"state-store\": {\n \"mode\": \"fs\",\n \"local-directory\": \"${CERAMIC_STATE_STORE_PATH}\"\n },\n \"indexing\": {\n \"db\": \"postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@localhost/${POSTGRES_DB}\",\n \"allow-queries-before-historical-sync\": true,\n \"disable-composedb\": false,\n \"enable-historical-sync\": false\n }\n}" + "daemon-config.json": "{\n \"anchor\": {\n \"auth-method\": \"did\",\n \"anchor-service-url\": \"${CAS_API_URL}\",\n \"ethereum-rpc-url\": \"${ETH_RPC_URL}\"\n },\n \"http-api\": {\n \"cors-allowed-origins\": [\n \"${CERAMIC_CORS_ALLOWED_ORIGINS}\"\n ],\n \"admin-dids\": [\n \"${CERAMIC_ADMIN_DID}\"\n ]\n },\n \"ipfs\": {\n \"mode\": \"remote\",\n \"host\": \"${CERAMIC_IPFS_HOST}\"\n },\n \"logger\": {\n \"log-level\": ${CERAMIC_LOG_LEVEL},\n \"log-to-files\": false\n },\n \"metrics\": {\n \"metrics-exporter-enabled\": false,\n \"prometheus-exporter-enabled\": true,\n \"prometheus-exporter-port\": 9464\n },\n \"network\": {\n \"name\": \"${CERAMIC_NETWORK}\",\n \"pubsub-topic\": \"${CERAMIC_NETWORK_TOPIC}\"\n },\n \"node\": {\n \"privateSeedUrl\": \"inplace:ed25519#${CERAMIC_ADMIN_PRIVATE_KEY}\",\n \"metrics-publisher-enabled\": false\n },\n \"state-store\": {\n \"mode\": \"fs\",\n \"local-directory\": \"${CERAMIC_STATE_STORE_PATH}\"\n },\n \"indexing\": {\n \"db\": \"postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@localhost/${POSTGRES_DB}\",\n \"allow-queries-before-historical-sync\": true,\n \"disable-composedb\": false,\n \"enable-historical-sync\": false\n }\n}" }, "kind": "ConfigMap", 
"metadata": { From 14944521c3a9c4c6ab97c938ccb25f5a05651000 Mon Sep 17 00:00:00 2001 From: Samika Kashyap Date: Tue, 18 Jun 2024 15:28:30 -0700 Subject: [PATCH 10/11] chore: fix typos --- keramik/src/simulation.md | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/keramik/src/simulation.md b/keramik/src/simulation.md index d2cd8ec3..ee141825 100644 --- a/keramik/src/simulation.md +++ b/keramik/src/simulation.md @@ -160,8 +160,9 @@ kubectl apply -f custom-ipfs.yaml ### Example Custom Simulation for Ceramic Anchoring Benchmark Use this example to run a simulation which uses the CAS Api defined in the network spec. -anchorWaitTime : Wait time in seconds for how long we want to wait after streams have been created to check when they have been anchored. This should be a high number like 30-40 minutes. -throttleRequests: Number of requests to send per second. +`anchorWaitTime`: Wait time in seconds for how long we want to wait after streams have been created to check when they have been anchored. This should be a high number like 30-40 minutes. +`throttleRequests`: Number of requests to send per second. + ```yaml # ceramic-anchoring-benchamrk.yaml --- @@ -177,6 +178,8 @@ spec: runTime: 60 throttleRequests: 100 anchorWaitTime: 2400 +``` + ```shell kubectl apply -f ceramic-anchoring-benchamrk.yaml @@ -184,10 +187,13 @@ kubectl apply -f ceramic-anchoring-benchamrk.yaml ### Example Custom Simulation for cas-benchmark -Use this example to run a simulation you can pass in the the cas-api-url, the network-type, and the private secret ket as the controller. -By default the casNetwork is set to "https://cas-dev-direct.3boxlabs.com" and the casController is set to the private key of the controller DID. -casNetwork: The url of the CAS network to run the simulation against. -casController: The private key of the controller DID to use for the simulation. 
+Use this example to run a simulation you can pass in the the cas-api-url, the network-type, and the private secret key in the spec. +By default the casNetwork and casController are set to run against cas-dev-direct Api. + +`casNetwork`: The url of the CAS network to run the simulation against. + +`casController`: The private key of the controller DID to use for the simulation. + ```yaml # cas-benchmark.yaml --- @@ -204,6 +210,7 @@ spec: throttleRequests: 100 casNetwork: "https://cas-dev-direct.3boxlabs.com" casController: "did:key:" +``` ```shell kubectl apply -f cas-benchmark.yaml From 479245d4f11766a22151757fc62053b61118a904 Mon Sep 17 00:00:00 2001 From: Samika Kashyap Date: Fri, 12 Jul 2024 12:21:10 -0700 Subject: [PATCH 11/11] feat: runner logic for load generation --- operator/src/lgen/controller.rs | 193 +++++++++++++++ operator/src/lgen/job.rs | 118 +++++++++ operator/src/lgen/mod.rs | 3 + operator/src/lgen/spec.rs | 48 ++++ operator/src/main.rs | 7 +- runner/src/load_generator/gen.rs | 223 ++++++++++++++++++ runner/src/load_generator/mod.rs | 3 + .../utils/ceramic_models_utils.rs | 167 +++++++++++++ .../load_generator/utils/generator_utils.rs | 114 +++++++++ runner/src/load_generator/utils/mod.rs | 2 + runner/src/main.rs | 41 +++- 11 files changed, 904 insertions(+), 15 deletions(-) create mode 100644 operator/src/lgen/controller.rs create mode 100644 operator/src/lgen/job.rs create mode 100644 operator/src/lgen/mod.rs create mode 100644 operator/src/lgen/spec.rs create mode 100644 runner/src/load_generator/gen.rs create mode 100644 runner/src/load_generator/mod.rs create mode 100644 runner/src/load_generator/utils/ceramic_models_utils.rs create mode 100644 runner/src/load_generator/utils/generator_utils.rs create mode 100644 runner/src/load_generator/utils/mod.rs diff --git a/operator/src/lgen/controller.rs b/operator/src/lgen/controller.rs new file mode 100644 index 00000000..f1097229 --- /dev/null +++ b/operator/src/lgen/controller.rs @@ -0,0 +1,193 @@ 
+use std::{sync::Arc, time::Duration}; + +use futures::stream::StreamExt; +use k8s_openapi::api::{batch::v1::Job}; +use kube::{ + api::{Patch, PatchParams}, + client::Client, + core::object::HasSpec, + runtime::Controller, + Api, +}; +use kube::{ + runtime::{ + controller::Action, + watcher::{self, Config}, + }, + Resource, ResourceExt, +}; +use opentelemetry::{global, KeyValue}; +use rand::{distributions::Alphanumeric, thread_rng, Rng, RngCore}; + +use tracing::{debug, error, info}; + +use crate::{ + labels::MANAGED_BY_LABEL_SELECTOR, lgen::{ + job::{JobConfig, JobImageConfig, job_spec}, spec::{LoadGenerator, LoadGeneratorStatus}, + }, simulation::controller::{get_num_peers, monitoring_ready}, utils::Clock +}; + +use crate::network::{ + ipfs_rpc::{HttpRpcClient, IpfsRpcClient}, +}; + + +use crate::utils::{apply_job, apply_service, apply_stateful_set, Context}; + +pub const LOAD_GENERATOR_JOB_NAME: &str = "load-gen-job"; + +/// Handle errors during reconciliation. +fn on_error( + _network: Arc, + _error: &Error, + _context: Arc>, +) -> Action { + Action::requeue(Duration::from_secs(5)) +} + +/// Errors produced by the reconcile function. +#[derive(Debug, thiserror::Error)] +enum Error { + #[error("App error: {source}")] + App { + #[from] + source: anyhow::Error, + }, + #[error("Kube error: {source}")] + Kube { + #[from] + source: kube::Error, + }, +} + +/// Start a controller for the LoadGenerator CRD. 
+pub async fn run() { + let k_client = Client::try_default().await.unwrap(); + let context = Arc::new( + Context::new(k_client.clone(), HttpRpcClient).expect("should be able to create context"), + ); + + let load_generators: Api = Api::all(k_client.clone()); + let jobs = Api::::all(k_client.clone()); + + Controller::new(load_generators.clone(), Config::default()) + .owns( + jobs, + watcher::Config::default().labels(MANAGED_BY_LABEL_SELECTOR), + ) + .run(reconcile, on_error, context) + .for_each(|rec_res| async move { + match rec_res { + Ok((load_generator, _)) => { + info!(load_generator.name, "reconcile success"); + } + Err(err) => { + error!(?err, "reconcile error") + } + } + }) + .await; +} + +/// Perform a reconcile pass for the LoadGenerator CRD +async fn reconcile( + load_generator: Arc, + cx: Arc>, +) -> Result { + let meter = global::meter("keramik"); + let runs = meter + .u64_counter("load_generator_reconcile_count") + .with_description("Number of load generator reconciles") + .init(); + + match reconcile_(load_generator, cx).await { + Ok(action) => { + runs.add( + 1, + &[KeyValue { + key: "result".into(), + value: "ok".into(), + }], + ); + Ok(action) + } + Err(err) => { + runs.add( + 1, + &[KeyValue { + key: "result".into(), + value: "err".into(), + }], + ); + Err(err) + } + } +} + +/// Perform a reconcile pass for the LoadGenerator CRD +async fn reconcile_( + load_generator: Arc, + cx: Arc>, +) -> Result { + let spec = load_generator.spec(); + + let status = if let Some(status) = &load_generator.status { + status.clone() + } else { + // Generate new status with random name and nonce + LoadGeneratorStatus { + nonce: thread_rng().gen(), + name: "load-gen-" + .chars() + .chain( + thread_rng() + .sample_iter(&Alphanumeric) + .take(6) + .map(char::from), + ) + .collect::(), + } + }; + debug!(?spec, ?status, "reconcile"); + + let ns = load_generator.namespace().unwrap(); + let num_peers = get_num_peers(cx.clone(), &ns).await?; + + // The load generator does 
not deploy the monitoring resources but they must exist in order to + // collect the results of load generators. + let ready = monitoring_ready(cx.clone(), &ns).await?; + + if !ready { + return Ok(Action::requeue(Duration::from_secs(10))); + } + + + let job_image_config = JobImageConfig::from(spec); + + let job_config = JobConfig { + name: status.name.clone(), + scenario: spec.scenario.to_owned(), + users: spec.users.to_owned(), + run_time: spec.run_time.to_owned(), + nonce: status.nonce, + job_image_config: job_image_config.clone(), + throttle_requests: spec.throttle_requests, + }; + let orefs = load_generator + .controller_owner_ref(&()) + .map(|oref| vec![oref]) + .unwrap_or_default(); + + apply_job(cx.clone(), &ns, orefs.clone(), LOAD_GENERATOR_JOB_NAME, job_spec(job_config)).await?; + + let load_generators: Api = Api::namespaced(cx.k_client.clone(), &ns); + let _patched = load_generators + .patch_status( + &load_generator.name_any(), + &PatchParams::default(), + &Patch::Merge(serde_json::json!({ "status": status })), + ) + .await?; + + Ok(Action::requeue(Duration::from_secs(10))) +} \ No newline at end of file diff --git a/operator/src/lgen/job.rs b/operator/src/lgen/job.rs new file mode 100644 index 00000000..b30237c4 --- /dev/null +++ b/operator/src/lgen/job.rs @@ -0,0 +1,118 @@ +use std::collections::BTreeMap; + +use k8s_openapi::api::{batch::v1::JobSpec, core::v1::{ConfigMapVolumeSource, Container, EnvVar, PodSpec, PodTemplateSpec, Volume, VolumeMount}}; +use kube::api::ObjectMeta; +use crate::{lgen::spec::LoadGeneratorSpec, network::PEERS_CONFIG_MAP_NAME}; + +/// Configuration for job images. +#[derive(Clone, Debug)] +pub struct JobImageConfig { + /// Image for all jobs created by the load generator. + pub image: String, + /// Pull policy for image. 
+ pub image_pull_policy: String, +} + +impl Default for JobImageConfig { + fn default() -> Self { + Self { + image: "public.ecr.aws/r5b3e0r5/3box/keramik-runner:latest".to_owned(), + image_pull_policy: "Always".to_owned(), + } + } +} + +impl From<&LoadGeneratorSpec> for JobImageConfig { + fn from(value: &LoadGeneratorSpec) -> Self { + let default = Self::default(); + Self { + image: value.image.to_owned().unwrap_or(default.image), + image_pull_policy: value + .image_pull_policy + .to_owned() + .unwrap_or(default.image_pull_policy), + } + } +} + +/// JobConfig defines which properties of the JobSpec can be customized. +pub struct JobConfig { + pub name: String, + pub scenario: String, + pub users: u32, + pub run_time: u32, + pub throttle_requests: Option, + pub nonce: u32, + pub job_image_config: JobImageConfig, +} + +pub fn job_spec(config: JobConfig) -> JobSpec { + let env_vars = vec![ + EnvVar { + name: "RUNNER_OTLP_ENDPOINT".to_owned(), + value: Some("http://otel:4317".to_owned()), + ..Default::default() + }, + EnvVar { + name: "RUST_LOG".to_owned(), + value: Some("info,keramik_runner=trace".to_owned()), + ..Default::default() + }, + EnvVar { + name: "GENERATOR_NAME".to_owned(), + value: Some(config.name.to_owned()), + ..Default::default() + }, + EnvVar { + name: "GENERATOR_SCENARIO".to_owned(), + value: Some(config.scenario.to_owned()), + ..Default::default() + }, + ]; + + + JobSpec { + backoff_limit: Some(1), + template: PodTemplateSpec { + metadata: Some(ObjectMeta { + labels: Some(BTreeMap::from_iter(vec![( + "name".to_owned(), + "load-gen-job".to_owned(), + )])), + ..Default::default() + }), + spec: Some(PodSpec { + hostname: Some("job".to_owned()), + subdomain: Some("load-gen-job".to_owned()), + containers: vec![Container { + name: "job".to_owned(), + image: Some(config.job_image_config.image), + image_pull_policy: Some(config.job_image_config.image_pull_policy), + command: Some(vec![ + "/usr/bin/keramik-runner".to_owned(), + "generate_load".to_owned(), + 
]), + env: Some(env_vars), + volume_mounts: Some(vec![VolumeMount { + mount_path: "/keramik-peers".to_owned(), + name: "keramik-peers".to_owned(), + ..Default::default() + }]), + ..Default::default() + }], + volumes: Some(vec![Volume { + config_map: Some(ConfigMapVolumeSource { + default_mode: Some(0o755), + name: Some(PEERS_CONFIG_MAP_NAME.to_owned()), + ..Default::default() + }), + name: "keramik-peers".to_owned(), + ..Default::default() + }]), + restart_policy: Some("Never".to_owned()), + ..Default::default() + }), + }, + ..Default::default() + } +} \ No newline at end of file diff --git a/operator/src/lgen/mod.rs b/operator/src/lgen/mod.rs new file mode 100644 index 00000000..c8be9744 --- /dev/null +++ b/operator/src/lgen/mod.rs @@ -0,0 +1,3 @@ +pub mod controller; +pub mod job; +pub mod spec; \ No newline at end of file diff --git a/operator/src/lgen/spec.rs b/operator/src/lgen/spec.rs new file mode 100644 index 00000000..602d145d --- /dev/null +++ b/operator/src/lgen/spec.rs @@ -0,0 +1,48 @@ +use kube::CustomResource; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +/// Primary CRD for creating and managing a Load Generator. +#[derive(CustomResource, Serialize, Deserialize, Debug, Default, PartialEq, Clone, JsonSchema)] +#[kube( + group = "keramik.3box.io", + version = "v1alpha1", + kind = "LoadGenerator", + plural = "loadgenerators", + status = "LoadGeneratorStatus", + derive = "PartialEq", + namespaced +)] +#[serde(rename_all = "camelCase")] +pub struct LoadGeneratorSpec { + /// Load generator scenario + pub scenario: String, + /// Number of users + pub users: u32, + /// Time in minutes to run the load generator + pub run_time: u32, + /// Image for all jobs created by the load generator. + pub image: Option, + /// Pull policy for image. + pub image_pull_policy: Option, + /// Throttle requests (per second) for a load generator. Currently on a per-worker basis. 
+ pub throttle_requests: Option, + /// Request target for the scenario to be a success. + pub success_request_target: Option, + /// Enable dev mode for the load generator. + pub(crate) dev_mode: Option, + /// Log level to use. + pub(crate) log_level: Option, + /// Anchor wait time in seconds, use with ceramic-anchoring-benchmark scenario + pub anchor_wait_time: Option, + /// Network type to use for the load generator, use with cas-benchmark scenario + pub cas_network: Option, + /// Controller DID for the load generator, use with cas-benchmark scenario + pub cas_controller: Option, +} + +#[derive(Serialize, Deserialize, Debug, Default, PartialEq, Clone, JsonSchema)] +pub struct LoadGeneratorStatus { + pub name: String, + pub nonce: u32, +} \ No newline at end of file diff --git a/operator/src/main.rs b/operator/src/main.rs index 36308612..6679c146 100644 --- a/operator/src/main.rs +++ b/operator/src/main.rs @@ -1,4 +1,4 @@ -//! Operator is a long lived process that auotmates creating and managing Ceramic networks. +//! Operator is a long lived process that automates creating and managing Ceramic networks. 
#![deny(missing_docs)] use anyhow::Result; use clap::{command, Parser, Subcommand}; @@ -38,7 +38,8 @@ async fn main() -> Result<()> { Command::Daemon => { tokio::join!( keramik_operator::network::run(), - keramik_operator::simulation::run() + keramik_operator::simulation::run(), + keramik_operator::lgen::controller::run(), ); } }; @@ -54,4 +55,4 @@ async fn main() -> Result<()> { shutdown_meter_provider(); Ok(()) -} +} \ No newline at end of file diff --git a/runner/src/load_generator/gen.rs b/runner/src/load_generator/gen.rs new file mode 100644 index 00000000..722c66ee --- /dev/null +++ b/runner/src/load_generator/gen.rs @@ -0,0 +1,223 @@ +use std::collections::HashMap; +use std::path::PathBuf; + +use anyhow::Result; +use ceramic_core::StreamId; +use clap::Args; +use keramik_common::peer_info::Peer; +use tokio::time::{Duration, Instant};use crate::utils::parse_peers_info; +use crate::CommandResult; +use crate::load_generator::utils::generator_utils::StableLoadUser; +use crate::load_generator::utils::generator_utils::CeramicConfig; +use crate::load_generator::utils::generator_utils::CeramicDidType; +use crate::load_generator::utils::generator_utils::CeramicScenarioParameters; + +// TODO : Use this to envoke a particular scenario, currently we only have one +// so this is unused +pub enum WeekLongSimulationScenarios { + CreateModelInstancesSynced, +} + +/// Options to Simulate command +#[derive(Args, Debug)] +pub struct WeekLongSimulationOpts { + + /// Simulation scenario to run. + #[arg(long, env = "GENERATOR_SCENARIO")] + scenario: String, + + + /// Path to file containing the list of peers. + /// File should contian JSON encoding of Vec. + #[arg(long, env = "GENERATOR_PEERS_PATH")] + peers: PathBuf, + + /// Implmentation details: A task corresponds to a tokio task responsible + /// for making requests. 
They should have low memory overhead, so you can + /// create many tasks and then use `throttle_requests_rate` to constrain the overall + /// throughput on the node (specifically the HTTP requests made). + #[arg(long, default_value_t = 4, env = "GENERATOR_TASKS")] + tasks: usize, + + /// Duration of the simulation in hours + #[arg(long, env = "GENERATOR_RUN_TIME", default_value = "5h")] + run_time: String, + + + /// Unique value per test run to ensure uniqueness across different generator runs + #[arg(long, env = "GENERATOR_NONCE")] + nonce: u64, + + /// Option to throttle requests (per second) for load control + #[arg(long, env = "GENERATOR_THROTTLE_REQUESTS_RATE")] + throttle_requests_rate: Option, + +} + +//TODO : Use week long simulation scenario and separate out the logic which is ties to a particular scenario +// TODO : This specific behavior is for createModelInstancesSynced scenario +pub async fn simulate_load(opts: WeekLongSimulationOpts) -> Result { + let state = WeekLongSimulationState::try_from_opts(opts).await?; + + // Create two configs to simulate two independent nodes, each having it's own ceramic client + let config_1 = state.initialize_config().await?; + let config_2 = state.initialize_config().await?; + + let peer_addr_1 = state.peers[0].ceramic_addr().expect("Peer does not have a ceramic address"); + let peer_addr_2 = state.peers[1].ceramic_addr().expect("Peer does not have a ceramic address"); + + // Create two users to simulate two independent nodes + let stable_load_user_1 = StableLoadUser::setup_stability_test(config_1.admin_cli, Some(peer_addr_1.to_string())).await; + let stable_load_user_2 = StableLoadUser::setup_stability_test(config_2.admin_cli, Some(peer_addr_2.to_string())).await; + + // Generate a model for the users to create + let model = stable_load_user_1.ceramic_utils.generate_random_model().await?; + + // Index the model on the second node + stable_load_user_2.ceramic_utils.index_model(&model).await?; + + let run_time: u64 = 
state.run_time.parse().expect("Failed to parse run_time as u64"); + + println!("Model: {:?}", model); + let model_instance_creation_result = create_model_instances_continuously(stable_load_user_1, model, run_time).await; + println!("Model instance creation result: {:?}", model_instance_creation_result); + + Ok(CommandResult::Success) +} + +/** + * Create model instances continuously + * + * @param stable_load_user The user to create the model instances + * @param model The model schema to create model instances from + * @param duration_in_hours The duration to run the simulation in hours + * @return The result of the simulation + */ +pub async fn create_model_instances_continuously( + stable_load_user: StableLoadUser, + model: StreamId, + duration_in_hours: u64, +) -> Result<()> { + let start_time = Instant::now(); + + let duration = Duration::from_secs(duration_in_hours * 60 * 60); + let mut count = 0; + let mut error_map: HashMap = HashMap::new(); + // TODO : Make the rps configurable + // TODO : Make the channel size configurable + // TODO : Make the number of tasks configurable : tasks are currently 100 - + // increasing tasks can help increase throughput + let (tx, mut rx) = tokio::sync::mpsc::channel(10000); + let mut tasks = tokio::task::JoinSet::new(); + for i in 0..100 { + let user_clone = stable_load_user.clone(); + let model = model.clone(); + let tx = tx.clone(); + tasks.spawn(async move { + loop { + if start_time.elapsed() > duration { + println!("loop {i} Duration expired"); + break; + } + match tokio::time::timeout( + Duration::from_secs(5), + user_clone.ceramic_utils.create_random_mid(&model), + ) + .await + { + Ok(Ok(mid)) => { + match tx.send(Ok(mid.to_string())).await { + Ok(_) => {} + Err(e) => { + println!("Failed to send MID: {}", e); + } + } + } + Ok(Err(e)) => { + match tx.send(Err(e.to_string())).await { + Ok(_) => {} + Err(e) => { + println!("Failed to send error: {}", e); + } + } + } + Err(e) => { + match tx.send(Err(e.to_string())).await 
{ + Ok(_) => {} + Err(e) => { + println!("Failed to send error: {}", e); + } + } + } + } + } + }); + } + drop(tx); + loop { + let mut mid_vec: Vec> = Vec::new(); + if rx.recv_many(&mut mid_vec, 10).await > 0 { + for mid in mid_vec { + match mid { + Ok(_) => { + count += 1; + } + Err(err) => { + *error_map.entry(err).or_insert(0) += 1; + } + } + } + } + if start_time.elapsed() > duration { + tasks.abort_all(); + break; + } + } + // After the loop, print the error map + // TODO : Add observability to this, report these errors/counts + println!("Error counts:"); + for (error, count) in &error_map { + println!("Error: {}, Count: {}", error, count); + } + println!("Created {} MIDs in {} hours", count, duration_in_hours); + println!( + "Failed to create {} MIDs in {} hours", + error_map.values().sum::(), duration_in_hours +); + Ok(()) +} + +struct WeekLongSimulationState { + pub peers: Vec, + pub run_time: String, +} + + +impl WeekLongSimulationState { + /** + * Try to create a new instance of the WeekLongSimulationState from the given options + * + * @param opts The options to use + * @return The created instance + */ + async fn try_from_opts(opts: WeekLongSimulationOpts) -> Result { + Ok(Self { + peers: parse_peers_info(opts.peers.clone()).await?, + run_time: opts.run_time, + }) + } + + /** + * Initialize the configuration for the WeekLongSimulationState + * + * @return The created configuration + */ + async fn initialize_config(&self) -> Result { + // Create a CeramicScenarioParameters instance with default values + let params = CeramicScenarioParameters { + did_type: CeramicDidType::EnvInjected, + }; + + CeramicConfig::initialize_config(params).await + } +} \ No newline at end of file diff --git a/runner/src/load_generator/mod.rs b/runner/src/load_generator/mod.rs new file mode 100644 index 00000000..9dd541b9 --- /dev/null +++ b/runner/src/load_generator/mod.rs @@ -0,0 +1,3 @@ + +pub mod gen; +pub mod utils; diff --git 
a/runner/src/load_generator/utils/ceramic_models_utils.rs b/runner/src/load_generator/utils/ceramic_models_utils.rs new file mode 100644 index 00000000..b7e22ef8 --- /dev/null +++ b/runner/src/load_generator/utils/ceramic_models_utils.rs @@ -0,0 +1,167 @@ +use anyhow::Result; +use ceramic_http_client::{ + api::{self}, + ceramic_event::StreamId, + ModelAccountRelation, ModelDefinition, +}; +use reqwest::Client; +use crate::scenario::ceramic::models::{RandomModelInstance, SmallModel}; +use crate::scenario::ceramic::CeramicClient; + +#[derive(Clone, Debug)] +pub struct CeramicModelUtil { + /** + * The ceramic client + */ + pub ceramic_client: CeramicClient, + /** + * The http client + */ + pub http_client: Client, + /** + * The base URL + */ + pub base_url: Option, +} + + +impl CeramicModelUtil { + /** + * Index a model + * + * @param model_id The model to index + */ + pub async fn index_model(&self, model_id: &StreamId) -> Result<()> { + let admin_code = self.get_admin_code().await?; + println!("Admin code: {:?}", admin_code); + let url = self.build_url(&self.ceramic_client.index_endpoint()).await.unwrap(); + let req = self + .ceramic_client + .create_index_model_request(model_id, &admin_code) + .unwrap(); + let resp = self.http_client.post(url).json(&req).send().await?; + if resp.status().is_success() { + Ok(()) + } else { + Err(anyhow::anyhow!("Failed to index model")) + } + } + + /** + * Generate a random model + * + * @return The stream id of the created model + */ + pub async fn generate_random_model(&self) -> Result { + let small_model = + ModelDefinition::new::("load_test_small_model", ModelAccountRelation::List) + .unwrap(); + self.setup_model(small_model).await + } + + /** + * Setup a model + * + * @param model The model to setup + * @return The stream id of the created model + */ + async fn setup_model(&self, model: ModelDefinition) -> Result { + let url = self + .build_url(&self.ceramic_client.streams_endpoint()) + .await + .unwrap(); + info!("URL: {}", 
url); + let req = self.ceramic_client.create_model_request(&model).await.unwrap(); + let req = self.http_client.post(url).json(&req); + let resp: reqwest::Response = req.send().await?; + if resp.status() == reqwest::StatusCode::OK { + let streams_response: api::StreamsResponse = resp.json().await?; + info!("Stream ID: {:?}", streams_response.stream_id); + Ok(streams_response.stream_id) + } else { + Err(anyhow::anyhow!( + "Failed to setup model: status {:?} , resp_text {:?}", + resp.status(), + resp.text().await + )) + } + } + + /** + * Create a random model instance + * + * @param model The model which defines the schema of the model instance + * @return The stream id of the created model instance + */ + pub async fn create_random_mid(&self, model: &StreamId) -> Result { + let data = SmallModel::random(); + return self.create_mid(model, &data).await; + } + + /** + * Create a model instance + * + * @param model The model which defines the schema of the model instance + * @param data The data to create + * @return The stream id of the created model instance + */ + async fn create_mid(&self, model: &StreamId, data: &SmallModel) -> Result { + let url = self + .build_url(&self.ceramic_client.streams_endpoint()) + .await + .unwrap(); + let req = self + .ceramic_client + .create_list_instance_request(model, data) + .await + .unwrap(); + let req = self.http_client.post(url).json(&req); + let resp: reqwest::Response = req.send().await?; + if resp.status() == reqwest::StatusCode::OK { + let parsed_resp: api::StreamsResponse = resp.json().await?; + Ok(parsed_resp.stream_id) + } else { + Err(anyhow::anyhow!( + "Failed to create model: status {:?} , resp_text {:?}", + resp.status(), + resp.text().await + )) + } + } + + /** + * Get the admin code + * + * @return The admin code + */ + async fn get_admin_code(&self) -> Result { + let url = self.build_url(&self.ceramic_client.admin_code_endpoint()).await.unwrap(); + let resp = self.http_client.get(url).send().await?; + 
println!("Admin code response: {:?}", &resp); + let admin_code_resp: api::AdminCodeResponse = resp.json().await?; + println!("Admin code response: {:?}", admin_code_resp); + let code = &admin_code_resp.code; + println!("Admin code: {:?}", code); + Ok(code.to_string()) + } + + /** + * Build a URL + * + * @param path The path to build the URL from + * @return The built URL + */ + async fn build_url(&self, path: &str) -> Result<String> { + let base = self + .base_url + .as_ref() + .ok_or_else(|| anyhow::anyhow!("Base URL is not set"))?; + let separator = if path.starts_with('/') || base.ends_with('/') { + "" + } else { + "/" + }; + let full_url = format!("{}{}{}", base, separator, path); + Ok(full_url) + } +} \ No newline at end of file diff --git a/runner/src/load_generator/utils/generator_utils.rs b/runner/src/load_generator/utils/generator_utils.rs new file mode 100644 index 00000000..d03d5102 --- /dev/null +++ b/runner/src/load_generator/utils/generator_utils.rs @@ -0,0 +1,114 @@ +use anyhow::Result; +use reqwest::Client; +use std::time::Duration; +use ceramic_http_client::CeramicHttpClient; +use crate::scenario::ceramic::Credentials; +use crate::scenario::ceramic::CeramicClient; + +use super::ceramic_models_utils::CeramicModelUtil; + +pub static HTTP_TIMEOUT: Duration = Duration::from_secs(5); +pub static HTTP_POOL_MAX_IDLE_PER_HOST: usize = 300; + +#[derive(Clone, Debug)] +pub struct CeramicConfig { + pub admin_cli: CeramicClient, + pub user_cli: CeramicClient, + pub params: CeramicScenarioParameters, +} + +#[derive(Clone, Debug)] +pub struct CeramicScenarioParameters { + pub did_type: CeramicDidType, +} + +#[derive(Clone, Debug)] +pub enum CeramicDidType { + // Fetch DID from env + EnvInjected, + // Generate DID from scratch + UserGenerated, +} + + +impl CeramicConfig { + pub async fn initialize_config(params: CeramicScenarioParameters) -> Result<Self> { + let creds = Credentials::admin_from_env().await?; + let admin_cli = CeramicHttpClient::new(creds.signer); + + let 
user_cli = match params.did_type { + CeramicDidType::EnvInjected => { + let creds = Credentials::from_env().await?; + CeramicHttpClient::new(creds.signer) + } + CeramicDidType::UserGenerated => { + let creds = Credentials::new_generate_did_key().await?; + CeramicHttpClient::new(creds.signer) + } + }; + + Ok(Self { + admin_cli, + user_cli, + params, + }) + } +} + +/** + * The StableLoadUser struct with an HTTP client tied to a ceramic client and a throttle rate. + */ +#[derive(Clone)] +pub struct StableLoadUser { + /** + * The ceramic client connected to the target peer + */ + pub ceramic_client: CeramicClient, + /** + * The HTTP client to send the requests + */ + pub http_client: Client, + /** + * Maximum number of requests to send per second + */ + pub throttle_rate: Duration, + /** + * The base URL + */ + pub base_url: Option<String>, + /** + * Methods associated with the ceramic client + */ + pub ceramic_utils: CeramicModelUtil, +} + +// Methods associated with StableLoadUser +impl StableLoadUser { + + // TODO : Write a setup function which creates the struct by accepting a targetPeerAddress and ceramicClient and returns a StabilityTestUtils + pub async fn setup_stability_test( + ceramic_client: CeramicClient, + base_url: Option<String>, + ) -> StableLoadUser { + let http_client = Client::builder() + .timeout(HTTP_TIMEOUT) + .cookie_store(false) + .pool_max_idle_per_host(HTTP_POOL_MAX_IDLE_PER_HOST) + .build() + .unwrap(); + + let ceramic_utils = CeramicModelUtil { + ceramic_client: ceramic_client.clone(), + http_client: http_client.clone(), + base_url: base_url.clone(), + }; + + return StableLoadUser { + ceramic_client, + http_client, + throttle_rate: Duration::from_millis(100), + base_url, + ceramic_utils, + }; + } +} diff --git a/runner/src/load_generator/utils/mod.rs b/runner/src/load_generator/utils/mod.rs new file mode 100644 index 00000000..1657bf8c --- /dev/null +++ b/runner/src/load_generator/utils/mod.rs @@ -0,0 +1,2 @@ +pub mod generator_utils; +pub mod 
ceramic_models_utils; diff --git a/runner/src/main.rs b/runner/src/main.rs index 7ab68edb..fe0a17d9 100644 --- a/runner/src/main.rs +++ b/runner/src/main.rs @@ -5,16 +5,16 @@ mod bootstrap; mod scenario; mod simulate; mod utils; +mod load_generator; +use crate::gen::simulate_load; use keramik_common::telemetry; - use anyhow::Result; use clap::{Parser, Subcommand}; use opentelemetry::global::{shutdown_meter_provider, shutdown_tracer_provider}; use opentelemetry::{global, KeyValue}; use tracing::info; - -use crate::{bootstrap::bootstrap, simulate::simulate}; +use crate::{bootstrap::bootstrap, simulate::simulate, load_generator::gen}; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] @@ -39,6 +39,8 @@ pub enum Command { Simulate(simulate::Opts), /// Do nothing and exit Noop, + // TODO: Generate load, currently this command is not used + GenerateLoad(gen::WeekLongSimulationOpts), } impl Command { @@ -47,6 +49,8 @@ impl Command { Command::Bootstrap(_) => "bootstrap", Command::Simulate(_) => "simulate", Command::Noop => "noop", + // TODO : After making operator changes this command will be used to generate load + Command::GenerateLoad(_) => "generate_load", } } } @@ -63,11 +67,20 @@ pub enum CommandResult { Failure(anyhow::Error), } +// TODO : Enable metrics/tracing for load generator command +// Metrics and tracing have been disabled for load generator due to memory issues. 
+// Memory grows in the runner when this is enabled not making it live long enough to finish the load generation #[tokio::main] async fn main() -> Result<()> { let args = Cli::parse(); - telemetry::init_tracing(Some(args.otlp_endpoint.clone())).await?; - let metrics_controller = telemetry::init_metrics_otlp(args.otlp_endpoint.clone()).await?; + if !matches!(args.command, Command::GenerateLoad(_)) { + telemetry::init_tracing(Some(args.otlp_endpoint.clone())).await?; + } + let metrics_controller = if matches!(args.command, Command::GenerateLoad(_)) { + None + } else { + Some(telemetry::init_metrics_otlp(args.otlp_endpoint.clone()).await?) + }; info!("starting runner"); let meter = global::meter("keramik"); @@ -79,17 +92,21 @@ async fn main() -> Result<()> { runs.add(1, &[KeyValue::new("command", args.command.name())]); info!(?args.command, ?args.otlp_endpoint, "starting runner"); - let success = match args.command { + let success = match args.command.clone() { Command::Bootstrap(opts) => bootstrap(opts).await?, Command::Simulate(opts) => simulate(opts).await?, + Command::GenerateLoad(opts) => simulate_load(opts).await?, Command::Noop => CommandResult::Success, }; - - // Flush traces and metrics before shutdown - shutdown_tracer_provider(); - metrics_controller.force_flush()?; - drop(metrics_controller); - shutdown_meter_provider(); + if !matches!(args.command, Command::GenerateLoad(_)) { + // Flush traces and metrics before shutdown + shutdown_tracer_provider(); + if let Some(metrics_controller) = metrics_controller { + metrics_controller.force_flush()?; + } + drop(metrics_controller); + shutdown_meter_provider(); + } // This fixes lost metrics not sure why :( // Seems to be related to the inflight gRPC request getting cancelled