From 7f4d9e585f8de0ce5b6532b3e401ca0138b6c884 Mon Sep 17 00:00:00 2001 From: Michele Baldessari Date: Tue, 28 Jan 2025 11:43:18 +0100 Subject: [PATCH 1/5] Add a yaml-flattening ruby script This will transform a yaml like this one: metadata_version: "1.0" name: industrial-edge pattern_version: "2.0" display_name: Industrial Edge repo_url: https://github.com/mbaldessari/industrial-edge docs_repo_url: https://github.com/mbaldessari/docs issues_url: https://github.com/validatedpatterns/industrial-edge/issues docs_url: https://validatedpatterns.io/patterns/industrial-edge/ ci_url: https://validatedpatterns.io/ci/?pattern=industrialedge tier: tested owners: mbaldessari, darkdoc requirements: hub: # Main cluster compute: platform: gcp: replicas: 5 type: n1-standard-16 azure: replicas: 5 type: Standard_D16s_v5 aws: replicas: 4 type: m5.4xlarge controlPlane: platform: gcp: replicas: 3 type: n1-standard-16 azure: replicas: 3 type: Standard_D16s_v3 aws: replicas: 3 type: m5.4xlarge spoke: # optional - represents the clusters imported into ACM compute: platform: gcp: replicas: 3 type: n1-standard-16 azure: replicas: 3 type: Standard_D16s_v5 aws: replicas: 3 type: m5.2xlarge controlPlane: platform: gcp: replicas: 3 type: n1-standard-16 azure: replicas: 3 type: Standard_D16s_v5 aws: replicas: 3 type: m5.2xlarge extra_features: hypershift_support: false spoke_support: true external_requirements: To this: :metadata_version: 1.0 :name: industrial-edge :pattern_version: 2.0 :display_name: Industrial Edge :repo_url: https://github.com/mbaldessari/industrial-edge :docs_repo_url: https://github.com/mbaldessari/docs :issues_url: https://github.com/validatedpatterns/industrial-edge/issues :docs_url: https://validatedpatterns.io/patterns/industrial-edge/ :ci_url: https://validatedpatterns.io/ci/?pattern=industrialedge :tier: tested :owners: mbaldessari, darkdoc :requirements_hub_compute_platform_gcp_replicas: 5 :requirements_hub_compute_platform_gcp_type: n1-standard-16 
:requirements_hub_compute_platform_azure_replicas: 5 :requirements_hub_compute_platform_azure_type: Standard_D16s_v5 :requirements_hub_compute_platform_aws_replicas: 4 :requirements_hub_compute_platform_aws_type: m5.4xlarge :requirements_hub_controlPlane_platform_gcp_replicas: 3 :requirements_hub_controlPlane_platform_gcp_type: n1-standard-16 :requirements_hub_controlPlane_platform_azure_replicas: 3 :requirements_hub_controlPlane_platform_azure_type: Standard_D16s_v3 :requirements_hub_controlPlane_platform_aws_replicas: 3 :requirements_hub_controlPlane_platform_aws_type: m5.4xlarge :requirements_spoke_compute_platform_gcp_replicas: 3 :requirements_spoke_compute_platform_gcp_type: n1-standard-16 :requirements_spoke_compute_platform_azure_replicas: 3 :requirements_spoke_compute_platform_azure_type: Standard_D16s_v5 :requirements_spoke_compute_platform_aws_replicas: 3 :requirements_spoke_compute_platform_aws_type: m5.2xlarge :requirements_spoke_controlPlane_platform_gcp_replicas: 3 :requirements_spoke_controlPlane_platform_gcp_type: n1-standard-16 :requirements_spoke_controlPlane_platform_azure_replicas: 3 :requirements_spoke_controlPlane_platform_azure_type: Standard_D16s_v5 :requirements_spoke_controlPlane_platform_aws_replicas: 3 :requirements_spoke_controlPlane_platform_aws_type: m5.2xlarge :extra_features_hypershift_support: false :extra_features_spoke_support: true :external_requirements: This will allow us to consume this metadata information from asciidoc. --- utils/flatten_yaml.rb | 48 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100755 utils/flatten_yaml.rb diff --git a/utils/flatten_yaml.rb b/utils/flatten_yaml.rb new file mode 100755 index 000000000..dd9bc073e --- /dev/null +++ b/utils/flatten_yaml.rb @@ -0,0 +1,48 @@ +#!/usr/bin/env ruby + +# This reads a yaml file and outputs all yaml leaf objects +# as ascii doc variables. 
The idea is to read in the pattern-metadata.yaml +# file and output it wholesale into a file so it can be consumed +# by asciidoc + +require 'yaml' + +if ARGV.length != 1 + puts "Please run: #{$0} <path/to/pattern-metadata.yaml>" + exit(1) +end + +def flatten_hash(hash, prefix = "") + flat_hash = {} + hash.each do |key, value| + new_key = prefix.empty? ? key.to_s : "#{prefix}_#{key}" + if value.is_a?(Hash) + flat_hash.merge!(flatten_hash(value, new_key)) + else + flat_hash[new_key] = value + end + end + flat_hash +end + +def process_yaml(file_path) + begin + yaml_data = YAML.load_file(file_path) + flat_data = flatten_hash(yaml_data) + + puts "# This file has been generated automatically from the pattern-metadata.yaml file" + puts "# Do not edit manually!" + # Print each key-value pair with a leading column to define an + # ascii doc variable + flat_data.each do |key, value| + puts ":#{key}: #{value}" + end + rescue Errno::ENOENT + puts "Error: File not found at '#{file_path}'. Please provide a valid file path." + rescue Psych::SyntaxError => e + puts "Error: Syntax error in YAML file. Details:" + puts e.message + end +end + +process_yaml(ARGV[0]) From c7dc571a7940567f972aac4d679964d50a0ae29d Mon Sep 17 00:00:00 2001 From: Michele Baldessari Date: Tue, 28 Jan 2025 12:25:19 +0100 Subject: [PATCH 2/5] Add central metadata doc workflow --- .github/workflows/metadata-docs.yml | 101 ++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 .github/workflows/metadata-docs.yml diff --git a/.github/workflows/metadata-docs.yml b/.github/workflows/metadata-docs.yml new file mode 100644 index 000000000..7bb5414ea --- /dev/null +++ b/.github/workflows/metadata-docs.yml @@ -0,0 +1,101 @@ +# This GH action has the goal to fetch the pattern-metadata.yaml file (if existing) +# and output all of its flattened yaml structure into asciidoc variables for the pattern +# +# It needs a secret called DOCS_TOKEN to be set in the pattern's repository +# that calls this GH action. 
It has to be a PAT token with the following +# permissions on the `validatedpatterns/docs` repository +# * Read access to actions and metadata +# * Read and Write access to code and pull requests +# +# This job will checkout the docs repo and propose a PR to where it updates +# the file ./modules/<pattern>/metadata-<pattern>.yaml where the pattern's +# metadata will land transformed into asciidoc variables. +# +# Note: This action is to be imported from a pattern and not used in the docs repo +# itself. We maintain it in the docs repo in order to make it easier to have a single +# workflow across all patterns +--- +name: Update docs from pattern's metadata + +on: + workflow_call: + inputs: + DOCS_BRANCH: + description: "Branch of the docs git repo to use" + required: false + type: string + default: "main" + +env: + DOCS_DIR: docs + PATTERN_DIR: pattern + METADATA: pattern-metadata.yaml + GIT_EMAIL: vp-team@redhat.com + GIT_USER: Github Actions + +jobs: + docs-push: + if: | + github.repository_owner == 'validatedpatterns' || + github.repository_owner == 'validatedpatterns-sandbox' || + github.repository_owner == 'validatedpatterns-demos' + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + id-token: write + + steps: + - name: Checkout pattern repository + uses: actions/checkout@v4 + with: + path: ${{ env.PATTERN_DIR }} + + - name: Fail if this repository is different than the one in metadata + run: |- + set -e + repo=$(yq -r .repo_url "${{ env.METADATA }}") + full_url="https://github.com/${{ github.repository }}" + if [ "${full_url}" != "${repo}" ]; then + echo "Error ${{ github.repository }} != ${full_url}" + exit 1 + fi + docs_repo=$(yq -r .docs_repo_url "${{ env.METADATA }}" | sed -e 's%https://github.com/%%') + pattern=$(yq -r .name "${{ env.METADATA }}") + { + echo "DOCS_PR_BRANCH=sizing-pr-${pattern}" + echo "DOCS_REPO=${docs_repo}" + echo "PATTERN=${pattern}" + } >> "${GITHUB_ENV}" + working-directory: ${{ env.PATTERN_DIR }} + + - name: Checkout docs 
repository + uses: actions/checkout@v4 + with: + path: ${{ env.DOCS_DIR }} + repository: ${{ env.DOCS_REPO }} + ref: ${{ inputs.DOCS_BRANCH }} + token: ${{ secrets.DOCS_TOKEN }} + + - name: Template the cluster variables on to the patterns-variables .adoc file + run: |- + set -e + ./${{ env.DOCS_DIR }}/utils/flatten_yaml.rb \ + ./${{ env.PATTERN_DIR }}/pattern-metadata.yaml | \ + tee "${{ env.DOCS_DIR }}/modules/metadata-${{ env.PATTERN }}.adoc" + + - name: Push to docs git repo + run: |- + set -e + git config --global user.email "${{ env.GIT_EMAIL }}" + git config --global user.name "${{ env.GIT_USER }}" + git checkout -B "${{ env.DOCS_PR_BRANCH }}" "${{ inputs.DOCS_BRANCH }}" + git add modules/metadata-${{ env.PATTERN }}.adoc + git commit -m "Update cluster variables for ${{ env.PATTERN }}" || (echo "Nothing to commit"; exit 0) + git push origin "${{ env.DOCS_PR_BRANCH }}" -f + gh pr create -B "${{ inputs.DOCS_BRANCH }}" -H "${{ env.DOCS_PR_BRANCH }}" \ + --title 'Merge cluster variables change for ${{ env.PATTERN }}' --body 'Created by Github action' || \ + gh pr edit -B "${{ inputs.DOCS_BRANCH }}" --title 'Cluster variables change for ${{ env.PATTERN }}' --body 'Created by Github action' + working-directory: ${{ env.DOCS_DIR }} + env: + GITHUB_TOKEN: ${{ secrets.DOCS_TOKEN }} From e0cf27b14f777a4827feea5dbf318e990448d8ac Mon Sep 17 00:00:00 2001 From: Michele Baldessari Date: Tue, 28 Jan 2025 14:12:22 +0100 Subject: [PATCH 3/5] Add an initial cluster sizing template --- modules/cluster-sizing-template.adoc | 97 ++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 modules/cluster-sizing-template.adoc diff --git a/modules/cluster-sizing-template.adoc b/modules/cluster-sizing-template.adoc new file mode 100644 index 000000000..858ce438b --- /dev/null +++ b/modules/cluster-sizing-template.adoc @@ -0,0 +1,97 @@ +:_content-type: CONCEPT +:imagesdir: ../../images + +[id="{name}-openshift-hub-cluster-size"] += {display_name} pattern 
hub/datacenter cluster size + +The {display_name} pattern has been tested with a defined set of specifically +tested configurations that represent the most common combinations that Red Hat +OpenShift Container Platform customers are using or deploying for the x86_64 +architecture. + +The datacenter hub OpenShift cluster uses the following deployment configuration: + +.Hub cluster minimum requirements +[cols="<,^,<,<"] +|=== +| Cloud Provider | Node Type | Number of nodes | Instance Type + +ifdef::requirements_hub_controlPlane_platform_aws_replicas[] +| Amazon Web Services +| Control Plane +| {requirements_hub_controlPlatform_platform_aws_replicas} +| {requirements_hub_controlPlatform_platform_aws_type} +| Amazon Web Services +| Worker +| {requirements_hub_compute_platform_aws_replicas} +| {requirements_hub_compute_platform_aws_type} +endif::requirements_hub_controlPlane_platform_aws_replicas[] + +ifdef::requirements_hub_controlPlane_platform_gcp_replicas[] +| Google Cloud Platform +| Control Plane +| {requirements_hub_controlPlatform_platform_gcp_replicas} +| {requirements_hub_controlPlatform_platform_gcp_type} +| Google Cloud Platform +| Worker +| {requirements_hub_compute_platform_gcp_replicas} +| {requirements_hub_compute_platform_gcp_type} +endif::requirements_hub_controlPlane_platform_gcp_replicas[] + +ifdef::requirements_hub_controlPlane_platform_azure_replicas[] +| Microsoft Azure +| Control Plane +| {requirements_hub_controlPlatform_platform_azure_replicas} +| {requirements_hub_controlPlatform_platform_azure_type} +| Microsoft Azure +| Worker +| {requirements_hub_compute_platform_azure_replicas} +| {requirements_hub_compute_platform_azure_type} +endif::requirements_hub_controlPlane_platform_azure_replicas[] + +|=== + +ifeval::["{extra_features_spoke_support}" == "true"] +== {display_name} spoke/managed cluster size minimum requirements + +.Spoke cluster minimum requirements +[cols="<,^,<,<"] +|=== +| Cloud Provider | Node Type | Number of nodes | 
Instance Type + +ifdef::requirements_spoke_controlPlane_platform_aws_replicas[] +| Amazon Web Services +| Control Plane +| {requirements_spoke_controlPlatform_platform_aws_replicas} +| {requirements_spoke_controlPlatform_platform_aws_type} +| Amazon Web Services +| Worker +| {requirements_spoke_compute_platform_aws_replicas} +| {requirements_spoke_compute_platform_aws_type} +endif::requirements_spoke_controlPlane_platform_aws_replicas[] + +ifdef::requirements_spoke_controlPlane_platform_gcp_replicas[] +| Google Cloud Platform +| Control Plane +| {requirements_spoke_controlPlatform_platform_gcp_replicas} +| {requirements_spoke_controlPlatform_platform_gcp_type} +| Google Cloud Platform +| Worker +| {requirements_spoke_compute_platform_gcp_replicas} +| {requirements_spoke_compute_platform_gcp_type} +endif::requirements_spoke_controlPlane_platform_gcp_replicas[] + +ifdef::requirements_spoke_controlPlane_platform_azure_replicas[] +| Microsoft Azure +| Control Plane +| {requirements_spoke_controlPlatform_platform_azure_replicas} +| {requirements_spoke_controlPlatform_platform_azure_type} +| Microsoft Azure +| Worker +| {requirements_spoke_compute_platform_azure_replicas} +| {requirements_spoke_compute_platform_azure_type} +endif::requirements_spoke_controlPlane_platform_azure_replicas[] + +|=== + +endif::[] From a292c35ae6497d90cdaf34a573091a19b0faa1ea Mon Sep 17 00:00:00 2001 From: Michele Baldessari Date: Tue, 28 Jan 2025 18:28:26 +0100 Subject: [PATCH 4/5] Add an example of how CI would generate metadata file This will be regenerated in the future by the pattern (in this case IE) GH action itself. 
--- .../metadata-industrial-edge.adoc | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 modules/industrial-edge/metadata-industrial-edge.adoc diff --git a/modules/industrial-edge/metadata-industrial-edge.adoc b/modules/industrial-edge/metadata-industrial-edge.adoc new file mode 100644 index 000000000..a1f728ffa --- /dev/null +++ b/modules/industrial-edge/metadata-industrial-edge.adoc @@ -0,0 +1,38 @@ +:metadata_version: 1.0 +:name: industrial-edge +:pattern_version: 2.0 +:display_name: Industrial Edge +:repo_url: https://github.com/mbaldessari/industrial-edge +:docs_repo_url: https://github.com/mbaldessari/docs +:issues_url: https://github.com/validatedpatterns/industrial-edge/issues +:docs_url: https://validatedpatterns.io/patterns/industrial-edge/ +:ci_url: https://validatedpatterns.io/ci/?pattern=industrialedge +:tier: tested +:owners: mbaldessari, darkdoc +:requirements_hub_compute_platform_gcp_replicas: 5 +:requirements_hub_compute_platform_gcp_type: n1-standard-16 +:requirements_hub_compute_platform_azure_replicas: 5 +:requirements_hub_compute_platform_azure_type: Standard_D16s_v5 +:requirements_hub_compute_platform_aws_replicas: 4 +:requirements_hub_compute_platform_aws_type: m5.4xlarge +:requirements_hub_controlPlane_platform_gcp_replicas: 3 +:requirements_hub_controlPlane_platform_gcp_type: n1-standard-16 +:requirements_hub_controlPlane_platform_azure_replicas: 3 +:requirements_hub_controlPlane_platform_azure_type: Standard_D16s_v3 +:requirements_hub_controlPlane_platform_aws_replicas: 3 +:requirements_hub_controlPlane_platform_aws_type: m5.4xlarge +:requirements_spoke_compute_platform_gcp_replicas: 3 +:requirements_spoke_compute_platform_gcp_type: n1-standard-16 +:requirements_spoke_compute_platform_azure_replicas: 3 +:requirements_spoke_compute_platform_azure_type: Standard_D16s_v5 +:requirements_spoke_compute_platform_aws_replicas: 3 +:requirements_spoke_compute_platform_aws_type: m5.2xlarge 
+:requirements_spoke_controlPlane_platform_gcp_replicas: 3 +:requirements_spoke_controlPlane_platform_gcp_type: n1-standard-16 +:requirements_spoke_controlPlane_platform_azure_replicas: 3 +:requirements_spoke_controlPlane_platform_azure_type: Standard_D16s_v5 +:requirements_spoke_controlPlane_platform_aws_replicas: 3 +:requirements_spoke_controlPlane_platform_aws_type: m5.2xlarge +:extra_features_hypershift_support: false +:extra_features_spoke_support: true +:external_requirements: From 977b08aa00809d860b16b30fb723324f829e8db0 Mon Sep 17 00:00:00 2001 From: Michele Baldessari Date: Tue, 28 Jan 2025 14:19:25 +0100 Subject: [PATCH 5/5] Add examples --- .github/workflows/metadata-docs.yml | 8 +- .../industrial-edge/cluster-sizing.adoc | 14 ++ .../industrial-edge/cluster-sizing.md | 178 ------------------ modules/cluster-sizing-template.adoc | 52 +++-- 4 files changed, 52 insertions(+), 200 deletions(-) create mode 100644 content/patterns/industrial-edge/cluster-sizing.adoc delete mode 100644 content/patterns/industrial-edge/cluster-sizing.md diff --git a/.github/workflows/metadata-docs.yml b/.github/workflows/metadata-docs.yml index 7bb5414ea..7261c6c62 100644 --- a/.github/workflows/metadata-docs.yml +++ b/.github/workflows/metadata-docs.yml @@ -35,6 +35,7 @@ env: jobs: docs-push: + # We do not want to run this job on forked repositories if: | github.repository_owner == 'validatedpatterns' || github.repository_owner == 'validatedpatterns-sandbox' || @@ -80,9 +81,10 @@ jobs: - name: Template the cluster variables on to the patterns-variables .adoc file run: |- set -e + mkdir -p modules/${{ env.PATTERN }} ./${{ env.DOCS_DIR }}/utils/flatten_yaml.rb \ - ./${{ env.PATTERN_DIR }}/pattern-metadata.yaml | \ - tee "${{ env.DOCS_DIR }}/modules/metadata-${{ env.PATTERN }}.adoc" + ./${{ env.PATTERN_DIR }}/${{ env.METADATA }} | \ + tee "${{ env.DOCS_DIR }}/modules/${{ env.PATTERN }}/metadata-${{ env.PATTERN }}.adoc" - name: Push to docs git repo run: |- @@ -90,7 +92,7 @@ jobs: 
git config --global user.email "${{ env.GIT_EMAIL }}" git config --global user.name "${{ env.GIT_USER }}" git checkout -B "${{ env.DOCS_PR_BRANCH }}" "${{ inputs.DOCS_BRANCH }}" - git add modules/metadata-${{ env.PATTERN }}.adoc + git add modules/${{ env.PATTERN }}/metadata-${{ env.PATTERN }}.adoc git commit -m "Update cluster variables for ${{ env.PATTERN }}" || (echo "Nothing to commit"; exit 0) git push origin "${{ env.DOCS_PR_BRANCH }}" -f gh pr create -B "${{ inputs.DOCS_BRANCH }}" -H "${{ env.DOCS_PR_BRANCH }}" \ diff --git a/content/patterns/industrial-edge/cluster-sizing.adoc b/content/patterns/industrial-edge/cluster-sizing.adoc new file mode 100644 index 000000000..8bd843838 --- /dev/null +++ b/content/patterns/industrial-edge/cluster-sizing.adoc @@ -0,0 +1,14 @@ +--- +title: Cluster sizing +weight: 50 +aliases: /industrial-edge/mcg-cluster-sizing/ +--- + +:toc: +:imagesdir: /images +:_content-type: ASSEMBLY + +include::modules/comm-attributes.adoc[] +include::modules/industrial-edge/metadata-industrial-edge.adoc[] + +include::modules/cluster-sizing-template.adoc[] diff --git a/content/patterns/industrial-edge/cluster-sizing.md b/content/patterns/industrial-edge/cluster-sizing.md deleted file mode 100644 index e8e549104..000000000 --- a/content/patterns/industrial-edge/cluster-sizing.md +++ /dev/null @@ -1,178 +0,0 @@ ---- -title: Cluster Sizing -weight: 50 -aliases: /industrial-edge/cluster-sizing/ ---- -# OpenShift Cluster Sizing for the Industrial Edge Pattern - -## Tested Platforms - -The **Industrial-Edge** pattern has been tested in the following Certified Cloud Providers. - - -| **Certified Cloud Providers** | 4.8 | 4.9 | 4.10 | -| :---- | :---- | :---- | :---- | -| Amazon Web Services| | | X | -| Microsoft Azure| X | | | -| Google Cloud Platform| | X | | - - -## General OpenShift Minimum Requirements - -OpenShift 4 has the following minimum requirements for sizing of nodes: - -* **Minimum 4 vCPU** (additional are strongly recommended). 
-* **Minimum 16 GB RAM** (additional memory is strongly recommended, especially if etcd is colocated on masters). -* **Minimum 40 GB** hard disk space for the file system containing /var/. -* **Minimum 1 GB** hard disk space for the file system containing /usr/local/bin/. - -There are several applications that comprise the **industrial-edge** pattern. In addition, the **industrial-edge** pattern also includes a number of supporting operators that are installed by **OpenShift GitOps** using ArgoCD. - -### **Industrial-Edge** Pattern Components - -Here's an inventory of what gets deployed by the **Industrial-Edge** pattern on the Datacenter/Hub OpenShift cluster: - -| Name | Kind | Namespace | Description -| :---- | :---- | :---- | :---- -| line-dashboard | Application | manuela-tst-all | Frontend application -| machine-sensor-1 | Application | manuela-tst-all | Data publisher -| machine-sensor-2 | Application | manuela-tst-all | Data publisher -| messaging | Application | manuela-tst-all | Data subscriber -| mqtt2kafka-integration | Application | manuela-tst-all | Kafka Integration -| anomaly-detection-predictor-0-anomaly-detection | Application | manuela-tst-all | Anomaly detection application -| manuela-kafka-cluster-entity-operator | Operator | manuela-tst-all | Kafka -| Red Hat Advanced Cluster Management | Operator | open-cluster-management | Advance Cluster Management -| Red Hat Integration - AMQ Broker | Operator | manuela-tst-all | AMQ Broker -| Red Hat Integration - AMQ Streams | Operator | manuela-tst-all | AMQ Streams -| Open Data Hub | Operator | openshift-operators | Open Data Hub -| Red Hat OpenShift GitOps | Operator | openshift-operators | OpenShift GitOps -| Red Hat Integration - Camel K | Operator | manuela-tst-all | Integration Platform, Kamelet Binding, Kamelet -| Red Hat OpenShift Pipelines | Operator | All Namespaces | Tekton Config, Pipelines, Triggers, Addons -| Seldon Operator | Operator | manuela-tst-all | Seldon Deployment - -### 
Industrial-Edge Pattern OpenShift Datacenter HUB Cluster Size - -The Industrial-Edge pattern has been tested with a defined set of specifically tested configurations that represent the most common combinations that Red Hat OpenShift Container Platform (OCP) customers are using or deploying for the x86_64 architecture. - -The Datacenter HUB OpenShift Cluster is made up of the the following on the AWS deployment tested: - -| Node Type | Number of nodes | Cloud Provider | Instance Type -| :---- | :----: | :---- | :---- -| Master | 3 | Amazon Web Services | m5.xlarge -| Worker | 4 | Amazon Web Services | m5.xlarge - -The Datacenter HUB OpenShift cluster needs to be a bit bigger than the Factory/Edge clusters because this is where the developers will be running pipelines to build and deploy the **Industrial Edge** pattern on the cluster. The above cluster sizing is close to a **minimum** size for a Datacenter HUB cluster. In the next few sections we take some snapshots of the cluster utilization while the **Industrial Edge** pattern is running. Keep in mind that resources will have to be added as more developers are working building their applications. - -#### Datacenter Cluster utilization - -Below is a snapshot of the OpenShift cluster utilization while running the **Industrial-Edge** pattern: - -| CPU | Memory | File System | Network | Pod Count -| :----: | :-----: | :----: | :----: | :----: -| `13.84` **Used 42.16 available of 56** | `73.5 GiB` **146.3 GiB available of 219.8 GiB** | `106 GiB` **732.9 GiB available of 838.9 GiB** | `20.65` **MBps in** `22.84` **MBps out** | `354` **pods** - -### Industrial-Edge Pattern OpenShift Factory Edge Cluster Size - -The OpenShift cluster is made of 3 Nodes combining Master/Workers for the Edge/Factory cluster. 
- -| Node Type | Number of nodes | Cloud Provider | Instance Type -| :----: | :----: | :----: | :----: -| Master/Worker | 3 | Google Cloud | n1-standard-8 -| Master/Worker | 3 | Amazon Cloud Services | m5.2xlarge -| Master/Worker | 3 | Microsoft Azure | Standard_D8s_v3 - -#### Factory/Edge Cluster Utilization - -**GCP** - -This is a snapshot of a Google Cloud Factory Edge cluster running the production **Industrial-Edge** pattern. - -| CPU | Memory | File System | Network | Pod Count -| :----: | :-----: | :----: | :----: | :----: -| `6.55` **17.45 available of 24** | `43.19 GiB` usage **45.09 GiB available of 88.28 GiB** | 48.45 GiB usage **334 GiB available of 382.5 GiB** | 9.64 **MBps in15.79 MBps out** | 187 **pods** - -**AWS** - -This is a snapshot of a Amazon Web Services Factory Edge cluster running the production **Industrial-Edge** pattern. - -| CPU | Memory | File System | Network | Pod Count -| :----: | :-----: | :----: | :----: | :----: -| `5.1` **18.9 available of 24** |`42.91` **GiB 49.27 GiB available of 92.18 GiB** | `51.54` **GiB 308 GiB available of 359.5 GiB** | `9.41` **MBps in** `10.38` **MBps out** | 194 **pods** - -**Azure** - -This is a snapshot of an Azure Factory Edge cluster running the production **Industrial-Edge** pattern. - -| CPU | Memory | File System | Network | Pod Count -| :----: | :-----: | :----: | :----: | :----: -| `7.86` **15.65 available of 24** | `42.76` **Gib used 51.15 GiB available of 94.2 GiB** | `71.29` **GiB used 2.93 TiB available of 3 TiB** | `8.98` **MBps in** `9.64` **MBps out** | 192 **pods* - -### AWS Instance Types - -The **industrial-edge** pattern was tested with the highlighted AWS instances in **bold**. The OpenShift installer will let you know if the instance type meets the minimum requirements for a cluster. 
- -The message that the openshift installer will give you will be similar to this message - -```text -INFO Credentials loaded from default AWS environment variables -FATAL failed to fetch Metadata: failed to load asset "Install Config": [controlPlane.platform.aws.type: Invalid value: "m4.large": instance type does not meet minimum resource requirements of 4 vCPUs, controlPlane.platform.aws.type: Invalid value: "m4.large": instance type does not meet minimum resource requirements of 16384 MiB Memory] -``` - -Below you can find a list of the AWS instance types that can be used to deploy the **industrial-edge** pattern. - -| Instance type | Default vCPUs | Memory (GiB) | Datacenter | Factory/Edge -| :------: | :-----: | :-----: | :----: | :----: -| | | | 3x3 OCP Cluster | 3 Node OCP Cluster -| m4.xlarge | 4 | 16 | N | N -| **m4.2xlarge** | 8 | 32 | Y | Y -| m4.4xlarge | 16 | 64 | Y | Y -| m4.10xlarge | 40 | 160 | Y | Y -| m4.16xlarge | 64 | 256 | Y | Y -| **m5.xlarge** | 4 | 16 | Y | N -| **m5.2xlarge** | 8 | 32 | Y | Y -| m5.4xlarge | 16 | 64 | Y | Y -| m5.8xlarge | 32 | 128 | Y | Y -| m5.12xlarge | 48 | 192 | Y | Y -| m5.16xlarge | 64 | 256 | Y | Y -| m5.24xlarge | 96 | 384 | Y | Y - -The OpenShift cluster is made of 4 Masters and 3 Workers for the Datacenter and the Edge/Factory cluster are made of 3 Master/Worker nodes. For the node sizes we used the **m5.xlarge** on AWS and this instance type met the minimum requirements to deploy the **industrial-edge** pattern successfully on the Datacenter hub. On the Factory/Edge cluster we used the **m5.2xlarge** since the minimum cluster was comprised of 3 nodes. . - -To understand better what types of nodes you can use on other Cloud Providers we provide some of the details below. - -### Azure Instance Types - -The **industrial-edge** pattern was also deployed on Azure using the **Standard_D8s_v3** VM size. Below is a table of different VM sizes available for Azure. 
Keep in mind that due to limited access to Azure we only used the **Standard_D8s_v3** VM size. - -The OpenShift cluster is made of 3 Master and 3 Workers for the Datacenter cluster. - -The OpenShift cluster is made of 3 Nodes combining Master/Workers for the Edge/Factory cluster. - -| Type | Sizes | Description -| :---- | :---- | :---- -| [General purpose](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes-general) |B, Dsv3, Dv3, Dasv4, Dav4, DSv2, Dv2, Av2, DC, DCv2, Dv4, Dsv4, Ddv4, Ddsv4 | Balanced CPU-to-memory ratio. Ideal for testing and development, small to medium databases, and low to medium traffic web servers. -| [Compute optimized](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes-compute) | F, Fs, Fsv2, FX | High CPU-to-memory ratio. Good for medium traffic web servers, network appliances, batch processes, and application servers. -| [Memory optimized](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes-memory) | Esv3, Ev3, Easv4, Eav4, Ev4, Esv4, Edv4, Edsv4, Mv2, M, DSv2, Dv2 | High memory-to-CPU ratio. Great for relational database servers, medium to large caches, and in-memory analytics. -| [Storage optimized](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes-storage) | Lsv2 | High disk throughput and IO ideal for Big Data, SQL, NoSQL databases, data warehousing and large transactional databases. -| [GPU](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes-gpu) | NC, NCv2, NCv3, NCasT4_v3, ND, NDv2, NV, NVv3, NVv4 | Specialized virtual machines targeted for heavy graphic rendering and video editing, as well as model training and inferencing (ND) with deep learning. Available with single or multiple GPUs. -| [High performance compute](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes-hpc) | HB, HBv2, HBv3, HC, H | Our fastest and most powerful CPU virtual machines with optional high-throughput network interfaces (RDMA). 
- -For more information please refer to the [Azure VM Size Page](https://docs.microsoft.com/en-us/azure/virtual-machines/sizes). - -### Google Cloud (GCP) Instance Types - -The **industrial-edge** pattern was also deployed on GCP using the **n1-standard-8** VM size. Below is a table of different VM sizes available for GCP. Keep in mind that due to limited access to GCP we only used the **n1-standard-8** VM size. - -The OpenShift cluster is made of 3 Master and 3 Workers for the Datacenter cluster. - -The OpenShift cluster is made of 3 Nodes combining Master/Workers for the Edge/Factory cluster. - -The following table provides VM recommendations for different workloads. - -| **General purpose** | **Workload optimized** -| Cost-optimized | Balanced | Scale-out optimized | Memory-optimized |Compute-optimized | Accelerator-optimized -| :---- | :---- | :---- | :---- | :---- | :---- -| E2 | N2, N2D, N1 | T2D | M2, M1 | C2 | A2 -Day-to-day computing at a lower cost | Balanced price/performance across a wide range of VM shapes | Best performance/cost for scale-out workloads | Ultra high-memory workloads | Ultra high performance for compute-intensive workloads | Optimized for high performance computing workloads - -For more information please refer to the [GCP VM Size Page](https://cloud.google.com/compute/docs/machine-types). 
diff --git a/modules/cluster-sizing-template.adoc b/modules/cluster-sizing-template.adoc index 858ce438b..d9c94fb95 100644 --- a/modules/cluster-sizing-template.adoc +++ b/modules/cluster-sizing-template.adoc @@ -2,7 +2,7 @@ :imagesdir: ../../images [id="{name}-openshift-hub-cluster-size"] -= {display_name} pattern hub/datacenter cluster size +== {display_name} pattern hub/datacenter cluster size The {display_name} pattern has been tested with a defined set of specifically tested configurations that represent the most common combinations that Red Hat @@ -19,39 +19,47 @@ The datacenter hub OpenShift cluster uses the following the deployment configura ifdef::requirements_hub_controlPlane_platform_aws_replicas[] | Amazon Web Services | Control Plane -| {requirements_hub_controlPlatform_platform_aws_replicas} -| {requirements_hub_controlPlatform_platform_aws_type} +| {requirements_hub_controlPlane_platform_aws_replicas} +| {requirements_hub_controlPlane_platform_aws_type} +endif::requirements_hub_controlPlane_platform_aws_replicas[] +ifdef::requirements_hub_compute_platform_aws_replicas[] | Amazon Web Services | Worker | {requirements_hub_compute_platform_aws_replicas} | {requirements_hub_compute_platform_aws_type} -endif::requirements_hub_controlPlane_platform_aws_replicas[] +endif::requirements_hub_compute_platform_aws_replicas[] ifdef::requirements_hub_controlPlane_platform_gcp_replicas[] | Google Cloud Platform | Control Plane -| {requirements_hub_controlPlatform_platform_gcp_replicas} -| {requirements_hub_controlPlatform_platform_gcp_type} +| {requirements_hub_controlPlane_platform_gcp_replicas} +| {requirements_hub_controlPlane_platform_gcp_type} +endif::requirements_hub_controlPlane_platform_gcp_replicas[] +ifdef::requirements_hub_compute_platform_gcp_replicas[] | Google Cloud Platform | Worker | {requirements_hub_compute_platform_gcp_replicas} | {requirements_hub_compute_platform_gcp_type} -endif::requirements_hub_controlPlane_platform_gcp_replicas[] 
+endif::requirements_hub_compute_platform_gcp_replicas[] ifdef::requirements_hub_controlPlane_platform_azure_replicas[] | Microsoft Azure | Control Plane -| {requirements_hub_controlPlatform_platform_azure_replicas} -| {requirements_hub_controlPlatform_platform_azure_type} +| {requirements_hub_controlPlane_platform_azure_replicas} +| {requirements_hub_controlPlane_platform_azure_type} +endif::requirements_hub_controlPlane_platform_azure_replicas[] +ifdef::requirements_hub_compute_platform_azure_replicas[] | Microsoft Azure | Worker | {requirements_hub_compute_platform_azure_replicas} | {requirements_hub_compute_platform_azure_type} -endif::requirements_hub_controlPlane_platform_azure_replicas[] +endif::requirements_hub_compute_platform_azure_replicas[] |=== ifeval::["{extra_features_spoke_support}" == "true"] + +[id="{name}-openshift-spoke-cluster-size"] == {display_name} spoke/managed cluster size minimum requirements .Spoke cluster minimum requirements @@ -62,35 +70,41 @@ ifeval::["{extra_features_spoke_support}" == "true"] ifdef::requirements_spoke_controlPlane_platform_aws_replicas[] | Amazon Web Services | Control Plane -| {requirements_spoke_controlPlatform_platform_aws_replicas} -| {requirements_spoke_controlPlatform_platform_aws_type} +| {requirements_spoke_controlPlane_platform_aws_replicas} +| {requirements_spoke_controlPlane_platform_aws_type} +endif::requirements_spoke_controlPlane_platform_aws_replicas[] +ifdef::requirements_spoke_compute_platform_aws_replicas[] | Amazon Web Services | Worker | {requirements_spoke_compute_platform_aws_replicas} | {requirements_spoke_compute_platform_aws_type} -endif::requirements_spoke_controlPlane_platform_aws_replicas[] +endif::requirements_spoke_compute_platform_aws_replicas[] ifdef::requirements_spoke_controlPlane_platform_gcp_replicas[] | Google Cloud Platform | Control Plane -| {requirements_spoke_controlPlatform_platform_gcp_replicas} -| {requirements_spoke_controlPlatform_platform_gcp_type} +| 
{requirements_spoke_controlPlane_platform_gcp_replicas} +| {requirements_spoke_controlPlane_platform_gcp_type} +endif::requirements_spoke_controlPlane_platform_gcp_replicas[] +ifdef::requirements_spoke_compute_platform_gcp_replicas[] | Google Cloud Platform | Worker | {requirements_spoke_compute_platform_gcp_replicas} | {requirements_spoke_compute_platform_gcp_type} -endif::requirements_spoke_controlPlane_platform_gcp_replicas[] +endif::requirements_spoke_compute_platform_gcp_replicas[] ifdef::requirements_spoke_controlPlane_platform_azure_replicas[] | Microsoft Azure | Control Plane -| {requirements_spoke_controlPlatform_platform_azure_replicas} -| {requirements_spoke_controlPlatform_platform_azure_type} +| {requirements_spoke_controlPlane_platform_azure_replicas} +| {requirements_spoke_controlPlane_platform_azure_type} +endif::requirements_spoke_controlPlane_platform_azure_replicas[] +ifdef::requirements_spoke_compute_platform_azure_replicas[] | Microsoft Azure | Worker | {requirements_spoke_compute_platform_azure_replicas} | {requirements_spoke_compute_platform_azure_type} -endif::requirements_spoke_controlPlane_platform_azure_replicas[] +endif::requirements_spoke_compute_platform_azure_replicas[] |===