feat(helm): update chart rook-ceph-cluster (v1.18.1 → v1.19.2) #468
Open
renovate[bot] wants to merge 1 commit into main from
Open
feat(helm): update chart rook-ceph-cluster (v1.18.1 → v1.19.2) #468 — renovate[bot] wants to merge 1 commit into main from
renovate[bot] wants to merge 1 commit into main from
Conversation
--- HelmRelease: rook-ceph/rook-ceph-cluster Deployment: rook-ceph/rook-ceph-tools
+++ HelmRelease: rook-ceph/rook-ceph-cluster Deployment: rook-ceph/rook-ceph-tools
@@ -1,9 +1,9 @@
---
+kind: Deployment
apiVersion: apps/v1
-kind: Deployment
metadata:
name: rook-ceph-tools
namespace: rook-ceph
labels:
app: rook-ceph-tools
spec:
@@ -27,12 +27,13 @@
- |
# Replicate the script from toolbox.sh inline so the ceph image
# can be run directly, instead of requiring the rook toolbox
CEPH_CONFIG="/etc/ceph/ceph.conf"
MON_CONFIG="/etc/rook/mon-endpoints"
KEYRING_FILE="/etc/ceph/keyring"
+ CONFIG_OVERRIDE="/etc/rook-config-override/config"
# create a ceph config file in its default location so ceph/rados tools can be used
# without specifying any arguments
write_endpoints() {
endpoints=$(cat ${MON_CONFIG})
@@ -47,12 +48,19 @@
[global]
mon_host = ${mon_endpoints}
[client.admin]
keyring = ${KEYRING_FILE}
EOF
+
+ # Merge the config override if it exists and is not empty
+ if [ -f "${CONFIG_OVERRIDE}" ] && [ -s "${CONFIG_OVERRIDE}" ]; then
+ echo "$DATE merging config override from ${CONFIG_OVERRIDE}"
+ echo "" >> ${CEPH_CONFIG}
+ cat ${CONFIG_OVERRIDE} >> ${CEPH_CONFIG}
+ fi
}
# watch the endpoints config file and update if the mon endpoints ever change
watch_endpoints() {
# get the timestamp for the target of the soft link
real_path=$(realpath ${MON_CONFIG})
@@ -112,12 +120,15 @@
- mountPath: /etc/ceph
name: ceph-config
- name: mon-endpoint-volume
mountPath: /etc/rook
- name: ceph-admin-secret
mountPath: /var/lib/rook-ceph-mon
+ - name: rook-config-override
+ mountPath: /etc/rook-config-override
+ readOnly: true
serviceAccountName: rook-ceph-default
volumes:
- name: ceph-admin-secret
secret:
secretName: rook-ceph-mon
optional: false
@@ -127,12 +138,16 @@
- name: mon-endpoint-volume
configMap:
name: rook-ceph-mon-endpoints
items:
- key: data
path: mon-endpoints
+ - name: rook-config-override
+ configMap:
+ name: rook-config-override
+ optional: true
- name: ceph-config
emptyDir: {}
tolerations:
- key: node.kubernetes.io/unreachable
operator: Exists
effect: NoExecute
--- HelmRelease: rook-ceph/rook-ceph-cluster Ingress: rook-ceph/ceph-objectstore
+++ HelmRelease: rook-ceph/rook-ceph-cluster Ingress: rook-ceph/ceph-objectstore
@@ -1,9 +1,9 @@
---
+kind: Ingress
apiVersion: networking.k8s.io/v1
-kind: Ingress
metadata:
name: ceph-objectstore
namespace: rook-ceph
spec:
rules:
- host: rgw...PLACEHOLDER_SECRET_DOMAIN..
--- HelmRelease: rook-ceph/rook-ceph-cluster Ingress: rook-ceph/rook-ceph-dashboard
+++ HelmRelease: rook-ceph/rook-ceph-cluster Ingress: rook-ceph/rook-ceph-dashboard
@@ -1,9 +1,9 @@
---
+kind: Ingress
apiVersion: networking.k8s.io/v1
-kind: Ingress
metadata:
name: rook-ceph-dashboard
namespace: rook-ceph
annotations:
gethomepage.dev/app: rook-ceph-mgr
gethomepage.dev/description: Rook-Ceph Dashboard
--- HelmRelease: rook-ceph/rook-ceph-cluster CephBlockPool: rook-ceph/ceph-blockpool
+++ HelmRelease: rook-ceph/rook-ceph-cluster CephBlockPool: rook-ceph/ceph-blockpool
@@ -1,9 +1,9 @@
---
+kind: CephBlockPool
apiVersion: ceph.rook.io/v1
-kind: CephBlockPool
metadata:
name: ceph-blockpool
namespace: rook-ceph
spec:
failureDomain: host
replicated:
--- HelmRelease: rook-ceph/rook-ceph-cluster CephCluster: rook-ceph/rook-ceph
+++ HelmRelease: rook-ceph/rook-ceph-cluster CephCluster: rook-ceph/rook-ceph
@@ -4,20 +4,20 @@
metadata:
name: rook-ceph
namespace: rook-ceph
spec:
monitoring:
enabled: true
+ cephVersion:
+ image: quay.io/ceph/ceph:v19.2.3
+ allowUnsupported: false
cephConfig:
global:
bdev_async_discard_threads: '1'
bdev_enable_discard: 'true'
osd_class_update_on_start: 'false'
- cephVersion:
- allowUnsupported: false
- image: quay.io/ceph/ceph:v19.2.3
cleanupPolicy:
allowUninstallWithVolumes: false
confirmation: ''
sanitizeDisks:
dataSource: zero
iteration: 1
--- HelmRelease: rook-ceph/rook-ceph-cluster CephFilesystem: rook-ceph/ceph-filesystem
+++ HelmRelease: rook-ceph/rook-ceph-cluster CephFilesystem: rook-ceph/ceph-filesystem
@@ -1,9 +1,9 @@
---
+kind: CephFilesystem
apiVersion: ceph.rook.io/v1
-kind: CephFilesystem
metadata:
name: ceph-filesystem
namespace: rook-ceph
spec:
dataPools:
- failureDomain: host
--- HelmRelease: rook-ceph/rook-ceph-cluster CephFilesystemSubVolumeGroup: rook-ceph/ceph-filesystem-csi
+++ HelmRelease: rook-ceph/rook-ceph-cluster CephFilesystemSubVolumeGroup: rook-ceph/ceph-filesystem-csi
@@ -1,9 +1,9 @@
---
+kind: CephFilesystemSubVolumeGroup
apiVersion: ceph.rook.io/v1
-kind: CephFilesystemSubVolumeGroup
metadata:
name: ceph-filesystem-csi
namespace: rook-ceph
spec:
name: csi
filesystemName: ceph-filesystem
--- HelmRelease: rook-ceph/rook-ceph-cluster CephObjectStore: rook-ceph/ceph-objectstore
+++ HelmRelease: rook-ceph/rook-ceph-cluster CephObjectStore: rook-ceph/ceph-objectstore
@@ -1,9 +1,9 @@
---
+kind: CephObjectStore
apiVersion: ceph.rook.io/v1
-kind: CephObjectStore
metadata:
name: ceph-objectstore
namespace: rook-ceph
spec:
dataPool:
erasureCoded:
--- HelmRelease: rook-ceph/rook-ceph-cluster PrometheusRule: rook-ceph/prometheus-ceph-rules
+++ HelmRelease: rook-ceph/rook-ceph-cluster PrometheusRule: rook-ceph/prometheus-ceph-rules
@@ -1,9 +1,9 @@
---
+kind: PrometheusRule
apiVersion: monitoring.coreos.com/v1
-kind: PrometheusRule
metadata:
labels:
prometheus: rook-prometheus
role: alert-rules
name: prometheus-ceph-rules
namespace: rook-ceph
--- HelmRelease: rook-ceph/rook-ceph-cluster StorageClass: rook-ceph/ceph-block
+++ HelmRelease: rook-ceph/rook-ceph-cluster StorageClass: rook-ceph/ceph-block
@@ -1,15 +1,12 @@
---
+kind: StorageClass
apiVersion: storage.k8s.io/v1
-kind: StorageClass
metadata:
name: ceph-block
annotations:
- helm.sh/hook: post-install,pre-upgrade
- helm.sh/hook-delete-policy: before-hook-creation
- helm.sh/resource-policy: keep
storageclass.kubernetes.io/is-default-class: 'true'
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
pool: ceph-blockpool
clusterID: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
--- HelmRelease: rook-ceph/rook-ceph-cluster StorageClass: rook-ceph/ceph-filesystem
+++ HelmRelease: rook-ceph/rook-ceph-cluster StorageClass: rook-ceph/ceph-filesystem
@@ -1,15 +1,12 @@
---
+kind: StorageClass
apiVersion: storage.k8s.io/v1
-kind: StorageClass
metadata:
name: ceph-filesystem
annotations:
- helm.sh/hook: post-install,pre-upgrade
- helm.sh/hook-delete-policy: before-hook-creation
- helm.sh/resource-policy: keep
storageclass.kubernetes.io/is-default-class: 'false'
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
fsName: ceph-filesystem
pool: ceph-filesystem-data0
clusterID: rook-ceph
--- HelmRelease: rook-ceph/rook-ceph-cluster StorageClass: rook-ceph/ceph-bucket
+++ HelmRelease: rook-ceph/rook-ceph-cluster StorageClass: rook-ceph/ceph-bucket
@@ -1,15 +1,11 @@
---
+kind: StorageClass
apiVersion: storage.k8s.io/v1
-kind: StorageClass
metadata:
name: ceph-bucket
- annotations:
- helm.sh/hook: post-install,pre-upgrade
- helm.sh/hook-delete-policy: before-hook-creation
- helm.sh/resource-policy: keep
provisioner: rook-ceph.ceph.rook.io/bucket
reclaimPolicy: Delete
volumeBindingMode: Immediate
parameters:
objectStoreName: ceph-objectstore
objectStoreNamespace: rook-ceph
--- HelmRelease: rook-ceph/rook-ceph-cluster VolumeSnapshotClass: rook-ceph/csi-ceph-filesystem
+++ HelmRelease: rook-ceph/rook-ceph-cluster VolumeSnapshotClass: rook-ceph/csi-ceph-filesystem
@@ -1,15 +1,12 @@
---
+kind: VolumeSnapshotClass
apiVersion: snapshot.storage.k8s.io/v1
-kind: VolumeSnapshotClass
metadata:
name: csi-ceph-filesystem
annotations:
- helm.sh/hook: post-install,pre-upgrade
- helm.sh/hook-delete-policy: before-hook-creation
- helm.sh/resource-policy: keep
snapshot.storage.kubernetes.io/is-default-class: 'false'
driver: rook-ceph.cephfs.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/snapshotter-secret-name: rook-csi-cephfs-provisioner
csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph
--- HelmRelease: rook-ceph/rook-ceph-cluster VolumeSnapshotClass: rook-ceph/csi-ceph-blockpool
+++ HelmRelease: rook-ceph/rook-ceph-cluster VolumeSnapshotClass: rook-ceph/csi-ceph-blockpool
@@ -1,15 +1,12 @@
---
+kind: VolumeSnapshotClass
apiVersion: snapshot.storage.k8s.io/v1
-kind: VolumeSnapshotClass
metadata:
name: csi-ceph-blockpool
annotations:
- helm.sh/hook: post-install,pre-upgrade
- helm.sh/hook-delete-policy: before-hook-creation
- helm.sh/resource-policy: keep
snapshot.storage.kubernetes.io/is-default-class: 'false'
driver: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
csi.storage.k8s.io/snapshotter-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/snapshotter-secret-namespace: rook-ceph
--- kubernetes/apps/rook-ceph/rook-ceph/cluster Kustomization: rook-ceph/rook-ceph-cluster HelmRelease: rook-ceph/rook-ceph-cluster
+++ kubernetes/apps/rook-ceph/rook-ceph/cluster Kustomization: rook-ceph/rook-ceph-cluster HelmRelease: rook-ceph/rook-ceph-cluster
@@ -13,13 +13,13 @@
spec:
chart: rook-ceph-cluster
sourceRef:
kind: HelmRepository
name: rook-ceph
namespace: flux-system
- version: v1.18.1
+ version: v1.19.2
dependsOn:
- name: rook-ceph-operator
namespace: rook-ceph
- name: snapshot-controller
namespace: storage
install: |
114aaa8 to
674d48f
Compare
1a9fa73 to
8f54777
Compare
8f54777 to
099b7f2
Compare
099b7f2 to
1a1e69c
Compare
1a1e69c to
d8735f8
Compare
d8735f8 to
fc8dd5f
Compare
fc8dd5f to
f10b5a7
Compare
f10b5a7 to
dc0c2cc
Compare
dc0c2cc to
1ff9c14
Compare
1ff9c14 to
696bc73
Compare
64b6f05 to
f09cb51
Compare
f09cb51 to
ec1f6f6
Compare
ec1f6f6 to
c33d69e
Compare
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Add this suggestion to a batch that can be applied as a single commit. This suggestion is invalid because no changes were made to the code. Suggestions cannot be applied while the pull request is closed. Suggestions cannot be applied while viewing a subset of changes. Only one suggestion per line can be applied in a batch. Add this suggestion to a batch that can be applied as a single commit. Applying suggestions on deleted lines is not supported. You must change the existing code in this line in order to create a valid suggestion. Outdated suggestions cannot be applied. This suggestion has been applied or marked resolved. Suggestions cannot be applied from pending reviews. Suggestions cannot be applied on multi-line comments. Suggestions cannot be applied while the pull request is queued to merge. Suggestion cannot be applied right now. Please check back later.
This PR contains the following updates:
v1.18.1 → v1.19.2

Warning
Some dependencies could not be looked up. Check the Dependency Dashboard for more information.
Release Notes
rook/rook (rook-ceph-cluster)
v1.19.2 — Compare Source
v1.19.1 — Compare Source
Improvements
Rook v1.19.1 is a patch release limited in scope and focusing on feature additions and bug fixes to the Ceph operator.
csi: Update to ceph csi operator to v0.5 (#17029, @subhamkrai)
security: Remove unnecessary nodes/proxy RBAC enablement (#16979, @ibotty)
helm: Set default ceph image pull policy (#16954, @travisn)
nfs: Add CephNFS.spec.server.{image,imagePullPolicy} fields (#16982, @jhoblitt)
osd: Assign correct osd container in case it is not index 0 (#16969, @kyrbrbik)
csi: Remove obsolete automated node fencing code (#16922, @subhamkrai)
osd: Enable proper cancellation during OSD reconcile (#17022, @sp98)
csi: Allow running the csi controller plugin on host network (#16972, @Madhu-1)
rgw: Update ca bundle mount perms to read-all (#16968, @BlaineEXE)
mon: Change do-not-reconcile to be more granular for individual mons (#16939, @travisn)
build(deps): Bump the k8s-dependencies group with 6 updates (#16846, @dependabot[bot])
doc: add csi-operator example in configuration doc (#17001, @subhamkrai)
v1.19.0 — Compare Source
Upgrade Guide
To upgrade from previous versions of Rook, see the Rook upgrade guide.
Breaking Changes
Upgrade to Ceph v19.2.0 or higher before upgrading Rook.
The activeStandby property in the CephFilesystem CRD has changed. When set to false, the standby MDS daemon deployment will be scaled down and removed, rather than only disabling the standby cache while the daemon remains running. The rook-ceph-cluster chart has changed where the Ceph image is defined, to allow separate settings for the repository and tag. For more details, see the Rook upgrade guide.

Features
ROOK_RECONCILE_CONCURRENT_CLUSTERS to a value greater than 1.

v1.18.9 — Compare Source
Improvements
Rook v1.18.9 is a patch release limited in scope and focusing on feature additions and bug fixes to the Ceph operator.
v1.18.8 — Compare Source
Improvements
Rook v1.18.8 is a patch release limited in scope and focusing on feature additions and bug fixes to the Ceph operator.
v1.18.7 — Compare Source
Improvements
Rook v1.18.7 is a patch release limited in scope and focusing on feature additions and bug fixes to the Ceph operator.
OSDMaxUpdatesInParallel to tune OSD updates (#16655, @jhoblitt)

v1.18.6 — Compare Source
Improvements
Rook v1.18.6 is a patch release with changes only in the rook-ceph helm chart. If not affected by #16636 in v1.18.5, no need to update to this release.
v1.18.5 — Compare Source
Improvements
Rook v1.18.5 is a patch release limited in scope and focusing on feature additions and bug fixes to the Ceph operator.
v1.18.4 — Compare Source
Improvements
Rook v1.18.4 is a patch release with changes only in the rook-ceph-cluster helm chart. If not affected by #16567 in v1.18.3, no need to update to this release.
v1.18.3 — Compare Source
Improvements
Rook v1.18.3 is a patch release limited in scope and focusing on feature additions and bug fixes to the Ceph operator.
v1.18.2 — Compare Source
Improvements
Rook v1.18.2 is a patch release limited in scope and focusing on feature additions and bug fixes to the Ceph operator.
Configuration
📅 Schedule: Branch creation - "every weekend" in timezone America/New_York, Automerge - At any time (no schedule defined).
🚦 Automerge: Disabled by config. Please merge this manually once you are satisfied.
♻ Rebasing: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox.
🔕 Ignore: Close this PR and you won't be reminded about this update again.
This PR was generated by Mend Renovate. View the repository job log.