diff --git a/charts/debezium/templates/deployment.yaml b/charts/debezium/templates/deployment.yaml index 2e040c2bf..29553bc44 100644 --- a/charts/debezium/templates/deployment.yaml +++ b/charts/debezium/templates/deployment.yaml @@ -40,8 +40,9 @@ spec: resources: {{- toYaml .Values.connect.resources | nindent 12 }} + {{- if not .Values.connect.properties.probe }} livenessProbe: - initialDelaySeconds: 60 # Wait 60s after container starts before first check + initialDelaySeconds: 600 # Wait 600s after container starts before first check periodSeconds: 20 # Check every 20s timeoutSeconds: 5 # Timeout for each check failureThreshold: 3 # Mark container unhealthy after 3 consecutive failures @@ -86,10 +87,12 @@ spec: # If all connectors and tasks are running, container is healthy exit 0 + {{- end }} # Readiness probe: Checks if container is ready to accept traffic + {{- if not .Values.connect.properties.probe }} readinessProbe: - initialDelaySeconds: 60 + initialDelaySeconds: 600 periodSeconds: 20 timeoutSeconds: 5 failureThreshold: 3 @@ -134,6 +137,7 @@ spec: # If all connectors and tasks are running, container is ready exit 0 + {{- end }} volumeMounts: - name: config diff --git a/charts/debezium/values-dts1.yaml b/charts/debezium/values-dts1.yaml index bfb27b719..ae2eaff4c 100644 --- a/charts/debezium/values-dts1.yaml +++ b/charts/debezium/values-dts1.yaml @@ -24,22 +24,22 @@ connect: resources: limits: - cpu: 1000m - memory: 2Gi + cpu: 2000m + memory: 8Gi requests: - cpu: 500m - memory: 1Gi + cpu: 1500m + memory: 7Gi properties: - group_id: "debezium-odse-srte-connector-v081525" + group_id: "debezium-odse-srte-connector-v091725-1" topics_basename: "debezium-odse-srte-connector" default_replication_factor: 2 default_partitions: 10 default_cleanup: "compact" sql_server_agent_override: false sql_server_agent_status: "" - bootstrap_server: "b-1.nrtreportingdebezium.89ln12.c11.kafka.us-east-1.amazonaws.com:9092, - 
b-2.nrtreportingdebezium.89ln12.c11.kafka.us-east-1.amazonaws.com:9092" + bootstrap_server: "b-2.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092,b-1.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092" + probe: false connector_enable: nbs_odse: "enabled" @@ -47,18 +47,17 @@ connect: nbs_odse_meta: "enabled" sqlserverconnector_odse: { - "name": "debezium-odse-connector-v081525", + "name": "debezium-odse-connector-v091725-1", "config": { "connector.class": "io.debezium.connector.sqlserver.SqlServerConnector", - "database.hostname": "nbs-db.private-dts1.nbspreview.com", + "database.hostname": "cdc-nbs-alabama-rds-mssql.czya31goozkz.us-east-1.rds.amazonaws.com", "database.port": "1433", "database.user": "", "database.password": "", "database.dbname": "nbs_odse", "database.names": "nbs_odse", "database.server.name": "odse", - "database.history.kafka.bootstrap.servers": "b-1.nrtreportingdebezium.89ln12.c11.kafka.us-east-1.amazonaws.com:9092, - b-2.nrtreportingdebezium.89ln12.c11.kafka.us-east-1.amazonaws.com:9092", + "database.history.kafka.bootstrap.servers": "b-2.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092,b-1.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092", "database.history.kafka.topic": "dbhistory.database_server_name.database_name", # Uncomment following to manually bypass the sqlserver agent status query results #"database.sqlserver.agent.status.query": "select dbo.IsSqlAgentRunning()", @@ -70,8 +69,7 @@ connect: "producer.message.max.bytes": "10000000", #10MB "snapshot.mode": "no_data", "schema.history.internal.kafka.topic": "odse-schema-history", - "schema.history.internal.kafka.bootstrap.servers": "b-1.nrtreportingdebezium.89ln12.c11.kafka.us-east-1.amazonaws.com:9092, - b-2.nrtreportingdebezium.89ln12.c11.kafka.us-east-1.amazonaws.com:9092", + "schema.history.internal.kafka.bootstrap.servers": 
"b-2.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092,b-1.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092", "table.include.list": "dbo.Person, dbo.Organization, dbo.Observation, dbo.Public_health_case, dbo.Treatment, dbo.state_defined_field_data, dbo.Notification, dbo.Interview, dbo.Place, dbo.CT_contact, dbo.Auth_user, dbo.Intervention, dbo.Act_relationship", @@ -82,7 +80,7 @@ connect: "topic.creation.default.cleanup.policy": "compact", "time.precision.mode": "connect", "transforms": "dropPrefix, convertTimezone", - "transforms.dropPrefix.regex": "cdc\\.NBS_ODSE\\.dbo\\.(.+)", + "transforms.dropPrefix.regex": "cdc\\.nbs_odse\\.dbo\\.(.+)", "transforms.dropPrefix.type": "org.apache.kafka.connect.transforms.RegexRouter", "transforms.dropPrefix.replacement": "nbs_$1", "transforms.convertTimezone.type": "io.debezium.transforms.TimezoneConverter", @@ -93,18 +91,17 @@ connect: } sqlserverconnector_odse_meta: { - "name": "debezium-odse-meta-tables-connector-v081525", + "name": "debezium-odse-meta-tables-connector-v091725-1", "config": { "connector.class": "io.debezium.connector.sqlserver.SqlServerConnector", - "database.hostname": "nbs-db.private-dts1.nbspreview.com", + "database.hostname": "cdc-nbs-alabama-rds-mssql.czya31goozkz.us-east-1.rds.amazonaws.com", "database.port": "1433", "database.user": "", "database.password": "", "database.dbname": "nbs_odse", "database.names": "nbs_odse", "database.server.name": "odse-meta", - "database.history.kafka.bootstrap.servers": "b-1.nrtreportingdebezium.89ln12.c11.kafka.us-east-1.amazonaws.com:9092, - b-2.nrtreportingdebezium.89ln12.c11.kafka.us-east-1.amazonaws.com:9092", + "database.history.kafka.bootstrap.servers": "b-2.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092,b-1.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092", "database.history.kafka.topic": "dbhistory.database_server_name.database_name", # Uncomment following to manually bypass the sqlserver agent status query results 
#"database.sqlserver.agent.status.query": "select dbo.IsSqlAgentRunning()", @@ -116,8 +113,7 @@ connect: "producer.message.max.bytes": "10000000", #10MB "snapshot.mode": "no_data", "schema.history.internal.kafka.topic": "odse-schema-history", - "schema.history.internal.kafka.bootstrap.servers": "b-1.nrtreportingdebezium.89ln12.c11.kafka.us-east-1.amazonaws.com:9092, - b-2.nrtreportingdebezium.89ln12.c11.kafka.us-east-1.amazonaws.com:9092", + "schema.history.internal.kafka.bootstrap.servers": "b-2.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092,b-1.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092", "table.include.list": "dbo.Page_cond_mapping, dbo.NBS_page, dbo.NBS_ui_metadata, dbo.NBS_rdb_metadata, dbo.state_defined_field_metadata, dbo.NBS_configuration, dbo.LOOKUP_QUESTION", "tasks.max": "1", @@ -128,7 +124,7 @@ connect: "time.precision.mode": "connect", "transforms": "dropPrefix, convertTimezone, unwrap, convertTimestampsConfig_add_time, convertTimestampsConfig_last_chg_time, convertTimestampsConfig_record_status_time", - "transforms.dropPrefix.regex": "cdc\\.NBS_ODSE\\.dbo\\.(.+)", + "transforms.dropPrefix.regex": "cdc\\.nbs_odse\\.dbo\\.(.+)", "transforms.dropPrefix.type": "org.apache.kafka.connect.transforms.RegexRouter", "transforms.dropPrefix.replacement": "nrt_odse_$1", "transforms.convertTimezone.type": "io.debezium.transforms.TimezoneConverter", @@ -152,18 +148,17 @@ connect: } sqlserverconnector_srte: { - "name": "debezium-srte-connector-v081525", + "name": "debezium-srte-connector-v091725-1", "config": { "connector.class": "io.debezium.connector.sqlserver.SqlServerConnector", - "database.hostname": "nbs-db.private-dts1.nbspreview.com", + "database.hostname": "cdc-nbs-alabama-rds-mssql.czya31goozkz.us-east-1.rds.amazonaws.com", "database.port": "1433", "database.user": "", "database.password": "", "database.dbname": "nbs_srte", "database.names": "nbs_srte", "database.server.name": "srte", - "database.history.kafka.bootstrap.servers": 
"b-1.nrtreportingdebezium.89ln12.c11.kafka.us-east-1.amazonaws.com:9092, - b-2.nrtreportingdebezium.89ln12.c11.kafka.us-east-1.amazonaws.com:9092", + "database.history.kafka.bootstrap.servers": "b-2.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092,b-1.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092", "database.history.kafka.topic": "dbhistory.database_server_name.database_name", # Uncomment following to manually bypass the sqlserver agent status query results #"database.sqlserver.agent.status.query": "select dbo.IsSqlAgentRunning()", @@ -175,8 +170,7 @@ connect: "producer.message.max.bytes": "10000000", #10MB "snapshot.mode": "no_data", "schema.history.internal.kafka.topic": "srte-schema-history", - "schema.history.internal.kafka.bootstrap.servers": "b-1.nrtreportingdebezium.89ln12.c11.kafka.us-east-1.amazonaws.com:9092, - b-2.nrtreportingdebezium.89ln12.c11.kafka.us-east-1.amazonaws.com:9092", + "schema.history.internal.kafka.bootstrap.servers": "b-2.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092,b-1.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092", "table.include.list": "dbo.Condition_code,dbo.Program_area_code,dbo.Language_code,dbo.State_code,dbo.Unit_code, dbo.Cntycity_code_value,dbo.Lab_result,dbo.Country_code,dbo.Labtest_loinc,dbo.ELR_XREF, dbo.Loinc_condition,dbo.Loinc_snomed_condition,dbo.Lab_test,dbo.Zip_code_value, @@ -208,7 +202,7 @@ connect: "transforms.dropPrefix.replacement": "nrt_srte_$1", ### Snomed condition specific transforms - "transforms.dropPrefixConfig.regex": "cdc\\.NBS_SRTE\\.dbo\\.Snomed_condition", + "transforms.dropPrefixConfig.regex": "cdc\\.nbs_srte\\.dbo\\.Snomed_condition", "transforms.dropPrefixConfig.type": "org.apache.kafka.connect.transforms.RegexRouter", "transforms.dropPrefixConfig.replacement": "nrt_srte_Snomed_condition", "transforms.unwrapConfig.type": "io.debezium.transforms.ExtractNewRecordState", @@ -277,14 +271,13 @@ connect: env: - name: BOOTSTRAP_SERVERS - value: 
"b-1.nrtreportingdebezium.89ln12.c11.kafka.us-east-1.amazonaws.com:9092, - b-2.nrtreportingdebezium.89ln12.c11.kafka.us-east-1.amazonaws.com:9092" + value: "b-2.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092,b-1.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092" - name: LOG_LEVEL value: "INFO" - name: KAFKA_LOG4J_OPTS value: "-Dlog4j.configuration=file:/kafka/config/log4j.properties" - name: NAME - value: "debezium-odse-srte-connector-v081525" + value: "debezium-odse-srte-connector-v091725-1" - name: TZ value: "UTC" diff --git a/charts/kafka-connect-sink/values-dts1.yaml b/charts/kafka-connect-sink/values-dts1.yaml index e63c14a63..533edd37f 100644 --- a/charts/kafka-connect-sink/values-dts1.yaml +++ b/charts/kafka-connect-sink/values-dts1.yaml @@ -24,7 +24,7 @@ servicePort: 8083 ## ref: https://docs.confluent.io/current/connect/userguide.html#configuring-workers configurationOverrides: "plugin.path": "/usr/share/confluent-hub-components,/usr/share/java" - "topics_basename": "kafka-sink-connector-070825" + "topics_basename": "kafka-sink-connector-v091725-1" "key.converter": "org.apache.kafka.connect.json.JsonConverter" "value.converter": "org.apache.kafka.connect.json.JsonConverter" "key.converter.schemas.enable": "false" @@ -37,13 +37,13 @@ configurationOverrides: sqlServerConnectorEnabled: true sqlServerConnector: { - "name": "Kafka-Connect-SqlServer-Sink-071025", + "name": "Kafka-Connect-SqlServer-Sink-v091725-1", "config": { "connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector", - "tasks.max": "1", + "tasks.max": "2", "offset.flush.interval.ms": "60000", - "batch.size": "200", - "connection.url": "jdbc:sqlserver://nbs-db.EXAMPLE_FIXME.nbspreview.com:1433;databaseName=rdb_modern;encrypt=true;trustServerCertificate=true;", + "batch.size": "5000", + "connection.url": "jdbc:sqlserver://cdc-nbs-alabama-rds-mssql.czya31goozkz.us-east-1.rds.amazonaws.com:1433;databaseName=rdb_modern;encrypt=true;trustServerCertificate=true;", 
"connection.user": "", "connection.password": "", "connection.pool.min_size": "5", @@ -89,12 +89,12 @@ JDBC_CONNECTOR_CONFIG: /etc/jdbcConnector.sh # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. resources: -# limits: -# memory: "2Gi" -# cpu: "1000m" -# requests: -# memory: "1Gi" -# cpu: "500m" + limits: + memory: "8Gi" + cpu: "2000m" + requests: + memory: "7Gi" + cpu: "1500m" ## Custom pod annotations podAnnotations: { } @@ -137,7 +137,7 @@ prometheus: ## You can list load balanced service endpoint, or list of all brokers (which is hard in K8s). e.g.: ## bootstrapServers: "PLAINTEXT://dozing-prawn-kafka-headless:9092" kafka: - bootstrapServers: "b-1.nrtreporting.rx5iwx.c5.kafka.us-east-1.amazonaws.com:9092,b-2.nrtreporting.rx5iwx.c5.kafka.us-east-1.amazonaws.com:9092" + bootstrapServers: "b-2.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092,b-1.nrtclusterv2.6bwemz.c13.kafka.us-east-1.amazonaws.com:9092" default_replication_factor: 2 default_partitions: 10 default_cleanup: "compact" diff --git a/charts/rtr/templates/deployment.yaml b/charts/rtr/templates/deployment.yaml index 2c6bea873..e12486921 100644 --- a/charts/rtr/templates/deployment.yaml +++ b/charts/rtr/templates/deployment.yaml @@ -119,7 +119,9 @@ spec: - name: FF_SERVICE_DISABLE value: {{- if and (hasKey $service "featureFlag") (hasKey $service.featureFlag "serviceDisable") }} {{ $service.featureFlag.serviceDisable | quote }} {{- else }} "false" {{- end }} - name: FF_THREAD_POOL_SIZE - value: {{- if and (hasKey $service "featureFlag") (hasKey $service.featureFlag "threadPoolSize") }} {{ $service.featureFlag.threadPoolSize }} {{- else }} 1 {{- end }} + value: {{- if and (hasKey $service "featureFlag") (hasKey $service.featureFlag "threadPoolSize") }} {{ $service.featureFlag.threadPoolSize }} {{- else }} "1" {{- end }} + - name: KAFKA_CONSUMER_MAX_POLL_RECS + value: {{- 
if and (hasKey $service "kafka") (hasKey $service.kafka "maxPollRecs") }} {{ $service.kafka.maxPollRecs | quote }} {{- else }} "200" {{- end }} {{- if $service.nodeSelector }} nodeSelector: diff --git a/charts/rtr/values-dts1.yaml b/charts/rtr/values-dts1.yaml index 65bbb87e5..6131e3b1c 100644 --- a/charts/rtr/values-dts1.yaml +++ b/charts/rtr/values-dts1.yaml @@ -20,11 +20,12 @@ global: image: repository: "" - tag: 1.0.1-SNAPSHOT.c7a47bd + tag: 1.0.1-SNAPSHOT.ab1ba94 pullPolicy: IfNotPresent kafka: cluster: "" + maxPollRecs: 5000 serviceAccount: create: true @@ -64,6 +65,8 @@ services: # Investigation Reporting Service # ---------------------------------------- investigation-reporting: + kafka: + maxPollRecs: 5000 secrets: jdbc: secretName: "rtr-access" @@ -77,7 +80,7 @@ services: name: '' annotations: { } featureFlag: - phcDatamartDisable: "false" + phcDatamartDisable: "true" threadPoolSize: 1 log: path: /usr/share/investigation-reporting/data @@ -96,16 +99,25 @@ services: failureThreshold: 5 image: name: "investigation-reporting-service" - tag: 1.0.1-SNAPSHOT.c7a47bd + tag: 1.0.1-SNAPSHOT.ab1ba94 podAnnotations: prometheus.io/scrape: 'true' prometheus.io/path: /actuator/prometheus prometheus.io/port: '8093' + resources: + limits: + memory: "4Gi" + cpu: "1000m" + requests: + memory: "4Gi" + cpu: "1000m" # ---------------------------------------- # LDF Data Reporting Service # ---------------------------------------- ldfdata-reporting: + kafka: + maxPollRecs: 5000 secrets: jdbc: secretName: "rtr-access" @@ -135,12 +147,14 @@ services: failureThreshold: 5 image: name: "ldfdata-reporting-service" - tag: 1.0.1-SNAPSHOT.c7a47bd + tag: 1.0.1-SNAPSHOT.ab1ba94 # ---------------------------------------- # Observation Reporting Service # ---------------------------------------- observation-reporting: + kafka: + maxPollRecs: 5000 secrets: jdbc: secretName: "rtr-access" @@ -172,17 +186,26 @@ services: failureThreshold: 5 image: name: "observation-reporting-service" - tag: 
1.0.1-SNAPSHOT.c7a47bd + tag: 1.0.1-SNAPSHOT.ab1ba94 podAnnotations: prometheus.io/scrape: 'true' prometheus.io/path: /actuator/prometheus prometheus.io/port: '8094' + resources: + limits: + memory: "6Gi" + cpu: "1000m" + requests: + memory: "6Gi" + cpu: "1000m" # ---------------------------------------- # Organization Reporting Service # ---------------------------------------- organization-reporting: + kafka: + maxPollRecs: 5000 secrets: jdbc: secretName: "rtr-access" @@ -197,7 +220,7 @@ services: annotations: { } featureFlag: elasticSearchEnable: "false" - phcDatamartDisable: "false" + phcDatamartDisable: "true" threadPoolSize: 1 log: path: /usr/share/organization-reporting/data @@ -216,16 +239,25 @@ services: failureThreshold: 5 image: name: "organization-reporting-service" - tag: 1.0.1-SNAPSHOT.c7a47bd + tag: 1.0.1-SNAPSHOT.ab1ba94 podAnnotations: prometheus.io/scrape: 'true' prometheus.io/path: /actuator/prometheus prometheus.io/port: '8092' + resources: + limits: + memory: "4Gi" + cpu: "1000m" + requests: + memory: "4Gi" + cpu: "1000m" # ---------------------------------------- # Person Reporting Service # ---------------------------------------- person-reporting: + kafka: + maxPollRecs: 5000 secrets: jdbc: secretName: "rtr-access" @@ -240,7 +272,7 @@ services: annotations: { } featureFlag: elasticSearchEnable: "false" - phcDatamartDisable: "false" + phcDatamartDisable: "true" threadPoolSize: 1 log: path: /usr/share/person-reporting/data @@ -259,16 +291,25 @@ services: failureThreshold: 5 image: name: "person-reporting-service" - tag: 1.0.1-SNAPSHOT.c7a47bd + tag: 1.0.1-SNAPSHOT.ab1ba94 podAnnotations: prometheus.io/scrape: 'true' prometheus.io/path: /actuator/prometheus prometheus.io/port: '8091' + resources: + limits: + memory: "6Gi" + cpu: "1000m" + requests: + memory: "6Gi" + cpu: "1000m" # ---------------------------------------- # Post-Processing Reporting Service # ---------------------------------------- post-processing-reporting: + kafka: + 
maxPollRecs: 5000 secrets: jdbc: secretName: "rtr-access" @@ -284,7 +325,7 @@ services: name: '' annotations: { } featureFlag: - serviceDisable: "false" + serviceDisable: "true" log: path: /usr/share/post-processing-reporting/data probes: @@ -302,7 +343,7 @@ services: failureThreshold: 5 image: name: "post-processing-reporting-service" - tag: 1.0.1-SNAPSHOT.c7a47bd + tag: 1.0.1-SNAPSHOT.ab1ba94 podAnnotations: prometheus.io/scrape: 'true' prometheus.io/path: /actuator/prometheus