diff --git a/Makefile b/Makefile index e8c6b0fa..9fb3ee0e 100644 --- a/Makefile +++ b/Makefile @@ -27,7 +27,12 @@ run-triage-agent-standalone: -e MOCK_JIRA=$(MOCK_JIRA) \ triage-agent - +.PHONY: run-clones-analyzer-agent-standalone +run-clones-analyzer-agent-standalone: + $(COMPOSE_AGENTS) run --rm \ + -e JIRA_ISSUE=$(JIRA_ISSUE) \ + -e DRY_RUN=$(DRY_RUN) \ + clones-analyzer-agent .PHONY: run-rebase-agent-c9s-standalone diff --git a/agents/clones_analyzer_agent.py b/agents/clones_analyzer_agent.py new file mode 100644 index 00000000..e8d0cbfd --- /dev/null +++ b/agents/clones_analyzer_agent.py @@ -0,0 +1,142 @@ +import os +import copy +import logging +from typing import Any +from textwrap import dedent + +from beeai_framework.agents.requirement import RequirementAgent +from beeai_framework.agents.requirement.prompts import RequirementAgentSystemPrompt +from beeai_framework.agents.requirement.requirements.conditional import ( + ConditionalRequirement, +) +from beeai_framework.memory import UnconstrainedMemory +from beeai_framework.middleware.trajectory import GlobalTrajectoryMiddleware +from beeai_framework.tools import Tool +from beeai_framework.tools.think import ThinkTool +from beeai_framework.workflows import Workflow + +from pydantic import BaseModel, Field +from observability import setup_observability + +from tools.commands import RunShellCommandTool +from tools.version_mapper import VersionMapperTool +from common.models import ClonesInputSchema, ClonesOutputSchema +from utils import get_chat_model, get_tool_call_checker_config +from utils import mcp_tools, get_agent_execution_config + +logger = logging.getLogger(__name__) + + +def get_instructions() -> str: + return """ + You are an expert on finding other Jira issues related to a given Jira issue + in RHEL Jira project by analyzing the Jira fields and comments. + + To find other Jira issues which are clones of Jira issue, do the following: + + 1. 
Search for other Jira issues which have the same affected component as Jira issue in RHEL Jira project and extract their titles. + + 2. Compare the titles of the found Jira issues with the title of Jira issue and identify the ones which are clones. + For example, if the title of Jira issue is "CVE-YYYY-XXXXX libsoup3: Out-of-Bounds Read in Cookie Date Handling of libsoup HTTP Library [rhel-10.1]" + and you have found another Jira issue with the title "CVE-YYYY-XXXXX libsoup3: Out-of-Bounds Read in Cookie Date Handling of libsoup HTTP Library [rhel-10.0z]", + then it is a clone of Jira issue or Jira issue is a clone of the found Jira issue. + + 3.Usually clones are already linked to each other in Jira through the "Issue Links" field. + If not, link the found Jira issues to Jira issue and the Jira issue to the found Jira issues + through the "is related" relationship. + + General instructions: + - If in DRY RUN mode, do not link the Jira issues to each other but tell the user that you would have linked them. + """ + + +def get_prompt(input: ClonesInputSchema) -> str: + return f""" + Find other Jira issues which are clones of {input.jira_issue} Jira issue and link them to each other. + Also check if {input.jira_issue} Jira issue is a clone of any of the found Jira issues and link them to each other. 
+ """ + +def get_agent_definition(gateway_tools: list[Tool]) -> dict[str, Any]: + return { + "name": "ExistingClonesAnalyzerAgent", + "llm": get_chat_model(), + "tool_call_checker": get_tool_call_checker_config(), + "tools": [ThinkTool(), RunShellCommandTool(), VersionMapperTool()] + + [t for t in gateway_tools if t.name in ["get_jira_details", "set_jira_fields"]], + "memory": UnconstrainedMemory(), + "requirements": [ + ConditionalRequirement( + ThinkTool, + force_at_step=1, + force_after=Tool, + consecutive_allowed=False, + only_success_invocations=False, + ), + ], + "middlewares": [GlobalTrajectoryMiddleware(pretty=True)], + "role": "Red Hat Enterprise Linux developer", + "instructions": get_instructions(), + "templates": {"system": copy.deepcopy(RequirementAgentSystemPrompt)} + } + +def create_clones_analyzer_agent(mcp_tools: list[Tool], local_tool_options: dict[str, Any]) -> RequirementAgent: + return RequirementAgent(**get_agent_definition(mcp_tools)) + +WORKFLOW_STEP_INSTRUCTIONS = dedent(""" + The final answer must be a JSON object with the following fields: + - `clones`: a list of Jira issue keys and branches that are clones of the given Jira issue or the given Jira issue is a clone of the found Jira issues + - `links`: a list of links you have added between the given Jira issue and the found Jira issues or the found Jira issues and the given Jira issue + ```json + { + "clones": [{"jira_issue": "RHEL-12345", "branch": "rhel-9.6z"}, + {"jira_issue": "RHEL-12346", "branch": "rhel-9.7"}], + "links": [{"source": "RHEL-12345", "target": "RHEL-12346"}, + {"source": "RHEL-12346", "target": "RHEL-12345"}] + } + ``` +""") + +async def main() -> None: + logging.basicConfig(level=logging.INFO) + + setup_observability(os.environ["COLLECTOR_ENDPOINT"]) + + dry_run = os.getenv("DRY_RUN", "False").lower() == "true" + + class State(BaseModel): + jira_issue: str + clones_result: ClonesOutputSchema | None = Field(default=None) + + async def run_workflow(jira_issue): + async 
with mcp_tools(os.getenv("MCP_GATEWAY_URL")) as gateway_tools: + clones_analyzer_agent = RequirementAgent( + **get_agent_definition(gateway_tools), + ) + + async def identify_existing_clones(state): + """Identify and link clones of the given Jira issue""" + logger.info(f"Identifying and linking clones of {state.jira_issue}") + response = await clones_analyzer_agent.run( + get_prompt(ClonesInputSchema(jira_issue=state.jira_issue)), + expected_output=WORKFLOW_STEP_INSTRUCTIONS, + **get_agent_execution_config(), + ) + + state.clones_result = ClonesOutputSchema.model_validate_json(response.last_message.text) + return Workflow.END + + workflow = Workflow(State, name="ClonesAnalyzerWorkflow") + workflow.add_step("identify_existing_clones", identify_existing_clones) + await workflow.run(State(jira_issue=jira_issue)) + + jira_issue = os.getenv("JIRA_ISSUE") + if not jira_issue: + logger.error("JIRA_ISSUE environment variable is required") + return + + await run_workflow(jira_issue) + + +if __name__ == "__main__": + import asyncio + asyncio.run(main()) diff --git a/agents/triage_agent.py b/agents/triage_agent.py index 4583e4de..ba240a39 100644 --- a/agents/triage_agent.py +++ b/agents/triage_agent.py @@ -10,6 +10,7 @@ from typing import Union from pydantic import BaseModel, Field +from typing import List from beeai_framework.agents.requirement import RequirementAgent from beeai_framework.agents.requirement.requirements.conditional import ( @@ -25,6 +26,7 @@ from beeai_framework.utils.strings import to_json import tasks +from agents import clones_analyzer_agent from common.config import load_rhel_config from common.models import ( Task, @@ -37,6 +39,9 @@ NoActionData, ErrorData, CVEEligibilityResult, + WhenEligibility, + ClonesOutputSchema, + Clone, ) from common.utils import redis_client, fix_await from common.constants import JiraLabels, RedisQueues @@ -300,6 +305,7 @@ async def main() -> None: class State(BaseModel): jira_issue: str + clones: List[Clone] | None = 
Field(default=None) cve_eligibility_result: CVEEligibilityResult | None = Field(default=None) triage_result: OutputSchema | None = Field(default=None) target_branch: str | None = Field(default=None) @@ -341,6 +347,21 @@ async def run_workflow(jira_issue): workflow = Workflow(State, name="TriageWorkflow") + async def run_clones_analyzer_agent(state): + """Run the clones analyzer agent""" + logger.info(f"Running clones analyzer agent for {state.jira_issue}") + clones_analyzer_agent_definition = RequirementAgent( + **clones_analyzer_agent.get_agent_definition(gateway_tools), + ) + + response = await clones_analyzer_agent_definition.run( + clones_analyzer_agent.get_prompt(clones_analyzer_agent.ClonesInputSchema(jira_issue=state.jira_issue)), + expected_output=clones_analyzer_agent.WORKFLOW_STEP_INSTRUCTIONS, + **clones_analyzer_agent.get_agent_execution_config(), + ) + state.clones = ClonesOutputSchema.model_validate_json(response.last_message.text).clones + return "check_cve_eligibility" + async def check_cve_eligibility(state): """Check CVE eligibility for the issue""" logger.info(f"Checking CVE eligibility for {state.jira_issue}") @@ -354,28 +375,59 @@ async def check_cve_eligibility(state): logger.info(f"CVE eligibility result: {state.cve_eligibility_result}") # If not eligible for triage, end workflow - if not state.cve_eligibility_result.is_eligible_for_triage: - logger.info(f"Issue {state.jira_issue} not eligible for triage: {state.cve_eligibility_result.reason}") - if state.cve_eligibility_result.error: + match state.cve_eligibility_result.when_eligible_for_triage: + case WhenEligibility.NEVER: + logger.info(f"Issue {state.jira_issue} not eligible for triage: {state.cve_eligibility_result.reason}") + if state.cve_eligibility_result.error: + state.triage_result = OutputSchema( + resolution=Resolution.ERROR, + data=ErrorData( + details=f"CVE eligibility check error: {state.cve_eligibility_result.error}", + jira_issue=state.jira_issue + ) + ) + return 
"comment_in_jira" + else: + state.triage_result = OutputSchema( + resolution=Resolution.NO_ACTION, + data=NoActionData( + reasoning=f"CVE eligibility check decided to skip triaging: {state.cve_eligibility_result.reason}", + jira_issue=state.jira_issue + ) + ) + return "comment_in_jira" + case WhenEligibility.LATER: + logger.info(f"Issue {state.jira_issue} is eligible for triage, but could be postponed: {state.cve_eligibility_result.reason}") + return "run_postponed_triage_analysis" + case WhenEligibility.IMMEDIATELY: + logger.info(f"Issue {state.jira_issue} is eligible for triage: {state.cve_eligibility_result.reason}") + return "run_triage_analysis" + case _: + logger.error(f"Unknown eligibility result: {state.cve_eligibility_result}") state.triage_result = OutputSchema( - resolution=Resolution.ERROR, - data=ErrorData( - details=f"CVE eligibility check error: {state.cve_eligibility_result.error}", - jira_issue=state.jira_issue + resolution=Resolution.ERROR, + data=ErrorData( + details=f"Unknown eligibility result: {state.cve_eligibility_result}", + jira_issue=state.jira_issue + ) ) - ) - else: + return "comment_in_jira" + + async def run_postponed_triage_analysis(state): + """Run the postponed triage analysis, + check if the Z-stream errata has been shipped + before proceeding with the triage analysis in a Y-stream""" + logger.info(f"Running postponed triage analysis for {state.jira_issue}") + for clone in state.clones: + z_streams_errata_shipped = await run_tool( + "check_z_stream_errata_shipped", + available_tools=gateway_tools, + issue_key=clone.jira_issue, + branch=clone.branch) + if not z_streams_errata_shipped: + msg = f"Z-stream errata not shipped yet for issue {clone.jira_issue} for branch {clone.branch}, postponing triage analysis" + logger.info(msg) state.triage_result = OutputSchema( - resolution=Resolution.NO_ACTION, + resolution=Resolution.POSTPONED, data=NoActionData( - reasoning=f"CVE eligibility check decided to skip triaging: {state.cve_eligibility_result.reason}", + reasoning=msg, jira_issue=state.jira_issue ) ) - return "comment_in_jira" + return "comment_in_jira" - reason = state.cve_eligibility_result.reason - logger.info(f"Issue {state.jira_issue} is eligible for triage: {reason}") + logger.info(f"All 
z-stream erratas shipped, proceeding with triage analysis") return "run_triage_analysis" async def run_triage_analysis(state): @@ -499,9 +551,23 @@ async def comment_in_jira(state): comment_text=comment_text, available_tools=gateway_tools, ) + if state.triage_result.resolution == Resolution.POSTPONED: + await tasks.set_jira_labels( + jira_issue=state.jira_issue, + labels_to_add=[JiraLabels.POSTPONED.value], + dry_run=dry_run + ) + elif JiraLabels.POSTPONED.value in JiraLabels.all_labels(): + await tasks.set_jira_labels( + jira_issue=state.jira_issue, + labels_to_remove=[JiraLabels.POSTPONED.value], + dry_run=dry_run + ) return Workflow.END + workflow.add_step("run_clones_analyzer_agent", run_clones_analyzer_agent) workflow.add_step("check_cve_eligibility", check_cve_eligibility) + workflow.add_step("run_postponed_triage_analysis", run_postponed_triage_analysis) workflow.add_step("run_triage_analysis", run_triage_analysis) workflow.add_step("verify_rebase_author", verify_rebase_author) workflow.add_step("determine_target_branch", determine_target_branch_step) diff --git a/common/__init__.py b/common/__init__.py index 890f288b..2343fa01 100644 --- a/common/__init__.py +++ b/common/__init__.py @@ -2,5 +2,6 @@ from .config import load_rhel_config from .models import CVEEligibilityResult +from .models import WhenEligibility -__all__ = ["load_rhel_config", "CVEEligibilityResult"] +__all__ = ["load_rhel_config", "CVEEligibilityResult", "WhenEligibility"] diff --git a/common/constants.py b/common/constants.py index db02c70e..68ea2699 100644 --- a/common/constants.py +++ b/common/constants.py @@ -69,6 +69,7 @@ class JiraLabels(Enum): REBASED = "jotnar_rebased" BACKPORTED = "jotnar_backported" MERGED = "jotnar_merged" + POSTPONED = "jotnar_postponed" REBASE_ERRORED = "jotnar_rebase_errored" BACKPORT_ERRORED = "jotnar_backport_errored" diff --git a/common/models.py b/common/models.py index 3a7f199c..523308ae 100644 --- a/common/models.py +++ b/common/models.py @@ -10,6 +10,11 
@@ from pathlib import Path from enum import Enum +class WhenEligibility(Enum): + """When eligibility for triage.""" + IMMEDIATELY = "immediately" + LATER = "later" + NEVER = "never" class CVEEligibilityResult(BaseModel): """ @@ -21,8 +26,8 @@ class CVEEligibilityResult(BaseModel): is_cve: bool = Field( description="Whether this is a CVE (identified by SecurityTracking label)" ) - is_eligible_for_triage: bool = Field( - description="Whether triage agent should process this CVE" + when_eligible_for_triage: WhenEligibility = Field( + description="Whether triage agent should process this issue immediately, later or never" ) reason: str = Field( description="Explanation of the eligibility decision" @@ -118,6 +123,7 @@ class Resolution(Enum): BACKPORT = "backport" CLARIFICATION_NEEDED = "clarification-needed" NO_ACTION = "no-action" + POSTPONED = "postponed" ERROR = "error" @@ -292,3 +298,26 @@ class FailedPipelineJob(BaseModel): artifacts_url: str = Field( description="URL to browse job artifacts, empty string if no artifacts" ) + + +# ============================================================================ +# Clones Analyzer Agent Schemas +# ============================================================================ + +class ClonesInputSchema(BaseModel): + """Input schema for the clones analyzer agent.""" + jira_issue: str = Field(description="Jira issue key to identify clones of") + +class Clone(BaseModel): + """A clone of a Jira issue.""" + jira_issue: str = Field(description="Jira issue key") + branch: str = Field(description="Branch") + +class Link(BaseModel): + """A link between two Jira issues.""" + source: str = Field(description="Source Jira issue key") + target: str = Field(description="Target Jira issue key") +class ClonesOutputSchema(BaseModel): + """Output schema for the clones analyzer agent.""" + clones: list[Clone] = Field(description="List of Jira issue keys and branches that are clones of the given Jira issue or the given Jira issue is a clone of 
the found Jira issues") + links: list[Link] = Field(description="List of links between the given Jira issue and the found Jira issues or the found Jira issues and the given Jira issue") diff --git a/compose.yaml b/compose.yaml index 9ecf130c..7682fd4d 100644 --- a/compose.yaml +++ b/compose.yaml @@ -135,6 +135,11 @@ services: command: ["python", "agents/triage_agent.py"] profiles: ["agents"] + clones-analyzer-agent: + <<: *beeai-agent-c10s + command: ["python", "agents/clones_analyzer_agent.py"] + profiles: ["agents"] + backport-agent-c9s: <<: *beeai-agent-c9s command: ["python", "agents/backport_agent.py"] diff --git a/jira_issue_fetcher/jira_issue_fetcher.py b/jira_issue_fetcher/jira_issue_fetcher.py index 33d2cd09..af09010f 100644 --- a/jira_issue_fetcher/jira_issue_fetcher.py +++ b/jira_issue_fetcher/jira_issue_fetcher.py @@ -14,6 +14,13 @@ - Proper error handling and logging - Optimized API calls with field filtering - Timeouts + +Configuration: +- QUERY: Custom JQL query (default: "project=RHEL and assignee = jotnar-project") +- ONLY_POSTPONED_RETRY: If set to "true", "1", or "yes", only issues with + jotnar_postponed label will be removed from queues for retry. This mode is + intended for a secondary instance that runs less frequently to handle + postponed issues. 
""" import asyncio @@ -73,6 +80,9 @@ def __init__(self): # Use constant page size self.max_results_per_page = self.MAX_RESULTS_PER_PAGE + # Configuration for retry behavior: if True, only remove issues for retry when they have POSTPONED label + self.only_postponed_retry = os.getenv("ONLY_POSTPONED_RETRY", "false").lower() in ("true", "1", "yes") + self.headers = { "Authorization": f"Bearer {self.jira_token}", "Content-Type": "application/json", @@ -289,6 +299,9 @@ async def push_issues_to_queue(self, issues: List[Dict[str, Any]]) -> int: elif JiraLabels.RETRY_NEEDED.value in jotnar_labels: logger.info(f"Issue {issue_key} has jotnar_retry_needed label - marking for retry") remove_issues_for_retry.add(issue_key) + elif JiraLabels.POSTPONED.value in jotnar_labels and self.only_postponed_retry: + logger.info(f"Issue {issue_key} has jotnar_postponed label - re-processing it") + remove_issues_for_retry.add(issue_key) elif not jotnar_labels: logger.info(f"Issue {issue_key} has no jötnar labels - marking for retry") remove_issues_for_retry.add(issue_key) @@ -327,7 +340,8 @@ async def push_issues_to_queue(self, issues: List[Dict[str, Any]]) -> int: async def run(self) -> None: try: - logger.info("Starting Jira issue fetcher") + mode_str = "POSTPONED-only retry mode" if self.only_postponed_retry else "default retry mode" + logger.info(f"Starting Jira issue fetcher ({mode_str})") issues = await self.search_issues() @@ -371,4 +385,7 @@ async def main(): if os.getenv("QUERY"): logger.info("Using QUERY from environment variable") + if os.getenv("ONLY_POSTPONED_RETRY"): + logger.info("ONLY_POSTPONED_RETRY is set - using POSTPONED-only retry mode") + asyncio.run(main()) diff --git a/mcp_server/jira_tools.py b/mcp_server/jira_tools.py index ec5c94a8..d046bac4 100644 --- a/mcp_server/jira_tools.py +++ b/mcp_server/jira_tools.py @@ -1,4 +1,5 @@ import datetime +import logging import os import json import re @@ -17,7 +18,7 @@ from fastmcp.exceptions import ToolError from pydantic 
import Field -from common import CVEEligibilityResult, load_rhel_config +from common import CVEEligibilityResult, WhenEligibility, load_rhel_config # Jira custom field IDs SEVERITY_CUSTOM_FIELD = "customfield_12316142" @@ -51,6 +52,9 @@ def _get_jira_headers(token: str) -> dict[str, str]: } +logger = logging.getLogger(__name__) + + async def get_jira_details( issue_key: Annotated[str, Field(description="Jira issue key (e.g. RHEL-12345)")], ) -> dict[str, Any]: @@ -204,7 +208,7 @@ async def check_cve_triage_eligibility( if "SecurityTracking" not in labels: return CVEEligibilityResult( is_cve=False, - is_eligible_for_triage=True, + when_eligible_for_triage=WhenEligibility.IMMEDIATELY, reason="Not a CVE" ) @@ -212,26 +216,39 @@ async def check_cve_triage_eligibility( if not fix_versions: return CVEEligibilityResult( is_cve=True, - is_eligible_for_triage=False, + when_eligible_for_triage=WhenEligibility.NEVER, reason="CVE has no target release specified", error="CVE has no target release specified" ) target_version = fix_versions[0].get("name", "") - # Only process Z-stream CVEs (reject Y-stream) + # Process Z-stream CVEs (postpone Y-stream CVEs) if re.match(r"^rhel-\d+\.\d+$", target_version.lower()): + logger.info(f"Y-stream CVE {issue_key} will be handled after " + "all the z-streams related CVEs are processed") + return CVEEligibilityResult( + is_cve=True, + when_eligible_for_triage=WhenEligibility.LATER, + reason="Y-stream CVEs will be handled after all the z-streams are processed" + ) + + + # Process maintenance streams CVEs (X.10.z) after all the z-streams are processed + if re.match(r"^rhel-\d+\.10\.z$", target_version.lower()): + logger.info(f"Maintenance stream CVE {issue_key} will be handled after " + "all the z-streams related CVEs are processed") return CVEEligibilityResult( is_cve=True, - is_eligible_for_triage=False, - reason="Y-stream CVEs will be handled in Z-stream" + when_eligible_for_triage=WhenEligibility.LATER, + reason="Maintenance stream CVEs 
(X.10.z) will be handled after all the z-streams are processed" ) embargo = fields.get(EMBARGO_CUSTOM_FIELD, {}).get("value", "") if embargo == "True": return CVEEligibilityResult( is_cve=True, - is_eligible_for_triage=False, + when_eligible_for_triage=WhenEligibility.NEVER, reason="CVE is embargoed" ) @@ -256,7 +273,7 @@ async def check_cve_triage_eligibility( return CVEEligibilityResult( is_cve=True, - is_eligible_for_triage=True, + when_eligible_for_triage=WhenEligibility.IMMEDIATELY, reason=reason, needs_internal_fix=needs_internal_fix ) @@ -407,3 +424,31 @@ async def verify_issue_author( group.get("name") == RH_EMPLOYEE_GROUP for group in user_data.get("groups", {}).get("items", []) ) + +async def check_z_stream_errata_shipped( + issue_key: Annotated[str, Field(description="Jira issue key (e.g. RHEL-12345)")], + branch: Annotated[str, Field(description="Branch name (e.g. 'rhel-9.8')")], +) -> bool: + """ + Checks if the issue is for the Z-stream, if so, it checks if the issue is closed + (it means the errata has been shipped). If the issue is not for the Z-stream, it returns True. 
+ if not re.match(r"^rhel-\d+\.\d+(\.)?z$", branch.lower()): + return True + + headers = _get_jira_headers(os.getenv("JIRA_TOKEN")) + async with aiohttp.ClientSession() as session: + try: + async with session.get( + urljoin(os.getenv("JIRA_URL"), f"rest/api/2/issue/{issue_key}"), + params={"fields": "status, assignee"}, + headers=headers, + ) as response: + response.raise_for_status() + current_issue = await response.json() + current_status = current_issue.get("fields", {}).get("status", {}).get("name", "") + current_assignee = (current_issue.get("fields", {}).get("assignee") or {}).get("name", "") + except aiohttp.ClientError as e: + raise ToolError(f"Failed to get Jira data: {e}") from e + + return current_status.lower() == "closed" diff --git a/openshift/Makefile b/openshift/Makefile index 659a56ce..c4ed1888 100644 --- a/openshift/Makefile +++ b/openshift/Makefile @@ -5,4 +5,8 @@ run-jira-issue-fetcher: oc delete job jira-issue-fetcher-manual || true oc create job jira-issue-fetcher-manual --from=cronjob/jira-issue-fetcher -.PHONY: deploy run-jira-issue-fetcher +run-jira-postponed-issue-fetcher: + oc delete job jira-postponed-issue-fetcher-manual || true + oc create job jira-postponed-issue-fetcher-manual --from=cronjob/jira-postponed-issue-fetcher + +.PHONY: deploy run-jira-issue-fetcher run-jira-postponed-issue-fetcher diff --git a/openshift/cronjob-jira-postponed-issue-fetcher.yml b/openshift/cronjob-jira-postponed-issue-fetcher.yml new file mode 100644 index 00000000..b8ad8ae2 --- /dev/null +++ b/openshift/cronjob-jira-postponed-issue-fetcher.yml @@ -0,0 +1,62 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: jira-postponed-issue-fetcher + labels: + app: jira-postponed-issue-fetcher + component: scheduler +spec: + schedule: "0 0 * * *" # Once a day at midnight + concurrencyPolicy: Forbid # Prevent overlapping runs + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 5 + suspend: false + jobTemplate: + metadata: + labels: + app: 
jira-postponed-issue-fetcher + component: job + spec: + backoffLimit: 2 + activeDeadlineSeconds: 1800 # 30 minutes max runtime + template: + metadata: + labels: + app: jira-postponed-issue-fetcher + component: pod + spec: + restartPolicy: Never + containers: + - name: jira-issue-fetcher-env + image: 'jira-issue-fetcher:prod' + imagePullPolicy: Always + envFrom: + - configMapRef: + name: endpoints-env + - configMapRef: + name: jira-env + - secretRef: + name: jira-env + - configMapRef: + name: jira-issue-fetcher-env + env: + - name: ONLY_POSTPONED_RETRY + value: "true" + resources: + limits: + cpu: "200m" + securityContext: + allowPrivilegeEscalation: false + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + dnsPolicy: ClusterFirst + schedulerName: default-scheduler + terminationGracePeriodSeconds: 30