8 changes: 8 additions & 0 deletions Containerfile.supervisor
@@ -15,6 +15,7 @@ RUN dnf -y install --allowerasing \
python3-backoff \
python3-beautifulsoup4 \
python3-pip \
python3-nitrate \
python3-requests-gssapi \
python3-rpm \
python3-tomli-w \
@@ -44,6 +45,13 @@ COPY common/ /home/beeai/common/
COPY supervisor/ /home/beeai/supervisor/
RUN chgrp -R root /home/beeai && chmod -R g+rX /home/beeai

COPY <<EOF /etc/nitrate.conf
[nitrate]
url = https://tcms.engineering.redhat.com/xmlrpc/
username=anon
password=whatever
EOF

USER beeai
ENV HOME=/home/beeai
WORKDIR $HOME
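For context, python-nitrate reads the [nitrate] section of /etc/nitrate.conf (or a per-user ~/.nitrate) to find its XML-RPC endpoint, so nothing in the container has to pass credentials explicitly. A minimal connectivity sketch under that assumption; the run ID is hypothetical:

```python
import nitrate

# nitrate picks up url/username/password from /etc/nitrate.conf; the
# anonymous credentials baked into the image suggest read-only access
# (an assumption, not verified here).
run = nitrate.TestRun(12345)  # hypothetical TCMS run ID
print(run)  # fetches the run lazily over XML-RPC and prints its summary
```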
1 change: 1 addition & 0 deletions pyproject.toml
@@ -30,6 +30,7 @@ dependencies = [
"backoff>=2.2.1",
"tomli-w>=1.2.0",
"flexmock>=0.12.2",
"nitrate>=1.9.0",
]

[project.optional-dependencies]
102 changes: 102 additions & 0 deletions supervisor/ewa_utils.py
@@ -0,0 +1,102 @@
#!/usr/bin/python3
import re

import nitrate
import typer

# This script extracts information from the TCMS notes fields as generated
# by Errata Workflow Automation (EWA). An example entry looks like:
#
# ```
# [structured-field-start]
# This is StructuredField version 1. Please, edit with care.
#
# [errata-resolution]
# Old PASSED & New PASSED => WORKING
#
# [result-summary]
# old-files = PASSED [3-0/3]
# new-files = PASSED [3-0/3]
# old-avc = PASSED [3-0/3]
# new-avc = PASSED [3-0/3]
# old-duration = 0:00:21 [0:00:19 - 0:00:37]
# new-duration = 0:00:30 [0:00:18 - 0:00:38]
#
# [result-details]
# beaker-task = https://beaker.engineering.redhat.com/tasks/executed?recipe_task_id=203402088&recipe_task_id=203402051&recipe_task_id=203402393&recipe_task_id=203402430&recipe_task_id=203402125&recipe_task_id=203402467&old_pkg_tasks=203402393,203402467,203402430&new_pkg_tasks=203402088,203402125,203402051
# tcms-results-version = 3.0
#
# [structured-field-end]
# ```
# Rather than formally parsing this (using qe.py), we just pull out the
# lines we are interested in with a regular expression.

# Include only meaningful lines from the notes
NOTES_INCLUDE_PATTERN = re.compile(r'CR#|=>|-files|-avc|beaker-task')
# Unless called with --full, skip the Errata Workflow caseruns
CASERUN_EXCLUDE_PATTERN = re.compile(r'Errata Workflow')
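
# A quick illustration of what these patterns match, doctest-style
# (illustrative only):
#     >>> bool(NOTES_INCLUDE_PATTERN.search('old-avc = PASSED [3-0/3]'))
#     True
#     >>> bool(NOTES_INCLUDE_PATTERN.search('tcms-results-version = 3.0'))
#     False
#     >>> bool(CASERUN_EXCLUDE_PATTERN.search('Errata Workflow: install sanity'))
#     True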

# Get the details of the test run specified by its ID and return a multiline
# string containing the results. For tests which did not PASS, also include
# the useful information from caserun.notes:
#   1) errata resolution (a comparison of results between the unfixed / old
#      and fixed / new builds)
#   2) a link to the results in Beaker
# Optionally you can enable:
#   full output (include Errata Workflow caseruns and print full notes for all tests)
#   color output (include escape codes for colored test status)
def get_tcms_run_details(run_id: int | str, *, full: bool = False, color: bool = False) -> str:
    """
    Fetches and filters TCMS test run details.
    """
    nitrate.set_color_mode(nitrate.COLOR_ON if color else nitrate.COLOR_OFF)

    testrun = nitrate.TestRun(int(run_id))
    output = []

    for caserun in testrun.caseruns:
        caserun_str = str(caserun)
        notes_str = str(caserun.notes)

        passed = (caserun.status == nitrate.Status('PASSED'))

        output_entry = []
        if full:
            output_entry.append(caserun_str)
            # The original script printed the notes line by line; splitting
            # them here replicates that once the entries are joined below.
            output_entry.extend(notes_str.splitlines())
        else:
            # Filter out Errata Workflow caseruns and unhelpful lines from the notes
            if CASERUN_EXCLUDE_PATTERN.search(caserun_str):
                continue
            output_entry.append(caserun_str)
            # Add the details from the notes only for tests that did not pass
            if not passed:
                for line in notes_str.splitlines():
                    if NOTES_INCLUDE_PATTERN.search(line):
                        output_entry.append(line)
        if output_entry:
            output.append(output_entry)

    # Flatten the list of lists into a single list of strings
    flattened_output = [item for sublist in output for item in sublist]
    # Join the strings into a single multiline string
    return "\n".join(flattened_output)

def main(
    test_run_id: str,
    full: bool = typer.Option(False, help="Show the full, unfiltered output."),
    color: bool = typer.Option(False, help="Enable colorized output.")
):
    # get_tcms_run_details returns a single string.
    result = get_tcms_run_details(test_run_id, full=full, color=color)
    if result:
        print(result)

if __name__ == "__main__":
    typer.run(main)
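
A minimal usage sketch of the helper above, assuming the package is importable as supervisor.ewa_utils; the run ID is hypothetical:

```python
from supervisor.ewa_utils import get_tcms_run_details

# Filtered report: Errata Workflow caseruns are skipped and notes are
# included only for caseruns that did not PASS.
report = get_tcms_run_details("123456", full=False, color=False)
print(report)
```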
25 changes: 20 additions & 5 deletions supervisor/testing_analyst.py
@@ -18,6 +18,7 @@
from .tools.read_issue import ReadIssueTool
from .tools.read_logfile import ReadLogfileTool
from .tools.search_resultsdb import SearchResultsdbTool
from .tools.analyze_ewa_testrun import AnalyzeEwaTestRunTool

logger = logging.getLogger(__name__)

@@ -58,13 +59,22 @@ class OutputSchema(BaseModel):
+ """
For components handled by the New Errata Workflow Automation (NEWA):
NEWA will post a comment to the erratum when it has started tests and when they finish.
Read the JIRA issue in those comments to find test results.
For components handled by Errata Workflow Automation (EWA):
EWA will post a comment to the erratum when it has started tests and when they finish.
Read the comment to find the test results in the TCMS Test Run.

If the test location data says that tests are started by NEWA, but there are no comments
from NEWA providing links to JIRA issues, this may be a component where NEWA is only used
for RHEL10 and not earlier versions; in that case, you may read the results from the TCMS
test run posted by EWA.

In all other cases, if the tests are supposed to be started by NEWA, ignore any comments
with links to TCMS or Beaker.

You cannot assume that tests have passed just because a comment says they have
finished; it is mandatory to check the actual test results in the JIRA issue or TCMS.
Make sure that the JIRA issue or TCMS Test Run is the correct one for the latest build in the
erratum.

Tests can trigger at various points in an issue's lifecycle depending on component
@@ -89,6 +99,10 @@ class OutputSchema(BaseModel):
state: tests-passed
comment: [Give a brief summary of what was tested with a link to the result.]

If there are *some* test failures, but you are sure they are not regressions and most tests complete successfully:
state: tests-waived
comment: [Explain which tests failed and why they are not considered regressions]

If the tests will be started automatically without user intervention, but are not yet running:
state: tests-pending
comment: [Provide a brief description of what tests are expected to run and where the results will be]
@@ -169,6 +183,7 @@ async def analyze_issue(
ReadReadmeTool(),
ReadIssueTool(),
SearchResultsdbTool(),
AnalyzeEwaTestRunTool(),
]

agent = ToolCallingAgent(
50 changes: 50 additions & 0 deletions supervisor/tools/analyze_ewa_testrun.py
@@ -0,0 +1,50 @@
import logging
from pydantic import BaseModel, Field

from beeai_framework.context import RunContext
from beeai_framework.emitter import Emitter
from beeai_framework.tools import StringToolOutput, Tool, ToolRunOptions

from ..ewa_utils import get_tcms_run_details

logger = logging.getLogger(__name__)


class AnalyzeEwaTestRunToolInput(BaseModel):
    run_id: int = Field(description="TCMS Test Run ID (e.g., 12345)")


class AnalyzeEwaTestRunTool(Tool[AnalyzeEwaTestRunToolInput, ToolRunOptions, StringToolOutput]):
    name = "analyze_ewa_testrun"  # type: ignore
    description = (  # type: ignore
        "Analyzes a TCMS test run generated by Errata Workflow Automation (EWA) "
        "and creates a test-case by test-case report."
    )
    input_schema = AnalyzeEwaTestRunToolInput  # type: ignore

    def _create_emitter(self) -> Emitter:
        return Emitter.root().child(
            namespace=["tool", "analyze_ewa_testrun"],
            creator=self,
        )

    async def _run(
        self,
        input: AnalyzeEwaTestRunToolInput,
        options: ToolRunOptions | None,
        context: RunContext,
    ) -> StringToolOutput:
        try:
            # Fetch the TCMS run details
            run_details = get_tcms_run_details(input.run_id)

            # Return the formatted details
            return StringToolOutput(
                result=run_details
            )

        except Exception as e:
            logger.error(f"Failed to get TCMS run details for {input.run_id}: {e}")
            return StringToolOutput(
                result=f"Error: Failed to get TCMS run details for {input.run_id}: {str(e)}"
            )
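
A sketch of exercising the tool directly, outside the agent loop; run() and get_text_content() are assumed to come from the beeai_framework Tool and StringToolOutput base classes:

```python
import asyncio

from supervisor.tools.analyze_ewa_testrun import (
    AnalyzeEwaTestRunTool,
    AnalyzeEwaTestRunToolInput,
)


async def main() -> None:
    tool = AnalyzeEwaTestRunTool()
    # run() is assumed to wrap _run() with the framework's context and
    # emitter plumbing, as is typical for beeai_framework tools.
    output = await tool.run(AnalyzeEwaTestRunToolInput(run_id=12345))
    print(output.get_text_content())


asyncio.run(main())
```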