diff --git a/plugins/action/nd_switches_validate.py b/plugins/action/nd_switches_validate.py new file mode 100644 index 000000000..ba96f29bb --- /dev/null +++ b/plugins/action/nd_switches_validate.py @@ -0,0 +1,255 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""ND Switches Validation Action Plugin. + +Validates switch data returned from nd_rest against expected +configuration entries. Checks that every entry in test_data has a matching +switch in the ND API response (fabricManagementIp == seed_ip, +switchRole == role). + +Supports an optional ``mode`` argument: + - ``"both"`` (default): match by seed_ip AND role. + - ``"ip"``: match by seed_ip only (role is ignored). + - ``"role"``: match by role only (seed_ip is ignored). +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type # pylint: disable=invalid-name + +import json +from typing import Any, Dict, List, Optional, Union + +from ansible.plugins.action import ActionBase +from ansible.utils.display import Display +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + BaseModel, + HAS_PYDANTIC, + ValidationError, + field_validator, + model_validator, +) + +try: + from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.config_models import ( + SwitchConfigModel, + ) + from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.switch_data_models import ( + SwitchDataModel, + ) + + HAS_MODELS = True +except ImportError: + HAS_MODELS = False + +display = Display() + + +# --------------------------------------------------------------------------- +# Validation orchestration model +# --------------------------------------------------------------------------- + + +class SwitchesValidate(BaseModel): + """Orchestrates the match between playbook config entries and live ND 
inventory.""" + + config_data: Optional[List[Any]] = None + nd_data: Optional[List[Any]] = None + ignore_fields: Optional[Dict[str, int]] = None + response: Union[bool, None] = None + + @field_validator("config_data", mode="before") + @classmethod + def parse_config_data(cls, value): + """Coerce raw dicts into SwitchConfigModel instances. + + Accepts a single dict or a list of dicts. + """ + if isinstance(value, dict): + return [SwitchConfigModel.model_validate(value)] + if isinstance(value, list): + try: + return [(SwitchConfigModel.model_validate(item) if isinstance(item, dict) else item) for item in value] + except (ValidationError, ValueError) as e: + raise ValueError("Invalid format in Config Data: {0}".format(e)) + if value is None: + return None + raise ValueError("Config Data must be a single/list of dictionary, or None.") + + @field_validator("nd_data", mode="before") + @classmethod + def parse_nd_data(cls, value): + """Coerce raw ND API switch dicts into SwitchDataModel instances.""" + if isinstance(value, list): + try: + return [(SwitchDataModel.from_response(item) if isinstance(item, dict) else item) for item in value] + except (ValidationError, ValueError) as e: + raise ValueError("Invalid format in ND Response: {0}".format(e)) + if value is None: + return None + raise ValueError("ND Response must be a list of dictionaries.") + + @model_validator(mode="after") + def validate_lists_equality(self): + """Match every config entry against the live ND switch inventory. + + Sets ``self.response = True`` when all entries match, ``False`` otherwise. + Respects ``ignore_fields`` to support ip-only or role-only matching modes. + + Role comparison uses SwitchRole enum equality — no string normalization needed. + """ + config_data = self.config_data + nd_data_list = self.nd_data + ignore_fields = self.ignore_fields + + # Both empty → nothing to validate, treat as success. + # Exactly one empty → mismatch, treat as failure. 
+ if not config_data and not nd_data_list: + self.response = True + return self + if not config_data or not nd_data_list: + self.response = False + return self + + missing_ips = [] + role_mismatches = {} + nd_data_copy = nd_data_list.copy() + matched_indices = set() + + for config_item in config_data: + found_match = False + seed_ip = config_item.seed_ip + role_expected = config_item.role # SwitchRole enum or None + + for i, nd_item in enumerate(nd_data_copy): + if i in matched_indices: + continue + + ip_address = nd_item.fabric_management_ip + switch_role = nd_item.switch_role # SwitchRole enum or None + + seed_ip_match = (seed_ip is not None and ip_address is not None and ip_address == seed_ip) or bool(ignore_fields["seed_ip"]) + role_match = (role_expected is not None and switch_role is not None and switch_role == role_expected) or bool(ignore_fields["role"]) + + if seed_ip_match and role_match: + matched_indices.add(i) + found_match = True + if ignore_fields["seed_ip"]: + break + elif (seed_ip_match and role_expected is not None and switch_role is not None and switch_role != role_expected) or ignore_fields["role"]: + role_mismatches.setdefault( + seed_ip or ip_address, + { + "expected_role": (role_expected.value if role_expected else None), + "response_role": switch_role.value if switch_role else None, + }, + ) + matched_indices.add(i) + found_match = True + if ignore_fields["seed_ip"]: + break + + if not found_match and seed_ip is not None: + missing_ips.append(seed_ip) + + if not missing_ips and not role_mismatches: + self.response = True + else: + display.display("Invalid Data:") + if missing_ips: + display.display(" Missing IPs: {0}".format(missing_ips)) + if role_mismatches: + display.display(" Role mismatches: {0}".format(json.dumps(role_mismatches, indent=2))) + self.response = False + + return self + + +# --------------------------------------------------------------------------- +# Action plugin +# 
--------------------------------------------------------------------------- + + +class ActionModule(ActionBase): + """Ansible action plugin for validating ND switch inventory data. + + Arguments (task args): + nd_data (dict): The registered result of a cisco.nd.nd_rest GET call. + test_data (list|dict): Expected switch entries, each with ``seed_ip`` + and optionally ``role``. + changed (bool, optional): If provided and False, the task fails + immediately (used to assert an upstream + operation produced a change). + mode (str, optional): ``"both"`` (default), ``"ip"``, or ``"role"``. + """ + + def run(self, tmp=None, task_vars=None): + results = super(ActionModule, self).run(tmp, task_vars) + results["failed"] = False + + if not HAS_PYDANTIC or not HAS_MODELS: + results["failed"] = True + results["msg"] = "pydantic and the ND collection models are required for nd_switches_validate" + return results + + nd_data = self._task.args["nd_data"] + test_data = self._task.args["test_data"] + + # Fail fast if the caller signals that no change occurred when one was expected. + if "changed" in self._task.args and not self._task.args["changed"]: + results["failed"] = True + results["msg"] = 'Changed is "false"' + return results + + # Fail fast if the upstream nd_rest task itself failed. + if nd_data.get("failed"): + results["failed"] = True + results["msg"] = nd_data.get("msg", "ND module returned a failure") + return results + + # Extract switch list from nd_data.current.switches + switches = nd_data.get("current", {}).get("switches", []) + + # Normalise test_data to a list. + if isinstance(test_data, dict): + test_data = [test_data] + + # If both are empty treat as success; if only nd response is empty it's a failure. + if not switches and not test_data: + results["msg"] = "Validation Successful!" 
+ return results + + if not switches: + results["failed"] = True + results["msg"] = "No switches found in ND response" + return results + + # Resolve matching mode via ignore_fields flags. + ignore_fields = {"seed_ip": 0, "role": 0} + if "mode" in self._task.args: + mode = self._task.args["mode"].lower() + if mode == "ip": + # IP mode: only match by seed_ip, ignore role + ignore_fields["role"] = 1 + elif mode == "role": + # Role mode: only match by role, ignore seed_ip + ignore_fields["seed_ip"] = 1 + + validation = SwitchesValidate( + config_data=test_data, + nd_data=switches, + ignore_fields=ignore_fields, + response=None, + ) + + if validation.response: + results["msg"] = "Validation Successful!" + else: + results["failed"] = True + results["msg"] = "Validation Failed! Please check output above." + + return results diff --git a/plugins/module_utils/common/pydantic_compat.py b/plugins/module_utils/common/pydantic_compat.py index b26559d28..3bedda958 100644 --- a/plugins/module_utils/common/pydantic_compat.py +++ b/plugins/module_utils/common/pydantic_compat.py @@ -48,6 +48,7 @@ StrictBool, SecretStr, ValidationError, + ValidationInfo, field_serializer, model_serializer, field_validator, @@ -73,6 +74,7 @@ StrictBool, SecretStr, ValidationError, + ValidationInfo, field_serializer, model_serializer, field_validator, @@ -191,6 +193,14 @@ def __init__(self, message="A custom error occurred."): def __str__(self): return f"ValidationError: {self.message}" + # Fallback: ValidationInfo placeholder class that does nothing + class ValidationInfo: + """Pydantic ValidationInfo fallback when pydantic is not available.""" + + def __init__(self, **kwargs): + for key, value in kwargs.items(): + setattr(self, key, value) + # Fallback: model_validator decorator that does nothing def model_validator(*args, **kwargs): # pylint: disable=unused-argument """Pydantic model_validator fallback when pydantic is not available.""" @@ -276,6 +286,7 @@ def main(): "StrictBool", "SecretStr", 
"ValidationError", + "ValidationInfo", "field_serializer", "model_serializer", "field_validator", diff --git a/plugins/module_utils/endpoints/mixins.py b/plugins/module_utils/endpoints/mixins.py index e7f0620c9..9cd60fffd 100644 --- a/plugins/module_utils/endpoints/mixins.py +++ b/plugins/module_utils/endpoints/mixins.py @@ -32,6 +32,12 @@ class FabricNameMixin(BaseModel): fabric_name: Optional[str] = Field(default=None, min_length=1, max_length=64, description="Fabric name") +class FilterMixin(BaseModel): + """Mixin for endpoints that require a Lucene filter expression.""" + + filter: Optional[str] = Field(default=None, min_length=1, description="Lucene filter expression") + + class ForceShowRunMixin(BaseModel): """Mixin for endpoints that require force_show_run parameter.""" @@ -62,6 +68,12 @@ class LoginIdMixin(BaseModel): login_id: Optional[str] = Field(default=None, min_length=1, description="Login ID") +class MaxMixin(BaseModel): + """Mixin for endpoints that require a max results parameter.""" + + max: Optional[int] = Field(default=None, ge=1, description="Maximum number of results") + + class NetworkNameMixin(BaseModel): """Mixin for endpoints that require network_name parameter.""" @@ -74,12 +86,24 @@ class NodeNameMixin(BaseModel): node_name: Optional[str] = Field(default=None, min_length=1, description="Node name") +class OffsetMixin(BaseModel): + """Mixin for endpoints that require a pagination offset parameter.""" + + offset: Optional[int] = Field(default=None, ge=0, description="Pagination offset") + + class SwitchSerialNumberMixin(BaseModel): """Mixin for endpoints that require switch_sn parameter.""" switch_sn: Optional[str] = Field(default=None, min_length=1, description="Switch serial number") +class TicketIdMixin(BaseModel): + """Mixin for endpoints that require ticket_id parameter.""" + + ticket_id: Optional[str] = Field(default=None, min_length=1, description="Change control ticket ID") + + class VrfNameMixin(BaseModel): """Mixin for endpoints 
that require vrf_name parameter.""" diff --git a/plugins/module_utils/endpoints/query_params.py b/plugins/module_utils/endpoints/query_params.py index 2cddd97d2..6d15c2abf 100644 --- a/plugins/module_utils/endpoints/query_params.py +++ b/plugins/module_utils/endpoints/query_params.py @@ -209,8 +209,16 @@ def to_query_string(self, url_encode: bool = True) -> str: params = [] for field_name, field_value in self.model_dump(exclude_none=True).items(): if field_value is not None: - # URL-encode the value if requested - encoded_value = quote(str(field_value), safe="") if url_encode else str(field_value) + # URL-encode the value if requested. + # Lucene filter expressions require ':' and ' ' to remain unencoded + # so the server-side parser can recognise the field:value syntax. + if url_encode: + # Keep ':' unencoded so Lucene field:value syntax is preserved. + # Spaces are encoded as %20 so the query string is valid in URLs. + safe_chars = ":" if field_name == "filter" else "" + encoded_value = quote(str(field_value), safe=safe_chars) + else: + encoded_value = str(field_value) params.append(f"{field_name}={encoded_value}") return "&".join(params) diff --git a/plugins/module_utils/endpoints/v1/manage/manage_credentials_switches.py b/plugins/module_utils/endpoints/v1/manage/manage_credentials_switches.py new file mode 100644 index 000000000..17ac0312d --- /dev/null +++ b/plugins/module_utils/endpoints/v1/manage/manage_credentials_switches.py @@ -0,0 +1,149 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Credentials endpoint models. + +This module contains endpoint definitions for switch credential operations +in the ND Manage API. 
+ +Endpoints covered: +- List switch credentials +- Create switch credentials +- Remove switch credentials +- Validate switch credentials +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +__author__ = "Akshayanat C S" +# pylint: enable=invalid-name + +from typing import Literal + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + TicketIdMixin, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( + EndpointQueryParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( + BasePath, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( + NDEndpointBaseModel, +) + + +class CredentialsSwitchesEndpointParams(TicketIdMixin, EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for credentials switches endpoint. + + ## Parameters + + - ticket_id: Change control ticket ID (optional, from `TicketIdMixin`) + + ## Usage + + ```python + params = CredentialsSwitchesEndpointParams(ticket_id="CHG12345") + query_string = params.to_query_string() + # Returns: "ticketId=CHG12345" + ``` + """ + + +class _EpManageCredentialsSwitchesBase(NDEndpointBaseModel): + """ + Base class for Credentials Switches endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/credentials/switches endpoint. 
+ """ + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + return BasePath.path("credentials", "switches") + + +class EpManageCredentialsSwitchesPost(_EpManageCredentialsSwitchesBase): + """ + # Summary + + Create Switch Credentials Endpoint + + ## Description + + Endpoint to save switch credentials for the user. + + ## Path + + - /api/v1/manage/credentials/switches + - /api/v1/manage/credentials/switches?ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Create credentials without ticket + request = EpManageCredentialsSwitchesPost() + path = request.path + verb = request.verb + + # Create credentials with change control ticket + request = EpManageCredentialsSwitchesPost() + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/credentials/switches?ticketId=CHG12345 + ``` + """ + + class_name: Literal["EpManageCredentialsSwitchesPost"] = Field( + default="EpManageCredentialsSwitchesPost", + frozen=True, + description="Class name for backward compatibility", + ) + endpoint_params: CredentialsSwitchesEndpointParams = Field( + default_factory=CredentialsSwitchesEndpointParams, + description="Endpoint-specific query parameters", + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{self._base_path}?{query_string}" + return self._base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py index 8a9b1c2bc..8ae8803af 100644 --- a/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics.py @@ -11,18 +11,20 @@ ## Endpoints -- `EpApiV1ManageFabricsGet` - Get a specific fabric by name +- `EpManageFabricsGet` - Get a specific fabric by name (GET /api/v1/manage/fabrics/{fabric_name}) -- `EpApiV1ManageFabricsListGet` - List all fabrics with optional filtering +- `EpManageFabricsListGet` - List all fabrics with optional filtering (GET /api/v1/manage/fabrics) -- `EpApiV1ManageFabricsPost` - Create a new fabric +- `EpManageFabricsPost` - Create a new fabric (POST /api/v1/manage/fabrics) -- `EpApiV1ManageFabricsPut` - Update a specific fabric +- `EpManageFabricsPut` - Update a specific fabric (PUT /api/v1/manage/fabrics/{fabric_name}) -- `EpApiV1ManageFabricsDelete` - Delete a specific fabric +- `EpManageFabricsDelete` - Delete a specific fabric (DELETE /api/v1/manage/fabrics/{fabric_name}) -- `EpApiV1ManageFabricsSummaryGet` - Get summary for a specific fabric +- `EpManageFabricsSummaryGet` - Get summary for a specific fabric (GET /api/v1/manage/fabrics/{fabric_name}/summary) +- `EpManageFabricConfigDeployPost` - Deploy pending config for a fabric + (POST /api/v1/manage/fabrics/{fabric_name}/actions/configDeploy) """ from __future__ import annotations @@ -67,6 +69,30 @@ class FabricsEndpointParams(EndpointQueryParams): ) +class FabricConfigDeployEndpointParams(EndpointQueryParams): + """ + # Summary + + 
Endpoint-specific query parameters for the fabric config deploy endpoint. + + ## Parameters + + - force_show_run: Force show running config before deploy (optional) + - incl_all_msd_switches: Include all MSD fabric switches (optional) + + ## Usage + + ```python + params = FabricConfigDeployEndpointParams(force_show_run=True) + query_string = params.to_query_string() + # Returns: "forceShowRun=true" + ``` + """ + + force_show_run: Optional[bool] = Field(default=None, description="Force show running config before deploy") + incl_all_msd_switches: Optional[bool] = Field(default=None, description="Include all MSD fabric switches") + + class _EpManageFabricsBase(FabricNameMixin, NDEndpointBaseModel): """ Base class for ND Manage Fabrics endpoints. @@ -149,14 +175,14 @@ class EpManageFabricsGet(_EpManageFabricsBase): ```python # Get details for a specific fabric - request = EpApiV1ManageFabricsGet() + request = EpManageFabricsGet() request.fabric_name = "my-fabric" path = request.path verb = request.verb # Path will be: /api/v1/manage/fabrics/my-fabric # Get fabric details targeting a specific cluster in a multi-cluster deployment - request = EpApiV1ManageFabricsGet() + request = EpManageFabricsGet() request.fabric_name = "my-fabric" request.endpoint_params.cluster_name = "cluster1" path = request.path @@ -165,7 +191,7 @@ class EpManageFabricsGet(_EpManageFabricsBase): ``` """ - class_name: Literal["EpApiV1ManageFabricsGet"] = Field(default="EpApiV1ManageFabricsGet", description="Class name for backward compatibility") + class_name: Literal["EpManageFabricsGet"] = Field(default="EpManageFabricsGet", description="Class name for backward compatibility") endpoint_params: FabricsEndpointParams = Field(default_factory=FabricsEndpointParams, description="Endpoint-specific query parameters") @@ -261,13 +287,13 @@ class EpManageFabricsListGet(_EpManageFabricsBase): ```python # List all fabrics - ep = EpApiV1ManageFabricsListGet() + ep = EpManageFabricsListGet() path = ep.path 
verb = ep.verb # Path: /api/v1/manage/fabrics # List fabrics with filtering and pagination - ep = EpApiV1ManageFabricsListGet() + ep = EpManageFabricsListGet() ep.endpoint_params.category = "fabric" ep.endpoint_params.max = 10 path = ep.path @@ -277,7 +303,7 @@ class EpManageFabricsListGet(_EpManageFabricsBase): _require_fabric_name: ClassVar[bool] = False - class_name: Literal["EpApiV1ManageFabricsListGet"] = Field(default="EpApiV1ManageFabricsListGet", description="Class name for backward compatibility") + class_name: Literal["EpManageFabricsListGet"] = Field(default="EpManageFabricsListGet", description="Class name for backward compatibility") endpoint_params: FabricsListEndpointParams = Field(default_factory=FabricsListEndpointParams, description="Endpoint-specific query parameters") @@ -325,7 +351,7 @@ class EpManageFabricsPost(_EpManageFabricsBase): ## Usage ```python - ep = EpApiV1ManageFabricsPost() + ep = EpManageFabricsPost() rest_send.path = ep.path rest_send.verb = ep.verb rest_send.payload = { @@ -339,7 +365,7 @@ class EpManageFabricsPost(_EpManageFabricsBase): _require_fabric_name: ClassVar[bool] = False - class_name: Literal["EpApiV1ManageFabricsPost"] = Field(default="EpApiV1ManageFabricsPost", description="Class name for backward compatibility") + class_name: Literal["EpManageFabricsPost"] = Field(default="EpManageFabricsPost", description="Class name for backward compatibility") endpoint_params: FabricsEndpointParams = Field(default_factory=FabricsEndpointParams, description="Endpoint-specific query parameters") @@ -381,7 +407,7 @@ class EpManageFabricsPut(_EpManageFabricsBase): ## Usage ```python - ep = EpApiV1ManageFabricsPut() + ep = EpManageFabricsPut() ep.fabric_name = "my-fabric" rest_send.path = ep.path rest_send.verb = ep.verb @@ -393,7 +419,7 @@ class EpManageFabricsPut(_EpManageFabricsBase): ``` """ - class_name: Literal["EpApiV1ManageFabricsPut"] = Field(default="EpApiV1ManageFabricsPut", description="Class name for backward 
compatibility") + class_name: Literal["EpManageFabricsPut"] = Field(default="EpManageFabricsPut", description="Class name for backward compatibility") endpoint_params: FabricsEndpointParams = Field(default_factory=FabricsEndpointParams, description="Endpoint-specific query parameters") @@ -430,14 +456,14 @@ class EpManageFabricsDelete(_EpManageFabricsBase): ## Usage ```python - ep = EpApiV1ManageFabricsDelete() + ep = EpManageFabricsDelete() ep.fabric_name = "my-fabric" rest_send.path = ep.path rest_send.verb = ep.verb ``` """ - class_name: Literal["EpApiV1ManageFabricsDelete"] = Field(default="EpApiV1ManageFabricsDelete", description="Class name for backward compatibility") + class_name: Literal["EpManageFabricsDelete"] = Field(default="EpManageFabricsDelete", description="Class name for backward compatibility") endpoint_params: FabricsEndpointParams = Field(default_factory=FabricsEndpointParams, description="Endpoint-specific query parameters") @@ -474,7 +500,7 @@ class EpManageFabricsSummaryGet(_EpManageFabricsBase): ## Usage ```python - ep = EpApiV1ManageFabricsSummaryGet() + ep = EpManageFabricsSummaryGet() ep.fabric_name = "my-fabric" path = ep.path verb = ep.verb @@ -482,9 +508,7 @@ class EpManageFabricsSummaryGet(_EpManageFabricsBase): ``` """ - class_name: Literal["EpApiV1ManageFabricsSummaryGet"] = Field( - default="EpApiV1ManageFabricsSummaryGet", description="Class name for backward compatibility" - ) + class_name: Literal["EpManageFabricsSummaryGet"] = Field(default="EpManageFabricsSummaryGet", description="Class name for backward compatibility") _path_suffix: ClassVar[Optional[str]] = "summary" @@ -494,3 +518,69 @@ class EpManageFabricsSummaryGet(_EpManageFabricsBase): def verb(self) -> HttpVerbEnum: """Return the HTTP verb for this endpoint.""" return HttpVerbEnum.GET + + +class EpManageFabricConfigDeployPost(_EpManageFabricsBase): + """ + # Summary + + Fabric Config Deploy Endpoint + + ## Description + + Endpoint to deploy pending configuration to 
all switches in a fabric. + + ## Path + + - /api/v1/manage/fabrics/{fabric_name}/actions/configDeploy + - /api/v1/manage/fabrics/{fabric_name}/actions/configDeploy?forceShowRun=true + + ## Verb + + - POST + + ## Query Parameters + + - force_show_run: Force show running config before deploy (optional) + - incl_all_msd_switches: Include all MSD fabric switches (optional) + + ## Usage + + ```python + ep = EpManageFabricConfigDeployPost() + ep.fabric_name = "MyFabric" + path = ep.path + verb = ep.verb + + # With forceShowRun + ep.endpoint_params.force_show_run = True + path = ep.path + # Path: /api/v1/manage/fabrics/MyFabric/actions/configDeploy?forceShowRun=true + ``` + """ + + class_name: Literal["EpManageFabricConfigDeployPost"] = Field( + default="EpManageFabricConfigDeployPost", + frozen=True, + description="Class name for backward compatibility", + ) + endpoint_params: FabricConfigDeployEndpointParams = Field( + default_factory=FabricConfigDeployEndpointParams, + description="Endpoint-specific query parameters", + ) + + @property + def path(self) -> str: + """Build the endpoint path with optional query string.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + base = BasePath.path("fabrics", self.fabric_name, "actions", "configDeploy") + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base}?{query_string}" + return base + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_actions.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_actions.py new file mode 100644 index 000000000..4019af51a --- /dev/null +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_actions.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or 
https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Fabric Discovery endpoint models. + +This module contains endpoint definitions for switch discovery operations +within fabrics in the ND Manage API. + +Endpoints covered: +- Shallow discovery +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +__author__ = "Akshayanat C S" +# pylint: enable=invalid-name + +from typing import Literal + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + FabricNameMixin, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( + BasePath, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( + NDEndpointBaseModel, +) + + +class _EpManageFabricsActionsBase(FabricNameMixin, NDEndpointBaseModel): + """ + Base class for Fabric Actions endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics/{fabricName}/actions endpoint. + """ + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + return BasePath.path("fabrics", self.fabric_name, "actions") + + +class EpManageFabricsActionsShallowDiscoveryPost(_EpManageFabricsActionsBase): + """ + # Summary + + Shallow Discovery Endpoint + + ## Description + + Endpoint to shallow discover switches given seed switches with hop count. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/actions/shallowDiscovery + + ## Verb + + - POST + + ## Usage + + ```python + request = EpManageFabricsActionsShallowDiscoveryPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + ``` + """ + + class_name: Literal["EpManageFabricsActionsShallowDiscoveryPost"] = Field( + default="EpManageFabricsActionsShallowDiscoveryPost", + frozen=True, + description="Class name for backward compatibility", + ) + + @property + def path(self) -> str: + """Build the endpoint path.""" + return f"{self._base_path}/shallowDiscovery" + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +class EpManageFabricsActionsConfigSavePost(_EpManageFabricsActionsBase): + """ + # Summary + + Fabric Config Save Endpoint + + ## Description + + Endpoint to save (recalculate) fabric configuration. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/actions/configSave + + ## Verb + + - POST + + ## Usage + + ```python + request = EpManageFabricsActionsConfigSavePost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + ``` + """ + + class_name: Literal["EpManageFabricsActionsConfigSavePost"] = Field( + default="EpManageFabricsActionsConfigSavePost", + frozen=True, + description="Class name for backward compatibility", + ) + + @property + def path(self) -> str: + """Build the endpoint path.""" + return f"{self._base_path}/configSave" + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py new file mode 100644 index 000000000..d61f1e430 --- /dev/null +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_bootstrap.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat 
C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Fabric Bootstrap endpoint models. + +This module contains endpoint definitions for switch bootstrap operations +within fabrics in the ND Manage API. + +Endpoints covered: +- List bootstrap switches (POAP/PnP) +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +__author__ = "Akshayanat C S" +# pylint: enable=invalid-name + +from typing import Literal + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + FabricNameMixin, + FilterMixin, + MaxMixin, + OffsetMixin, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( + EndpointQueryParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( + BasePath, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( + NDEndpointBaseModel, +) + + +class FabricsBootstrapEndpointParams(FilterMixin, MaxMixin, OffsetMixin, EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for fabric bootstrap endpoint. + + ## Parameters + + - max: Maximum number of results to return (optional, from `MaxMixin`) + - offset: Pagination offset (optional, from `OffsetMixin`) + - filter: Lucene filter expression (optional, from `FilterMixin`) + + ## Usage + + ```python + params = FabricsBootstrapEndpointParams(max=50, offset=0) + query_string = params.to_query_string() + # Returns: "max=50&offset=0" + ``` + """ + + +class _EpManageFabricsBootstrapBase(FabricNameMixin, NDEndpointBaseModel): + """ + Base class for Fabric Bootstrap endpoints. 
+ + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics/{fabricName}/bootstrap endpoint. + """ + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + return BasePath.path("fabrics", self.fabric_name, "bootstrap") + + +class EpManageFabricsBootstrapGet(_EpManageFabricsBootstrapBase): + """ + # Summary + + List Bootstrap Switches Endpoint + + ## Description + + Endpoint to list switches currently going through bootstrap loop via POAP (NX-OS) or PnP (IOS-XE). + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/bootstrap + - /api/v1/manage/fabrics/{fabricName}/bootstrap?max=50&offset=0 + + ## Verb + + - GET + + ## Query Parameters + + - max: Maximum number of results (optional) + - offset: Pagination offset (optional) + - filter: Lucene filter expression (optional) + + ## Usage + + ```python + # List all bootstrap switches + request = EpManageFabricsBootstrapGet() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # List with pagination + request = EpManageFabricsBootstrapGet() + request.fabric_name = "MyFabric" + request.endpoint_params.max = 50 + request.endpoint_params.offset = 0 + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/bootstrap?max=50&offset=0 + ``` + """ + + class_name: Literal["EpManageFabricsBootstrapGet"] = Field( + default="EpManageFabricsBootstrapGet", + frozen=True, + description="Class name for backward compatibility", + ) + endpoint_params: FabricsBootstrapEndpointParams = Field( + default_factory=FabricsBootstrapEndpointParams, + description="Endpoint-specific query parameters", + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{self._base_path}?{query_string}" + return self._base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.GET diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_inventory.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_inventory.py new file mode 100644 index 000000000..b4dd0247a --- /dev/null +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_inventory.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Fabrics Inventory endpoint models. + +This module contains endpoint definitions for fabric inventory operations +in the ND Manage API. + +Endpoints covered: +- Inventory discover status +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +__author__ = "Akshayanat C S" +# pylint: enable=invalid-name + +from typing import Literal + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + FabricNameMixin, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( + BasePath, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( + NDEndpointBaseModel, +) + + +class _EpManageFabricsInventoryBase(FabricNameMixin, NDEndpointBaseModel): + """ + Base class for Fabric Inventory endpoints. 
+ + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics/{fabricName}/inventory endpoint family. + """ + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + return BasePath.path("fabrics", self.fabric_name) + + +class EpManageFabricsInventoryDiscoverGet(_EpManageFabricsInventoryBase): + """ + # Summary + + Fabric Inventory Discover Endpoint + + ## Description + + Endpoint to get discovery status for switches in a fabric. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/inventory/discover + + ## Verb + + - GET + + ## Usage + + ```python + request = EpManageFabricsInventoryDiscoverGet() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + ``` + """ + + class_name: Literal["EpManageFabricsInventoryDiscoverGet"] = Field( + default="EpManageFabricsInventoryDiscoverGet", + frozen=True, + description="Class name for backward compatibility", + ) + + @property + def path(self) -> str: + """Build the endpoint path.""" + return f"{self._base_path}/inventory/discover" + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.GET diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py new file mode 100644 index 000000000..2d5aaa424 --- /dev/null +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switchactions.py @@ -0,0 +1,604 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Fabric Switch Actions endpoint models. + +This module contains endpoint definitions for switch action operations +within fabrics in the ND Manage API. 
+ +Endpoints covered: +- Remove switches (bulk delete) +- Change switch roles (bulk) +- Import bootstrap (POAP) +- Pre-provision switches +- Rediscover switches +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +__author__ = "Akshayanat C S" +# pylint: enable=invalid-name + +from typing import Literal, Optional + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + ClusterNameMixin, + FabricNameMixin, + TicketIdMixin, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( + EndpointQueryParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( + BasePath, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( + NDEndpointBaseModel, +) + +# ============================================================================ +# Endpoint-specific query parameter classes +# ============================================================================ + + +class SwitchActionsRemoveEndpointParams(TicketIdMixin, EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for switch actions remove endpoint. 
+ + ## Parameters + + - force: Force removal even if switches have pending operations (optional) + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + params = SwitchActionsRemoveEndpointParams(force=True, ticket_id="CHG12345") + query_string = params.to_query_string() + # Returns: "force=true&ticketId=CHG12345" + ``` + """ + + force: Optional[bool] = Field(default=None, description="Force removal of switches") + + +class SwitchActionsTicketEndpointParams(TicketIdMixin, EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for switch action endpoints that accept a ticket ID. + + ## Parameters + + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + params = SwitchActionsTicketEndpointParams(ticket_id="CHG12345") + query_string = params.to_query_string() + # Returns: "ticketId=CHG12345" + ``` + """ + + +class SwitchActionsImportEndpointParams(ClusterNameMixin, TicketIdMixin, EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for switch import/provision endpoints. + + ## Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional, from `ClusterNameMixin`) + - ticket_id: Change control ticket ID (optional, from `TicketIdMixin`) + + ## Usage + + ```python + params = SwitchActionsImportEndpointParams(cluster_name="cluster1", ticket_id="CHG12345") + query_string = params.to_query_string() + # Returns: "clusterName=cluster1&ticketId=CHG12345" + ``` + """ + + +# ============================================================================ +# Switch Actions Endpoints +# ============================================================================ + + +class _EpManageFabricsSwitchActionsBase(FabricNameMixin, NDEndpointBaseModel): + """ + Base class for Fabric Switch Actions endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics/{fabricName}/switchActions endpoint. 
+ """ + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + return BasePath.path("fabrics", self.fabric_name, "switchActions") + + +class EpManageFabricsSwitchActionsRemovePost(_EpManageFabricsSwitchActionsBase): + """ + # Summary + + Remove Switches Endpoint (Bulk Delete) + + ## Description + + Endpoint to delete multiple switches from a fabric. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switchActions/remove + - /api/v1/manage/fabrics/{fabricName}/switchActions/remove?force=true&ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - force: Force removal even if switches have pending operations (optional) + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Remove switches + request = EpManageFabricsSwitchActionsRemovePost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # Remove switches with force and ticket + request = EpManageFabricsSwitchActionsRemovePost() + request.fabric_name = "MyFabric" + request.endpoint_params.force = True + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switchActions/remove?force=true&ticketId=CHG12345 + ``` + """ + + class_name: Literal["EpManageFabricsSwitchActionsRemovePost"] = Field( + default="EpManageFabricsSwitchActionsRemovePost", + frozen=True, + description="Class name for backward compatibility", + ) + endpoint_params: SwitchActionsRemoveEndpointParams = Field( + default_factory=SwitchActionsRemoveEndpointParams, + description="Endpoint-specific query parameters", + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + base = f"{self._base_path}/remove" + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base}?{query_string}" + return base + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +class EpManageFabricsSwitchActionsChangeRolesPost(_EpManageFabricsSwitchActionsBase): + """ + # Summary + + Change Switch Roles Endpoint (Bulk) + + ## Description + + Endpoint to change the role of multiple switches in a single request. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switchActions/changeRoles + - /api/v1/manage/fabrics/{fabricName}/switchActions/changeRoles?ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Change roles + request = EpManageFabricsSwitchActionsChangeRolesPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # Change roles with change control ticket + request = EpManageFabricsSwitchActionsChangeRolesPost() + request.fabric_name = "MyFabric" + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switchActions/changeRoles?ticketId=CHG12345 + ``` + """ + + class_name: Literal["EpManageFabricsSwitchActionsChangeRolesPost"] = Field( + default="EpManageFabricsSwitchActionsChangeRolesPost", + frozen=True, + description="Class name for backward compatibility", + ) + endpoint_params: SwitchActionsTicketEndpointParams = Field( + default_factory=SwitchActionsTicketEndpointParams, + description="Endpoint-specific query parameters", + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + base = f"{self._base_path}/changeRoles" + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base}?{query_string}" + return base + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +class EpManageFabricsSwitchActionsImportBootstrapPost(_EpManageFabricsSwitchActionsBase): + """ + # Summary + + Import Bootstrap Switches Endpoint + + ## Description + + Endpoint to import and bootstrap preprovision or bootstrap switches to a fabric. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switchActions/importBootstrap + - /api/v1/manage/fabrics/{fabricName}/switchActions/importBootstrap?clusterName=cluster1&ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional) + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Import bootstrap switches + request = EpManageFabricsSwitchActionsImportBootstrapPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # Import with cluster and ticket + request = EpManageFabricsSwitchActionsImportBootstrapPost() + request.fabric_name = "MyFabric" + request.endpoint_params.cluster_name = "cluster1" + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switchActions/importBootstrap?clusterName=cluster1&ticketId=CHG12345 + ``` + """ + + class_name: Literal["EpManageFabricsSwitchActionsImportBootstrapPost"] = Field( + default="EpManageFabricsSwitchActionsImportBootstrapPost", + frozen=True, + description="Class name for backward compatibility", + ) + endpoint_params: SwitchActionsImportEndpointParams = Field( + default_factory=SwitchActionsImportEndpointParams, + description="Endpoint-specific query 
parameters", + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. + + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + base = f"{self._base_path}/importBootstrap" + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base}?{query_string}" + return base + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +# ============================================================================ +# Pre-Provision Endpoints +# ============================================================================ + + +class EpManageFabricsSwitchActionsPreProvisionPost(_EpManageFabricsSwitchActionsBase): + """ + # Summary + + Pre-Provision Switches Endpoint + + ## Description + + Endpoint to pre-provision switches in a fabric. Pre-provisioning allows + you to define switch parameters (serial, IP, model, etc.) ahead of time + so that when the physical device boots it is automatically absorbed into + the fabric. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switchActions/preProvision + - /api/v1/manage/fabrics/{fabricName}/switchActions/preProvision?clusterName=cluster1&ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional) + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Pre-provision switches + request = EpManageFabricsSwitchActionsPreProvisionPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # Pre-provision with cluster and ticket + request = EpManageFabricsSwitchActionsPreProvisionPost() + request.fabric_name = "MyFabric" + request.endpoint_params.cluster_name = "cluster1" + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switchActions/preProvision?clusterName=cluster1&ticketId=CHG12345 + ``` + """ + + class_name: Literal["EpManageFabricsSwitchActionsPreProvisionPost"] = Field( + default="EpManageFabricsSwitchActionsPreProvisionPost", + frozen=True, + description="Class name for backward compatibility", + ) + endpoint_params: SwitchActionsImportEndpointParams = Field( + default_factory=SwitchActionsImportEndpointParams, + description="Endpoint-specific query parameters", + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + base = f"{self._base_path}/preProvision" + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base}?{query_string}" + return base + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +# ============================================================================ +# Rediscover Endpoints +# ============================================================================ + + +class EpManageFabricsSwitchActionsRediscoverPost(_EpManageFabricsSwitchActionsBase): + """ + # Summary + + Rediscover Switches Endpoint + + ## Description + + Endpoint to trigger rediscovery for one or more switches in a fabric. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switchActions/rediscover + - /api/v1/manage/fabrics/{fabricName}/switchActions/rediscover?ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Rediscover switches + request = EpManageFabricsSwitchActionsRediscoverPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # Rediscover switches with change control ticket + request = EpManageFabricsSwitchActionsRediscoverPost() + request.fabric_name = "MyFabric" + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switchActions/rediscover?ticketId=CHG12345 + ``` + """ + + class_name: Literal["EpManageFabricsSwitchActionsRediscoverPost"] = Field( + default="EpManageFabricsSwitchActionsRediscoverPost", + frozen=True, + description="Class name for backward compatibility", + ) + endpoint_params: SwitchActionsTicketEndpointParams = Field( + default_factory=SwitchActionsTicketEndpointParams, + description="Endpoint-specific query parameters", + ) + + @property + def 
path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. + + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + base = f"{self._base_path}/rediscover" + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base}?{query_string}" + return base + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +class EpManageFabricsSwitchActionsDeployPost(_EpManageFabricsSwitchActionsBase): + """ + # Summary + + Switch-Level Config Deploy Endpoint + + ## Description + + Endpoint to deploy pending configuration for specific switches in a fabric. + Unlike the global ``configDeploy`` endpoint, this deploys only the specified + switches identified by their serial numbers. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switchActions/deploy + - /api/v1/manage/fabrics/{fabricName}/switchActions/deploy?ticketId=CHG12345 + + ## Verb + + - POST + + ## Body + + ```json + {"switchIds": ["FOC21373AFA", "FVT93126SKE"]} + ``` + + ## Query Parameters + + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + request = EpManageFabricsSwitchActionsDeployPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + # POST body: {"switchIds": ["FOC21373AFA"]} + ``` + """ + + class_name: Literal["EpManageFabricsSwitchActionsDeployPost"] = Field( + default="EpManageFabricsSwitchActionsDeployPost", + frozen=True, + description="Class name for backward compatibility", + ) + endpoint_params: SwitchActionsTicketEndpointParams = Field( + default_factory=SwitchActionsTicketEndpointParams, + description="Endpoint-specific query parameters", + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + base = f"{self._base_path}/deploy" + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base}?{query_string}" + return base + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST diff --git a/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py new file mode 100644 index 000000000..fe28dfa49 --- /dev/null +++ b/plugins/module_utils/endpoints/v1/manage/manage_fabrics_switches.py @@ -0,0 +1,464 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) +""" +ND Manage Fabric Switches endpoint models. + +This module contains endpoint definitions for switch CRUD operations +within fabrics in the ND Manage API. 
+ +Endpoints covered: +- List switches in a fabric +- Add switches to a fabric +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +__author__ = "Akshayanat C S" +# pylint: enable=invalid-name + +from typing import Literal, Optional + +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.mixins import ( + ClusterNameMixin, + FabricNameMixin, + FilterMixin, + MaxMixin, + OffsetMixin, + SwitchSerialNumberMixin, + TicketIdMixin, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.query_params import ( + EndpointQueryParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.base_path import ( + BasePath, +) +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.base import ( + NDEndpointBaseModel, +) + + +class FabricSwitchesGetEndpointParams(FilterMixin, MaxMixin, OffsetMixin, EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for list fabric switches endpoint. + + ## Parameters + + - hostname: Filter by switch hostname (optional) + - max: Maximum number of results (optional, from `MaxMixin`) + - offset: Pagination offset (optional, from `OffsetMixin`) + - filter: Lucene filter expression (optional, from `FilterMixin`) + + ## Usage + + ```python + params = FabricSwitchesGetEndpointParams(hostname="leaf1", max=100) + query_string = params.to_query_string() + # Returns: "hostname=leaf1&max=100" + ``` + """ + + hostname: Optional[str] = Field(default=None, min_length=1, description="Filter by switch hostname") + + +class FabricSwitchesAddEndpointParams(ClusterNameMixin, TicketIdMixin, EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for add switches to fabric endpoint. 
+ + ## Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional, from `ClusterNameMixin`) + - ticket_id: Change control ticket ID (optional, from `TicketIdMixin`) + + ## Usage + + ```python + params = FabricSwitchesAddEndpointParams(cluster_name="cluster1", ticket_id="CHG12345") + query_string = params.to_query_string() + # Returns: "clusterName=cluster1&ticketId=CHG12345" + ``` + """ + + +class _EpManageFabricsSwitchesBase(FabricNameMixin, NDEndpointBaseModel): + """ + Base class for Fabric Switches endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics/{fabricName}/switches endpoint. + """ + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + return BasePath.path("fabrics", self.fabric_name, "switches") + + +class EpManageFabricsSwitchesGet(_EpManageFabricsSwitchesBase): + """ + # Summary + + List Fabric Switches Endpoint + + ## Description + + Endpoint to list all switches in a specific fabric with optional filtering. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switches + - /api/v1/manage/fabrics/{fabricName}/switches?hostname=leaf1&max=100 + + ## Verb + + - GET + + ## Query Parameters + + - hostname: Filter by switch hostname (optional) + - max: Maximum number of results (optional) + - offset: Pagination offset (optional) + - filter: Lucene filter expression (optional) + + ## Usage + + ```python + # List all switches + request = EpManageFabricsSwitchesGet() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # List with filtering + request = EpManageFabricsSwitchesGet() + request.fabric_name = "MyFabric" + request.endpoint_params.hostname = "leaf1" + request.endpoint_params.max = 100 + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switches?hostname=leaf1&max=100 + ``` + """ + + class_name: Literal["EpManageFabricsSwitchesGet"] = Field( + default="EpManageFabricsSwitchesGet", + frozen=True, + description="Class name for backward compatibility", + ) + endpoint_params: FabricSwitchesGetEndpointParams = Field( + default_factory=FabricSwitchesGetEndpointParams, + description="Endpoint-specific query parameters", + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. + + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{self._base_path}?{query_string}" + return self._base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.GET + + +class EpManageFabricsSwitchesPost(_EpManageFabricsSwitchesBase): + """ + # Summary + + Add Switches to Fabric Endpoint + + ## Description + + Endpoint to add switches to a specific fabric. 
+ + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switches + - /api/v1/manage/fabrics/{fabricName}/switches?clusterName=cluster1&ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional) + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Add switches + request = EpManageFabricsSwitchesPost() + request.fabric_name = "MyFabric" + path = request.path + verb = request.verb + + # Add switches with cluster and ticket + request = EpManageFabricsSwitchesPost() + request.fabric_name = "MyFabric" + request.endpoint_params.cluster_name = "cluster1" + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switches?clusterName=cluster1&ticketId=CHG12345 + ``` + """ + + class_name: Literal["EpManageFabricsSwitchesPost"] = Field( + default="EpManageFabricsSwitchesPost", + frozen=True, + description="Class name for backward compatibility", + ) + endpoint_params: FabricSwitchesAddEndpointParams = Field( + default_factory=FabricSwitchesAddEndpointParams, + description="Endpoint-specific query parameters", + ) + + @property + def path(self) -> str: + """ + # Summary + + Build the endpoint path with optional query string. 
+ + ## Returns + + - Complete endpoint path string, optionally including query parameters + """ + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{self._base_path}?{query_string}" + return self._base_path + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +# ============================================================================ +# Per-Switch Action Endpoints +# ============================================================================ + + +class SwitchActionsTicketEndpointParams(TicketIdMixin, EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for switch action endpoints that accept a ticket ID. + + ## Parameters + + - ticket_id: Change control ticket ID (optional, from `TicketIdMixin`) + + ## Usage + + ```python + params = SwitchActionsTicketEndpointParams(ticket_id="CHG12345") + query_string = params.to_query_string() + # Returns: "ticketId=CHG12345" + ``` + """ + + +class SwitchActionsClusterEndpointParams(ClusterNameMixin, EndpointQueryParams): + """ + # Summary + + Endpoint-specific query parameters for switch action endpoints that accept only a cluster name. + + ## Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional, from `ClusterNameMixin`) + + ## Usage + + ```python + params = SwitchActionsClusterEndpointParams(cluster_name="cluster1") + query_string = params.to_query_string() + # Returns: "clusterName=cluster1" + ``` + """ + + +class _EpManageFabricsSwitchActionsPerSwitchBase(FabricNameMixin, SwitchSerialNumberMixin, NDEndpointBaseModel): + """ + Base class for per-switch action endpoints. + + Provides common functionality for all HTTP methods on the + /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions endpoint. 
+ """ + + @property + def _base_path(self) -> str: + """Build the base endpoint path.""" + if self.fabric_name is None: + raise ValueError("fabric_name must be set before accessing path") + if self.switch_sn is None: + raise ValueError("switch_sn must be set before accessing path") + return BasePath.path("fabrics", self.fabric_name, "switches", self.switch_sn, "actions") + + +class EpManageFabricsSwitchProvisionRMAPost(_EpManageFabricsSwitchActionsPerSwitchBase): + """ + # Summary + + Provision RMA for Switch Endpoint + + ## Description + + Endpoint to RMA (Return Material Authorization) an existing switch with a new bootstrapped switch. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions/provisionRMA + - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions/provisionRMA?ticketId=CHG12345 + + ## Verb + + - POST + + ## Query Parameters + + - ticket_id: Change control ticket ID (optional) + + ## Usage + + ```python + # Provision RMA + request = EpManageFabricsSwitchProvisionRMAPost() + request.fabric_name = "MyFabric" + request.switch_sn = "SAL1948TRTT" + path = request.path + verb = request.verb + + # Provision RMA with change control ticket + request = EpManageFabricsSwitchProvisionRMAPost() + request.fabric_name = "MyFabric" + request.switch_sn = "SAL1948TRTT" + request.endpoint_params.ticket_id = "CHG12345" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/provisionRMA?ticketId=CHG12345 + ``` + """ + + class_name: Literal["EpManageFabricsSwitchProvisionRMAPost"] = Field( + default="EpManageFabricsSwitchProvisionRMAPost", + frozen=True, + description="Class name for backward compatibility", + ) + endpoint_params: SwitchActionsTicketEndpointParams = Field( + default_factory=SwitchActionsTicketEndpointParams, + description="Endpoint-specific query parameters", + ) + + @property + def path(self) -> str: + """Build the endpoint path with optional query 
string.""" + base = f"{self._base_path}/provisionRMA" + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base}?{query_string}" + return base + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST + + +class EpManageFabricsSwitchChangeSerialNumberPost(_EpManageFabricsSwitchActionsPerSwitchBase): + """ + # Summary + + Change Switch Serial Number Endpoint + + ## Description + + Endpoint to change the serial number for a pre-provisioned switch. + + ## Path + + - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions/changeSwitchSerialNumber + - /api/v1/manage/fabrics/{fabricName}/switches/{switchSn}/actions/changeSwitchSerialNumber?clusterName=cluster1 + + ## Verb + + - POST + + ## Query Parameters + + - cluster_name: Target cluster name for multi-cluster deployments (optional) + + ## Usage + + ```python + # Change serial number + request = EpManageFabricsSwitchChangeSerialNumberPost() + request.fabric_name = "MyFabric" + request.switch_sn = "SAL1948TRTT" + path = request.path + verb = request.verb + + # Change serial number with cluster name + request = EpManageFabricsSwitchChangeSerialNumberPost() + request.fabric_name = "MyFabric" + request.switch_sn = "SAL1948TRTT" + request.endpoint_params.cluster_name = "cluster1" + path = request.path + verb = request.verb + # Path will be: /api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber?clusterName=cluster1 + ``` + """ + + class_name: Literal["EpManageFabricsSwitchChangeSerialNumberPost"] = Field( + default="EpManageFabricsSwitchChangeSerialNumberPost", + frozen=True, + description="Class name for backward compatibility", + ) + endpoint_params: SwitchActionsClusterEndpointParams = Field( + default_factory=SwitchActionsClusterEndpointParams, + description="Endpoint-specific query parameters", + ) + + @property + def path(self) -> str: + """Build the endpoint path with optional 
query string.""" + base = f"{self._base_path}/changeSwitchSerialNumber" + query_string = self.endpoint_params.to_query_string() + if query_string: + return f"{base}?{query_string}" + return base + + @property + def verb(self) -> HttpVerbEnum: + """Return the HTTP verb for this endpoint.""" + return HttpVerbEnum.POST diff --git a/plugins/module_utils/manage_switches/__init__.py b/plugins/module_utils/manage_switches/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/plugins/module_utils/manage_switches/nd_switch_resources.py b/plugins/module_utils/manage_switches/nd_switch_resources.py new file mode 100644 index 000000000..2805276ef --- /dev/null +++ b/plugins/module_utils/manage_switches/nd_switch_resources.py @@ -0,0 +1,3600 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Manage ND fabric switch lifecycle workflows. + +This module validates desired switch state, performs discovery and fabric +operations, and coordinates POAP and RMA workflows. 
+""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Tuple, Union + +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + ValidationError, +) + +from ansible_collections.cisco.nd.plugins.module_utils.nd_v2 import NDModule +from ansible_collections.cisco.nd.plugins.module_utils.enums import OperationType +from ansible_collections.cisco.nd.plugins.module_utils.nd_config_collection import ( + NDConfigCollection, +) +from ansible_collections.cisco.nd.plugins.module_utils.nd_output import NDOutput +from ansible_collections.cisco.nd.plugins.module_utils.rest.results import Results +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( + SwitchRole, + SnmpV3AuthProtocol, + PlatformType, + DiscoveryStatus, + SystemMode, + ConfigSyncStatus, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.discovery_models import ( + SwitchDiscoveryModel, + AddSwitchesRequestModel, + ShallowDiscoveryRequestModel, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.switch_data_models import ( + SwitchDataModel, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.bootstrap_models import ( + BootstrapImportSwitchModel, + ImportBootstrapSwitchesRequestModel, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.preprovision_models import ( + PreProvisionSwitchModel, + PreProvisionSwitchesRequestModel, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.rma_models import ( + RMASwitchModel, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.switch_actions_models import ( + SwitchCredentialsRequestModel, + ChangeSwitchSerialNumberRequestModel, +) +from 
ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.config_models import ( + SwitchConfigModel, + POAPConfigModel, + PreprovisionConfigModel, + RMAConfigModel, +) +from ansible_collections.cisco.nd.plugins.module_utils.utils import ( + ApiDataChecker, + FabricUtils, + SwitchOperationError, +) +from ansible_collections.cisco.nd.plugins.module_utils.manage_switches.utils import ( + SwitchWaitUtils, + mask_password, + get_switch_field, + group_switches_by_credentials, + query_bootstrap_switches, + build_bootstrap_index, + build_poap_data_block, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_switches import ( + EpManageFabricsSwitchesGet, + EpManageFabricsSwitchesPost, + EpManageFabricsSwitchProvisionRMAPost, + EpManageFabricsSwitchChangeSerialNumberPost, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_actions import ( + EpManageFabricsActionsShallowDiscoveryPost, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_switchactions import ( + EpManageFabricsSwitchActionsImportBootstrapPost, + EpManageFabricsSwitchActionsPreProvisionPost, + EpManageFabricsSwitchActionsRemovePost, + EpManageFabricsSwitchActionsChangeRolesPost, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_credentials_switches import ( + EpManageCredentialsSwitchesPost, +) + +# ========================================================================= +# Constants & Globals +# ========================================================================= + +# Max hops is not supported by the module. +_DISCOVERY_MAX_HOPS: int = 0 + + +@dataclass +class SwitchServiceContext: + """Store shared dependencies used by service classes. + + Attributes: + nd: ND module wrapper for requests and module interactions. + results: Shared results aggregator for task output. + fabric: Target fabric name. + log: Logger instance. 
+ save_config: Whether to run fabric save after changes. + deploy_config: Whether to run fabric deploy after changes. + """ + + nd: NDModule + results: Results + fabric: str + log: logging.Logger + save_config: bool = True + deploy_config: bool = True + deploy_type: str = "switch" + + +# ========================================================================= +# Validation & Diff +# ========================================================================= + + +@dataclass +class SwitchPlan: + """Unified action plan produced by :meth:`SwitchDiffEngine.compute_changes`. + + All lists contain :class:`SwitchConfigModel` objects so that every state + handler receives the original user config (credentials, role, etc.) and can + act on it directly. Existing inventory entries are kept alongside only + where removal requires a serial number. + + Attributes: + to_add: New normal switches that need ``bulk_add``. + to_update: Normal switches already in fabric but with field + differences — remove-and-re-add (overridden only). + to_delete: Switches in fabric that have no corresponding config + entry (overridden / deleted states). + migration_mode: Normal switches currently in migration mode — no add + needed, but role update and finalize are applied. + idempotent: Normal switches that match desired state exactly. + to_bootstrap: POAP bootstrap configs that need the import-bootstrap + API call (switch not in fabric, or mismatch + unreachable). + normal_readd: POAP/preprovision configs whose switch *is* reachable + and can be re-added via the normal bulk_add path. + to_preprovision: Pre-provision configs that need the preProvision API call. + to_swap: Serial-swap configs (poap + preprovision both present). + to_rma: RMA configs. + poap_ips: Seed IPs of all POAP/preprovision/swap configs — used by + overridden to skip these IPs during the cleanup sweep. 
+ to_delete_existing: Existing ``SwitchDataModel`` records for switches that + must be deleted before re-add (POAP/preprovision mismatches + and overridden normal updates). Kept parallel to the + config-level lists above. + """ + + # Normal-switch diff buckets (config side) + to_add: List["SwitchConfigModel"] + to_update: List["SwitchConfigModel"] + to_delete: List["SwitchDataModel"] + migration_mode: List["SwitchConfigModel"] + idempotent: List["SwitchConfigModel"] + + # POAP/preprovision/swap/RMA buckets + to_bootstrap: List["SwitchConfigModel"] + normal_readd: List["SwitchConfigModel"] + to_preprovision: List["SwitchConfigModel"] + to_swap: List["SwitchConfigModel"] + to_rma: List["SwitchConfigModel"] + + # Cross-cutting helpers + poap_ips: set + to_delete_existing: List["SwitchDataModel"] + + +class SwitchDiffEngine: + """Provide stateless validation and diff computation helpers.""" + + @staticmethod + def validate_configs( + config: Union[Dict[str, Any], List[Dict[str, Any]]], + state: str, + nd: NDModule, + log: logging.Logger, + ) -> List[SwitchConfigModel]: + """Validate raw module config and return typed switch configs. + + Args: + config: Raw config dict or list of dicts from module parameters. + state: Requested module state. + nd: ND module wrapper used for failure handling. + log: Logger instance. + + Returns: + List of validated ``SwitchConfigModel`` objects. + + Raises: + ValidationError: Raised by model validation for invalid input. 
+ """
+ log.debug("ENTER: validate_configs()")
+
+ # Accept either a single config dict or a list of them from module params.
+ configs_list = config if isinstance(config, list) else [config]
+ log.debug("Normalized to %s configuration(s)", len(configs_list))
+
+ validated_configs: List[SwitchConfigModel] = []
+ for idx, cfg in enumerate(configs_list):
+ try:
+ # State is passed as validation context so per-state rules can apply.
+ validated = SwitchConfigModel.model_validate(cfg, context={"state": state})
+ validated_configs.append(validated)
+ except ValidationError as e:
+ # Pydantic failures: surface the structured error list when available.
+ error_detail = e.errors() if hasattr(e, "errors") else str(e)
+ error_msg = f"Configuration validation failed for " f"config index {idx}: {error_detail}"
+ log.error(error_msg)
+ if hasattr(nd, "module"):
+ nd.module.fail_json(msg=error_msg)
+ else:
+ # No AnsibleModule attached (presumably unit-test context) — raise instead of exiting.
+ raise ValueError(error_msg) from e
+ except Exception as e:
+ # Non-pydantic failures raised during model validation get the same treatment.
+ error_msg = f"Configuration validation failed for " f"config index {idx}: {str(e)}"
+ log.error(error_msg)
+ if hasattr(nd, "module"):
+ nd.module.fail_json(msg=error_msg)
+ else:
+ raise ValueError(error_msg) from e
+
+ if not validated_configs:
+ log.warning("No valid configurations found in input")
+ return validated_configs
+
+ # Duplicate seed_ip check
+ # Each switch may appear only once per run; collect every duplicate so
+ # the failure message reports them all at once rather than one at a time.
+ seen_ips: set = set()
+ duplicate_ips: set = set()
+ for cfg in validated_configs:
+ if cfg.seed_ip in seen_ips:
+ duplicate_ips.add(cfg.seed_ip)
+ seen_ips.add(cfg.seed_ip)
+ if duplicate_ips:
+ error_msg = f"Duplicate seed_ip entries found in config: " f"{sorted(duplicate_ips)}. Each switch must appear only once."
+ log.error(error_msg) + if hasattr(nd, "module"): + nd.module.fail_json(msg=error_msg) + else: + raise ValueError(error_msg) + + operation_types = {c.operation_type for c in validated_configs} + log.info( + "Successfully validated %s configuration(s) with operation type(s): %s", + len(validated_configs), + operation_types, + ) + log.debug( + "EXIT: validate_configs() -> %s configs, operation_types=%s", + len(validated_configs), + operation_types, + ) + return validated_configs + + @staticmethod + def compute_changes( + proposed_configs: List[SwitchConfigModel], + existing: List[SwitchDataModel], + log: logging.Logger, + ) -> "SwitchPlan": + """Classify all proposed configs against the current fabric inventory. + + Accepts the full mix of normal, POAP/preprovision, swap, and RMA configs + and produces a unified :class:`SwitchPlan` that each state handler can + act on directly. This is the single idempotency gate for all operation + types. + + Idempotency rules by operation type: + + * **normal** — compare ``role`` against the existing inventory entry + found by ``seed_ip``. Role is the only user-specifiable field for + normal switches; hostname, model, and software version are not + user-supplied and are not compared. No discovery is performed for + switches already in the fabric. + * **poap / preprovision** — compare ``seed_ip``, ``serial_number`` + (from ``poap.serial_number`` / ``preprovision.serial_number``), and + ``role`` against the existing inventory. If all three match the + switch is idempotent and skipped. On a mismatch the routing depends + on ``discovery_status``: + + - Bootstrap mismatch, ``discovery_status == OK`` → ``normal_readd`` + - Bootstrap mismatch, anything else → ``to_bootstrap`` + - Preprovision mismatch, ``discovery_status == UNREACHABLE`` → ``to_preprovision`` + - Preprovision mismatch, anything else → ``normal_readd`` + + * **swap** — always active (no idempotency check; the caller validates + preconditions). 
+ * **rma** — always active (no idempotency check; caller validates). + + Args: + proposed_configs: All validated switch configs for this run. + existing: Current fabric inventory snapshot. + log: Logger instance. + + Returns: + :class:`SwitchPlan` with all buckets populated. + """ + log.debug("ENTER: compute_changes()") + log.info( + "compute_changes: %s proposed config(s) vs %s existing switch(es)", + len(proposed_configs), + len(existing), + ) + + existing_by_ip: Dict[str, SwitchDataModel] = {sw.fabric_management_ip: sw for sw in existing if sw.fabric_management_ip} + existing_by_id: Dict[str, SwitchDataModel] = {sw.switch_id: sw for sw in existing if sw.switch_id} + + # Output buckets + to_add: List[SwitchConfigModel] = [] + to_update: List[SwitchConfigModel] = [] + to_delete_existing: List[SwitchDataModel] = [] + migration_mode: List[SwitchConfigModel] = [] + idempotent: List[SwitchConfigModel] = [] + to_bootstrap: List[SwitchConfigModel] = [] + normal_readd: List[SwitchConfigModel] = [] + to_preprovision: List[SwitchConfigModel] = [] + to_swap: List[SwitchConfigModel] = [] + to_rma: List[SwitchConfigModel] = [] + poap_ips: set = set() + + # Track which existing switch IDs are accounted for by a config + accounted_ids: set = set() + + for cfg in proposed_configs: + op = cfg.operation_type + + # ------------------------------------------------------------------ + # RMA — no idempotency check; always active + # ------------------------------------------------------------------ + if op == "rma": + to_rma.append(cfg) + continue + + existing_sw = existing_by_ip.get(cfg.seed_ip) + if existing_sw: + accounted_ids.add(existing_sw.switch_id) + + # ------------------------------------------------------------------ + # POAP swap — both poap and preprovision blocks present + # ------------------------------------------------------------------ + if op == "swap": + poap_ips.add(cfg.seed_ip) + to_swap.append(cfg) + continue + + # 
------------------------------------------------------------------ + # POAP bootstrap + # ------------------------------------------------------------------ + if op == "poap": + poap_ips.add(cfg.seed_ip) + serial = cfg.poap.serial_number if cfg.poap else None + + if not existing_sw: + log.info("Bootstrap %s: not in fabric — queue for bootstrap", cfg.seed_ip) + to_bootstrap.append(cfg) + continue + + serial_match = serial and serial in (existing_sw.serial_number, existing_sw.switch_id) + role_match = cfg.role is None or cfg.role == existing_sw.switch_role + if serial_match and role_match: + log.info( + "Bootstrap %s serial=%s role=%s — idempotent, skipping", + cfg.seed_ip, + serial, + cfg.role, + ) + idempotent.append(cfg) + continue + + status = existing_sw.additional_data.discovery_status if existing_sw.additional_data else None + log.info( + "Bootstrap %s differs (serial_match=%s, role_match=%s, status=%s) — deleting existing", + cfg.seed_ip, + serial_match, + role_match, + getattr(status, "value", status) if status else "unknown", + ) + to_delete_existing.append(existing_sw) + if status == DiscoveryStatus.OK: + log.info("Bootstrap %s: switch reachable — routing to normal_readd", cfg.seed_ip) + normal_readd.append(cfg) + else: + log.info("Bootstrap %s: switch unreachable — routing to bootstrap workflow", cfg.seed_ip) + to_bootstrap.append(cfg) + continue + + # ------------------------------------------------------------------ + # Pre-provision + # ------------------------------------------------------------------ + if op == "preprovision": + poap_ips.add(cfg.seed_ip) + pp = cfg.preprovision + serial = pp.serial_number if pp else None + + if not existing_sw: + log.info("Preprovision %s: not in fabric — queue for preprovision", cfg.seed_ip) + to_preprovision.append(cfg) + continue + + serial_match = bool(serial and serial in (existing_sw.serial_number, existing_sw.switch_id)) + role_match = cfg.role is None or cfg.role == existing_sw.switch_role + model_match = pp 
is None or pp.model is None or pp.model == existing_sw.model + version_match = pp is None or pp.version is None or pp.version == existing_sw.software_version + hostname_match = pp is None or pp.hostname is None or pp.hostname == existing_sw.hostname + + if serial_match and role_match and model_match and version_match and hostname_match: + log.info( + "Preprovision %s serial=%s role=%s model=%s version=%s hostname=%s — idempotent, skipping", + cfg.seed_ip, + serial, + cfg.role, + pp.model if pp else None, + pp.version if pp else None, + pp.hostname if pp else None, + ) + idempotent.append(cfg) + continue + + diffs = [] + if not serial_match: + diffs.append(f"serial(config={serial}, fabric={existing_sw.serial_number})") + if not role_match: + diffs.append(f"role(config={cfg.role}, fabric={existing_sw.switch_role})") + if not model_match: + diffs.append(f"model(config={pp.model if pp else None}, fabric={existing_sw.model})") + if not version_match: + diffs.append(f"version(config={pp.version if pp else None}, fabric={existing_sw.software_version})") + if not hostname_match: + diffs.append(f"hostname(config={pp.hostname if pp else None}, fabric={existing_sw.hostname})") + + status = existing_sw.additional_data.discovery_status if existing_sw.additional_data else None + log.info( + "Preprovision %s differs [%s] (status=%s) — deleting existing", + cfg.seed_ip, + ", ".join(diffs), + getattr(status, "value", status) if status else "unknown", + ) + to_delete_existing.append(existing_sw) + if status == DiscoveryStatus.UNREACHABLE: + log.info("Preprovision %s: switch unreachable — routing to preprovision workflow", cfg.seed_ip) + to_preprovision.append(cfg) + else: + log.info("Preprovision %s: switch reachable — routing to normal_readd", cfg.seed_ip) + normal_readd.append(cfg) + continue + + # ------------------------------------------------------------------ + # Normal switch + # ------------------------------------------------------------------ + if op == "normal": + if not 
existing_sw: + log.info("Normal %s: not in fabric — queue for discovery + add", cfg.seed_ip) + to_add.append(cfg) + continue + + if existing_sw.additional_data and existing_sw.additional_data.system_mode == SystemMode.MIGRATION: + log.info("Normal %s (%s): in migration mode", cfg.seed_ip, existing_sw.switch_id) + migration_mode.append(cfg) + continue + + # Role is the only user-specifiable field for a normal switch. + # hostname, model, and software_version are device-reported and + # not part of desired config — no discovery needed. + role_match = cfg.role is None or cfg.role == existing_sw.switch_role + if role_match: + log.info("Normal %s: in fabric, role matches — idempotent", cfg.seed_ip) + idempotent.append(cfg) + else: + log.info( + "Normal %s: role mismatch (config=%s, existing=%s) — marking to_update", + cfg.seed_ip, + cfg.role, + existing_sw.switch_role, + ) + to_update.append(cfg) + continue + + # Switches in fabric that no config entry accounts for + # (only meaningful for overridden / deleted states) + to_delete: List[SwitchDataModel] = [] + for sw in existing: + if sw.switch_id and sw.switch_id not in accounted_ids and sw.fabric_management_ip not in poap_ips: + log.info( + "Existing %s (%s) has no config entry — marking to_delete", + sw.fabric_management_ip, + sw.switch_id, + ) + to_delete.append(sw) + + plan = SwitchPlan( + to_add=to_add, + to_update=to_update, + to_delete=to_delete, + migration_mode=migration_mode, + idempotent=idempotent, + to_bootstrap=to_bootstrap, + normal_readd=normal_readd, + to_preprovision=to_preprovision, + to_swap=to_swap, + to_rma=to_rma, + poap_ips=poap_ips, + to_delete_existing=to_delete_existing, + ) + log.info( + "compute_changes: to_add=%s, to_update=%s, to_delete=%s, migration=%s, " + "idempotent=%s, bootstrap=%s, normal_readd=%s, preprov=%s, swap=%s, rma=%s", + len(plan.to_add), + len(plan.to_update), + len(plan.to_delete), + len(plan.migration_mode), + len(plan.idempotent), + len(plan.to_bootstrap), + 
len(plan.normal_readd), + len(plan.to_preprovision), + len(plan.to_swap), + len(plan.to_rma), + ) + log.debug("EXIT: compute_changes()") + return plan + + @staticmethod + def validate_switch_api_fields( + nd: NDModule, + serial: str, + model: Optional[str], + version: Optional[str], + config_data, + bootstrap_data: Dict[str, Any], + log: logging.Logger, + context: str, + hostname: Optional[str] = None, + ) -> None: + """Validate user-supplied switch fields against the bootstrap API response. + + Only fields that are provided (non-None) are validated against the API. + Fields that are omitted are silently filled in from the API at build + time — no error is raised for those. Any omitted fields are logged at + INFO level so the operator can see what was sourced from the API. + + Args: + nd: ND module wrapper used for failure handling. + serial: Serial number of the switch being processed. + model: User-provided switch model, or None if omitted. + version: User-provided software version, or None if omitted. + config_data: User-provided ``ConfigDataModel``, or None if omitted. + bootstrap_data: Matching entry from the bootstrap GET API. + log: Logger instance. + context: Label used in error messages (e.g. ``"Bootstrap"`` or ``"RMA"``). + hostname: User-provided hostname, or None if omitted (bootstrap only). + + Returns: + None. 
+ """ + bs_data = bootstrap_data.get("data") or {} + mismatches: List[str] = [] + + if model is not None and model != bootstrap_data.get("model"): + mismatches.append(f"model: provided '{model}', " f"bootstrap reports '{bootstrap_data.get('model')}'") + + if version is not None and version != bootstrap_data.get("softwareVersion"): + mismatches.append(f"version: provided '{version}', " f"bootstrap reports '{bootstrap_data.get('softwareVersion')}'") + + if config_data is not None: + bs_gateway = bootstrap_data.get("gatewayIpMask") or bs_data.get("gatewayIpMask") + if config_data.gateway is not None and config_data.gateway != bs_gateway: + mismatches.append(f"config_data.gateway: provided '{config_data.gateway}', " f"bootstrap reports '{bs_gateway}'") + + bs_models = bs_data.get("models", []) + if config_data.models and sorted(config_data.models) != sorted(bs_models): + mismatches.append(f"config_data.models: provided {config_data.models}, " f"bootstrap reports {bs_models}") + + if mismatches: + nd.module.fail_json( + msg=( + f"{context} field mismatch for serial '{serial}'. 
" + f"The following provided values do not match the " + f"bootstrap API data:\n" + "\n".join(f" - {m}" for m in mismatches) + ) + ) + + # Log any fields that were omitted and will be sourced from the API + pulled: List[str] = [] + if model is None: + pulled.append("model") + if version is None: + pulled.append("version") + if hostname is None: + pulled.append("hostname") + if config_data is None: + pulled.append("config_data (gateway + models)") + if pulled: + log.info( + "%s serial '%s': the following fields were not provided and will be sourced from the bootstrap API: %s", + context, + serial, + ", ".join(pulled), + ) + else: + log.debug("%s field validation passed for serial '%s'", context, serial) + + +# ========================================================================= +# Switch Discovery Service +# ========================================================================= + + +class SwitchDiscoveryService: + """Handle switch discovery and proposed-model construction.""" + + def __init__(self, ctx: SwitchServiceContext): + """Initialize the discovery service. + + Args: + ctx: Shared service context. + + Returns: + None. + """ + self.ctx = ctx + + def discover( + self, + switch_configs: List[SwitchConfigModel], + ) -> Dict[str, Dict[str, Any]]: + """Discover switches for the provided config list. + + Args: + switch_configs: Validated switch configuration entries. + + Returns: + Dict mapping seed IP to raw discovery data. 
+ """
+ log = self.ctx.log
+ log.debug("Step 1: Grouping switches by credentials")
+ # Switches sharing credentials are discovered with one API call per group.
+ credential_groups = group_switches_by_credentials(switch_configs, log)
+ log.debug("Created %s credential group(s)", len(credential_groups))
+
+ log.debug("Step 2: Bulk discovering switches")
+ all_discovered: Dict[str, Dict[str, Any]] = {}
+ for group_key, switches in credential_groups.items():
+ username, _pw_hash, auth_proto, platform_type, _preserve = group_key
+ # The group key carries a password hash, so every switch in the group
+ # shares the same password — take it from the first entry.
+ password = switches[0].password
+
+ log.debug(
+ "Discovering group: %s switches with username=%s",
+ len(switches),
+ username,
+ )
+ try:
+ discovered_batch = self.bulk_discover(
+ switches=switches,
+ username=username,
+ password=password,
+ auth_proto=auth_proto,
+ platform_type=platform_type,
+ )
+ all_discovered.update(discovered_batch)
+ except Exception as e:
+ # Any group-level failure aborts the task with the affected IPs listed.
+ seed_ips = [sw.seed_ip for sw in switches]
+ msg = f"Discovery failed for credential group " f"(username={username}, IPs={seed_ips}): {e}"
+ log.error(msg)
+ self.ctx.nd.module.fail_json(msg=msg)
+
+ log.debug("Total discovered: %s switches", len(all_discovered))
+ return all_discovered
+
+ def bulk_discover(
+ self,
+ switches: List[SwitchConfigModel],
+ username: str,
+ password: str,
+ auth_proto: SnmpV3AuthProtocol,
+ platform_type: PlatformType,
+ ) -> Dict[str, Dict[str, Any]]:
+ """Run one bulk discovery call for switches with shared credentials.
+
+ Args:
+ switches: Switches to discover.
+ username: Discovery username.
+ password: Discovery password.
+ auth_proto: SNMP v3 authentication protocol.
+ platform_type: Platform type for discovery.
+
+ Returns:
+ Dict mapping seed IP to discovered switch data.
+ """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: bulk_discover()") + log.debug("Discovering %s switches in bulk", len(switches)) + + endpoint = EpManageFabricsActionsShallowDiscoveryPost() + endpoint.fabric_name = self.ctx.fabric + + seed_ips = [switch.seed_ip for switch in switches] + log.debug("Seed IPs: %s", seed_ips) + + max_hops = _DISCOVERY_MAX_HOPS + + discovery_request = ShallowDiscoveryRequestModel( + seedIpCollection=seed_ips, + maxHop=max_hops, + platformType=platform_type, + snmpV3AuthProtocol=auth_proto, + username=username, + password=password, + ) + + payload = discovery_request.to_payload() + log.info("Bulk discovering %s switches: %s", len(seed_ips), ", ".join(seed_ips)) + log.debug("Discovery endpoint: %s", endpoint.path) + log.debug("Discovery payload (password masked): %s", mask_password(payload)) + + try: + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + + results.action = "discover" + results.operation_type = OperationType.QUERY + results.response_current = response + results.result_current = result + results.diff_current = payload + results.register_api_call() + + # Extract discovered switches from response + switches_data = [] + response_data: Dict[str, Any] = {} + if response and isinstance(response, dict): + if "DATA" in response and isinstance(response["DATA"], dict): + response_data = response["DATA"] + switches_data = response_data.get("switches", []) + elif "body" in response and isinstance(response["body"], dict): + response_data = response["body"] + switches_data = response_data.get("switches", []) + elif "switches" in response: + switches_data = response.get("switches", []) + + log.debug("Extracted %s switches from discovery response", len(switches_data)) + + ApiDataChecker.check(response_data, f"Switch discovery for {seed_ips}", log, nd.module.fail_json) + + # Fail early for any 
unreachable switches — before data touches models. + # The API returns status="notReachable" with an empty serialNumber and + # a top-level "warning" string explaining reachability requirements. + unreachable = [sw for sw in switches_data if isinstance(sw, dict) and sw.get("status", "").lower() == "notreachable"] + if unreachable: + api_warning = response_data.get("warning", "").strip() + msg = f"Switch discovery failed: {api_warning}" + log.error(msg) + nd.module.fail_json(msg=msg) + + discovered_results: Dict[str, Dict[str, Any]] = {} + for discovered in switches_data: + if not isinstance(discovered, dict): + continue + + ip = discovered.get("ip") + status = discovered.get("status", "").lower() + serial_number = discovered.get("serialNumber") + + if not serial_number: + msg = f"Switch {ip} discovery response missing serial number. " f"Cannot proceed without a valid serial number." + log.error(msg) + nd.module.fail_json(msg=msg) + if not ip: + msg = f"Switch with serial {serial_number} discovery response " f"missing IP address. Cannot proceed without a valid IP." 
+ log.error(msg) + nd.module.fail_json(msg=msg) + + if status in ("manageable", "ok"): + discovered_results[ip] = discovered + log.info( + "Switch %s (%s) discovered successfully - status: %s", + ip, + serial_number, + status, + ) + elif status == "alreadymanaged": + log.info("Switch %s (%s) is already managed", ip, serial_number) + discovered_results[ip] = discovered + else: + reason = discovered.get("statusReason", "Unknown") + log.error( + "Switch %s discovery failed - status: %s, reason: %s", + ip, + status, + reason, + ) + + for seed_ip in seed_ips: + if seed_ip not in discovered_results: + log.warning("Switch %s not found in discovery response", seed_ip) + + log.info( + "Bulk discovery completed: %s/%s switches successful", + len(discovered_results), + len(seed_ips), + ) + log.debug("Discovered switches: %s", list(discovered_results.keys())) + log.debug("EXIT: bulk_discover() -> %s discovered", len(discovered_results)) + return discovered_results + + except Exception as e: + msg = f"Bulk discovery failed for switches " f"{', '.join(seed_ips)}: {e}" + log.error(msg) + nd.module.fail_json(msg=msg) + + def build_proposed( + self, + proposed_config: List[SwitchConfigModel], + discovered_data: Dict[str, Dict[str, Any]], + existing: List[SwitchDataModel], + ) -> List[SwitchDataModel]: + """Build proposed switch models from discovery and inventory data. + + Args: + proposed_config: Validated switch config entries. + discovered_data: Mapping of seed IP to raw discovery data. + existing: Current fabric inventory snapshot. + + Returns: + List of ``SwitchDataModel`` instances for proposed state. 
+ """
+ log = self.ctx.log
+ proposed: List[SwitchDataModel] = []
+
+ for cfg in proposed_config:
+ seed_ip = cfg.seed_ip
+ discovered = discovered_data.get(seed_ip)
+
+ if discovered:
+ # Overlay the user-requested role onto the raw discovery payload
+ # without mutating the caller's dict (copy-then-update).
+ if cfg.role is not None:
+ discovered = {**discovered, "role": cfg.role}
+ proposed.append(SwitchDataModel.from_response(discovered))
+ log.debug("Built proposed model from discovery for %s", seed_ip)
+ continue
+
+ # Fallback: switch may already be in the fabric inventory
+ existing_match = next(
+ (sw for sw in existing if sw.fabric_management_ip == seed_ip),
+ None,
+ )
+ if existing_match:
+ if cfg.role is not None:
+ # Re-validate a dumped copy so the existing record is not mutated;
+ # switchRole expects the enum's string value when role is a SwitchRole.
+ data = existing_match.model_dump(by_alias=True)
+ data["switchRole"] = cfg.role.value if isinstance(cfg.role, SwitchRole) else cfg.role
+ proposed.append(SwitchDataModel.model_validate(data))
+ else:
+ proposed.append(existing_match)
+ log.debug(
+ "Switch %s already in fabric inventory — using existing record (discovery skipped)",
+ seed_ip,
+ )
+ continue
+
+ # Neither discovered nor already managed — cannot build a proposed state.
+ msg = f"Switch with seed IP {seed_ip} not discovered " f"and not found in existing inventory."
+ log.error(msg)
+ self.ctx.nd.module.fail_json(msg=msg)
+
+ return proposed
+
+
+# =========================================================================
+# Bulk Fabric Operations
+# =========================================================================
+
+
+class SwitchFabricOps:
+ """Run fabric mutation operations for add, delete, credentials, and roles."""
+
+ def __init__(self, ctx: SwitchServiceContext, fabric_utils: FabricUtils):
+ """Initialize the fabric operation service.
+
+ Args:
+ ctx: Shared service context.
+ fabric_utils: Utility wrapper for fabric-level operations.
+
+ Returns:
+ None.
+ """ + self.ctx = ctx + self.fabric_utils = fabric_utils + + def bulk_add( + self, + switches: List[Tuple[SwitchConfigModel, Dict[str, Any]]], + username: str, + password: str, + auth_proto: SnmpV3AuthProtocol, + platform_type: PlatformType, + preserve_config: bool, + ) -> Dict[str, Any]: + """Add multiple discovered switches to the fabric. + + Args: + switches: List of ``(SwitchConfigModel, discovered_data)`` tuples. + username: Discovery username. + password: Discovery password. + auth_proto: SNMP v3 authentication protocol. + platform_type: Platform type. + preserve_config: Whether to preserve existing switch config. + + Returns: + API response payload. + """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: bulk_add()") + log.debug("Adding %s switches to fabric", len(switches)) + + endpoint = EpManageFabricsSwitchesPost() + endpoint.fabric_name = self.ctx.fabric + + switch_discoveries = [] + for switch_config, discovered in switches: + required_fields = ["hostname", "ip", "serialNumber", "model"] + missing_fields = [f for f in required_fields if not discovered.get(f)] + + if missing_fields: + msg = f"Switch missing required fields from discovery: " f"{', '.join(missing_fields)}. Cannot add to fabric." 
+ log.error(msg) + nd.module.fail_json(msg=msg) + + switch_role = switch_config.role if hasattr(switch_config, "role") else None + + switch_discovery = SwitchDiscoveryModel( + hostname=discovered.get("hostname"), + ip=discovered.get("ip"), + serialNumber=discovered.get("serialNumber"), + model=discovered.get("model"), + softwareVersion=discovered.get("softwareVersion"), + switchRole=switch_role, + ) + switch_discoveries.append(switch_discovery) + log.debug( + "Prepared switch for add: %s (%s)", + discovered.get("serialNumber"), + discovered.get("hostname"), + ) + + if not switch_discoveries: + log.error("No valid switches to add after validation") + raise SwitchOperationError("No valid switches to add - all failed validation") + + add_request = AddSwitchesRequestModel( + switches=switch_discoveries, + platformType=platform_type, + preserveConfig=preserve_config, + snmpV3AuthProtocol=auth_proto, + username=username, + password=password, + ) + + payload = add_request.to_payload() + serial_numbers = [d.get("serialNumber") for _cfg, d in switches] + log.info( + "Bulk adding %s switches to fabric %s: %s", + len(switches), + self.ctx.fabric, + ", ".join(serial_numbers), + ) + log.debug("Add endpoint: %s", endpoint.path) + log.debug("Add payload (password masked): %s", mask_password(payload)) + + try: + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) + except Exception as e: + msg = f"Bulk add switches to fabric '{self.ctx.fabric}' failed " f"for {', '.join(serial_numbers)}: {e}" + log.error(msg) + nd.module.fail_json(msg=msg) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + ApiDataChecker.check( + response.get("DATA", {}), f"Bulk add switches to fabric '{self.ctx.fabric}' ({', '.join(serial_numbers)})", log, nd.module.fail_json + ) + + results.action = "create" + results.operation_type = OperationType.CREATE + results.response_current = response + results.result_current = result + results.diff_current = payload + 
results.register_api_call() + + if not result.get("success"): + msg = f"Bulk add switches failed for " f"{', '.join(serial_numbers)}: {response}" + log.error(msg) + nd.module.fail_json(msg=msg) + + return response + + def bulk_delete( + self, + switches: List[Union[SwitchDataModel, SwitchDiscoveryModel]], + ) -> List[str]: + """Remove multiple switches from the fabric. + + Args: + switches: Switch models to delete. + + Returns: + List of switch identifiers submitted for deletion. + + Raises: + SwitchOperationError: Raised when the delete API call fails. + """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: bulk_delete()") + + if nd.module.check_mode: + log.debug("Check mode: Skipping actual deletion") + return [] + + serial_numbers: List[str] = [] + for switch in switches: + sn = None + if hasattr(switch, "switch_id"): + sn = switch.switch_id + elif hasattr(switch, "serial_number"): + sn = switch.serial_number + + if sn: + serial_numbers.append(sn) + else: + ip = getattr(switch, "fabric_management_ip", None) or getattr(switch, "ip", None) + log.warning("Cannot delete switch %s: no serial number/switch_id", ip) + + if not serial_numbers: + log.warning("No valid serial numbers found for deletion") + log.debug("EXIT: bulk_delete() - nothing to delete") + return [] + + endpoint = EpManageFabricsSwitchActionsRemovePost() + endpoint.fabric_name = self.ctx.fabric + payload = {"switchIds": serial_numbers} + + log.info( + "Bulk removing %s switch(es) from fabric %s: %s", + len(serial_numbers), + self.ctx.fabric, + serial_numbers, + ) + log.debug("Delete endpoint: %s", endpoint.path) + log.debug("Delete payload: %s", payload) + + try: + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + ApiDataChecker.check( + response.get("DATA", {}), f"Bulk delete switches from fabric '{self.ctx.fabric}' ({serial_numbers})", log, nd.module.fail_json 
+ ) + + results.action = "delete" + results.operation_type = OperationType.DELETE + results.response_current = response + results.result_current = result + results.diff_current = {"deleted": serial_numbers} + results.register_api_call() + + log.info("Bulk delete submitted for %s switch(es)", len(serial_numbers)) + log.debug("EXIT: bulk_delete()") + return serial_numbers + + except Exception as e: + log.error("Bulk delete failed: %s", e) + raise SwitchOperationError(f"Bulk delete failed for {serial_numbers}: {e}") from e + + def bulk_save_credentials( + self, + switch_actions: List[Tuple[str, SwitchConfigModel]], + ) -> None: + """Save switch credentials grouped by username and password. + + Args: + switch_actions: ``(switch_id, SwitchConfigModel)`` pairs. + + Returns: + None. + """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: bulk_save_credentials()") + + cred_groups: Dict[Tuple[str, str], List[str]] = {} + for sn, cfg in switch_actions: + if not cfg.username or not cfg.password: + log.debug("Skipping credentials for %s: missing username or password", sn) + continue + key = (cfg.username, cfg.password) + cred_groups.setdefault(key, []).append(sn) + + if not cred_groups: + log.debug("EXIT: bulk_save_credentials() - no credentials to save") + return + + endpoint = EpManageCredentialsSwitchesPost() + + for (username, password), serial_numbers in cred_groups.items(): + creds_request = SwitchCredentialsRequestModel( + switchIds=serial_numbers, + switchUsername=username, + switchPassword=password, + ) + payload = creds_request.to_payload() + + log.info( + "Saving credentials for %s switch(es): %s", + len(serial_numbers), + serial_numbers, + ) + log.debug("Credentials endpoint: %s", endpoint.path) + log.debug("Credentials payload (masked): %s", mask_password(payload)) + + try: + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + 
ApiDataChecker.check(response.get("DATA", {}), f"Save credentials for switches {serial_numbers}", log, nd.module.fail_json) + + results.action = "save_credentials" + results.operation_type = OperationType.UPDATE + results.response_current = response + results.result_current = result + results.diff_current = { + "switchIds": serial_numbers, + "username": username, + } + results.register_api_call() + log.info("Credentials saved for %s switch(es)", len(serial_numbers)) + except Exception as e: + msg = f"Failed to save credentials for " f"switches {serial_numbers}: {e}" + log.error(msg) + nd.module.fail_json(msg=msg) + + log.debug("EXIT: bulk_save_credentials()") + + def bulk_update_roles( + self, + switch_actions: List[Tuple[str, SwitchConfigModel]], + ) -> None: + """Update switch roles in bulk. + + Args: + switch_actions: ``(switch_id, SwitchConfigModel)`` pairs. + + Returns: + None. + """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: bulk_update_roles()") + + switch_roles = [] + for sn, cfg in switch_actions: + role = get_switch_field(cfg, ["role"]) + if not role: + continue + role_value = role.value if isinstance(role, SwitchRole) else str(role) + switch_roles.append({"switchId": sn, "role": role_value}) + + if not switch_roles: + log.debug("EXIT: bulk_update_roles() - no roles to update") + return + + endpoint = EpManageFabricsSwitchActionsChangeRolesPost() + endpoint.fabric_name = self.ctx.fabric + payload = {"switchRoles": switch_roles} + + log.info("Bulk updating roles for %s switch(es)", len(switch_roles)) + log.debug("ChangeRoles endpoint: %s", endpoint.path) + log.debug("ChangeRoles payload: %s", payload) + + try: + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + ApiDataChecker.check(response.get("DATA", {}), f"Update switch roles in fabric '{self.ctx.fabric}'", log, nd.module.fail_json) + + results.action = 
"update_role" + results.operation_type = OperationType.UPDATE + results.response_current = response + results.result_current = result + results.diff_current = payload + results.register_api_call() + log.info("Roles updated for %s switch(es)", len(switch_roles)) + except Exception as e: + msg = f"Failed to bulk update roles for switches: {e}" + log.error(msg) + nd.module.fail_json(msg=msg) + + log.debug("EXIT: bulk_update_roles()") + + def finalize(self, serial_numbers: Optional[List[str]] = None) -> None: + """Run optional save and deploy actions for the fabric. + + Uses service context flags to decide whether save and deploy should be + executed. No-op in check mode. + + Args: + serial_numbers: Switch serial numbers to deploy when + ``deploy_type`` is ``switch``. Falls back to + global deploy if empty or ``None``. + + Returns: + None. + """ + if self.ctx.nd.module.check_mode: + return + + if self.ctx.save_config: + self.ctx.log.info("Saving fabric configuration") + self.fabric_utils.save_config() + + if self.ctx.deploy_config: + if self.ctx.deploy_type == "switch" and serial_numbers: + self.ctx.log.info("Switch-level deploy for: %s", serial_numbers) + self.fabric_utils.deploy_switches(serial_numbers) + else: + if self.ctx.deploy_type == "switch" and not serial_numbers: + self.ctx.log.warning("Switch-level deploy requested but no serial numbers provided — falling back to global deploy") + self.ctx.log.info("Deploying fabric configuration (global)") + self.fabric_utils.deploy_config() + + def post_add_processing( + self, + switch_actions: List[Tuple[str, SwitchConfigModel]], + wait_utils, + context: str, + all_preserve_config: bool = False, + skip_greenfield_check: bool = False, + update_roles: bool = False, + ) -> None: + """Run post-add tasks for newly processed switches. + + Args: + switch_actions: ``(switch_id, SwitchConfigModel)`` pairs. + wait_utils: Wait utility used for manageability checks. + context: Label used in logs and error messages. 
    def __init__(
        self,
        ctx: SwitchServiceContext,
        fabric_ops: SwitchFabricOps,
        wait_utils: SwitchWaitUtils,
    ):
        """Initialize the POAP workflow handler.

        Args:
            ctx: Shared service context (handler methods read ``ctx.nd``,
                ``ctx.log``, ``ctx.results``, and ``ctx.fabric`` from it).
            fabric_ops: Fabric operation service; used for post-add processing
                after bootstrap/swap imports.
            wait_utils: Switch wait utility; passed to manageability checks.

        Returns:
            None.
        """
        # Dependencies are stored as-is; this handler owns no state of its own.
        self.ctx = ctx
        self.fabric_ops = fabric_ops
        self.wait_utils = wait_utils
switch_cfg.preprovision)) + elif has_poap: + bootstrap_entries.append((switch_cfg, switch_cfg.poap)) + else: + log.warning( + "Switch config for %s has no poap or preprovision block — skipping", + switch_cfg.seed_ip, + ) + + log.info( + "POAP classification: %s bootstrap, %s pre-provision, %s swap", + len(bootstrap_entries), + len(preprov_entries), + len(swap_entries), + ) + + # Check mode — preview only + if nd.module.check_mode: + log.info( + "Check mode: would bootstrap %s, pre-provision %s, swap %s", + len(bootstrap_entries), + len(preprov_entries), + len(swap_entries), + ) + results.action = "poap" + results.operation_type = OperationType.CREATE + results.response_current = {"MESSAGE": "check mode — skipped"} + results.result_current = {"success": True, "changed": False} + results.diff_current = { + "bootstrap": [cfg.seed_ip for cfg, _sw in bootstrap_entries], + "preprovision": [cfg.seed_ip for cfg, _sw in preprov_entries], + "swap": [cfg.seed_ip for cfg, _sw in swap_entries], + } + results.register_api_call() + return + + # Idempotency is handled entirely by compute_changes before entries + # reach this handler. Everything in bootstrap_entries / preprov_entries + # has already been classified as needing action — no re-checking here. 
+ + # Handle swap entries (change serial number on pre-provisioned switches) + if swap_entries: + self._handle_poap_swap(swap_entries, existing or []) + + # Handle bootstrap entries + if bootstrap_entries: + self._handle_poap_bootstrap(bootstrap_entries) + + # Handle pre-provision entries + if preprov_entries: + preprov_models: List[PreProvisionSwitchModel] = [] + for switch_cfg, preprov_cfg in preprov_entries: + pp_model = self._build_preprovision_model(switch_cfg, preprov_cfg) + preprov_models.append(pp_model) + log.info( + "Built pre-provision model for serial=%s, hostname=%s, ip=%s", + pp_model.serial_number, + pp_model.hostname, + pp_model.ip, + ) + + if preprov_models: + self._preprovision_switches(preprov_models) + + # Edge case: nothing actionable + if not bootstrap_entries and not preprov_entries and not swap_entries: + log.warning("No POAP switch models built — nothing to process") + results.action = "poap" + results.operation_type = OperationType.QUERY + results.response_current = {"MESSAGE": "no switches to process"} + results.result_current = {"success": True, "changed": False} + results.diff_current = {} + results.register_api_call() + + log.debug("EXIT: POAPHandler.handle()") + + def _handle_poap_bootstrap( + self, + bootstrap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel]], + ) -> None: + """Process bootstrap POAP entries. + + Args: + bootstrap_entries: ``(SwitchConfigModel, POAPConfigModel)`` pairs + for bootstrap operations. + + Returns: + None. 
+ """ + nd = self.ctx.nd + log = self.ctx.log + + log.debug("ENTER: _handle_poap_bootstrap()") + log.info("Processing %s bootstrap entries", len(bootstrap_entries)) + + bootstrap_switches = query_bootstrap_switches(nd, self.ctx.fabric, log) + bootstrap_idx = build_bootstrap_index(bootstrap_switches) + log.debug( + "Bootstrap index contains %s switch(es): %s", + len(bootstrap_idx), + list(bootstrap_idx.keys()), + ) + + import_models: List[BootstrapImportSwitchModel] = [] + for switch_cfg, poap_cfg in bootstrap_entries: + serial = poap_cfg.serial_number + bootstrap_data = bootstrap_idx.get(serial) + + if not bootstrap_data: + msg = ( + f"Serial {serial} not found in bootstrap API " + f"response. The switch is not in the POAP loop. " + f"Ensure the switch is powered on and POAP/DHCP " + f"is enabled in the fabric." + ) + log.error(msg) + nd.module.fail_json(msg=msg) + + model = self._build_bootstrap_import_model(switch_cfg, poap_cfg, bootstrap_data) + import_models.append(model) + log.info( + "Built bootstrap model for serial=%s, hostname=%s, ip=%s", + serial, + model.hostname, + model.ip, + ) + + if not import_models: + log.warning("No bootstrap import models built") + log.debug("EXIT: _handle_poap_bootstrap()") + return + + self._import_bootstrap_switches(import_models) + + # Post-import: wait for manageability, save credentials, finalize + switch_actions: List[Tuple[str, SwitchConfigModel]] = [] + for switch_cfg, poap_cfg in bootstrap_entries: + switch_actions.append((poap_cfg.serial_number, switch_cfg)) + + self.fabric_ops.post_add_processing( + switch_actions, + wait_utils=self.wait_utils, + context="bootstrap", + skip_greenfield_check=True, + ) + + log.debug("EXIT: _handle_poap_bootstrap()") + + def _build_bootstrap_import_model( + self, + switch_cfg: SwitchConfigModel, + poap_cfg: POAPConfigModel, + bootstrap_data: Optional[Dict[str, Any]], + ) -> BootstrapImportSwitchModel: + """Build a bootstrap import model from config and bootstrap data. 
+ + Args: + switch_cfg: Parent switch config. + poap_cfg: POAP config entry. + bootstrap_data: Matching bootstrap response entry. + + Returns: + Completed ``BootstrapImportSwitchModel`` for API submission. + """ + log = self.ctx.log + log.debug("ENTER: _build_bootstrap_import_model(serial=%s)", poap_cfg.serial_number) + + bs = bootstrap_data or {} + bs_data = bs.get("data") or {} + + serial_number = poap_cfg.serial_number + ip = switch_cfg.seed_ip + switch_role = switch_cfg.role + password = switch_cfg.password + auth_proto = SnmpV3AuthProtocol.MD5 # POAP/bootstrap always uses MD5 + image_policy = poap_cfg.image_policy + + discovery_username = getattr(poap_cfg, "discovery_username", None) + discovery_password = getattr(poap_cfg, "discovery_password", None) + + # model, version and config_data always come from the bootstrap API for + # bootstrap-only operations. + model = bs.get("model", "") + version = bs.get("softwareVersion", "") + + gateway_ip_mask = bs.get("gatewayIpMask") or bs_data.get("gatewayIpMask") + data_models = bs_data.get("models", []) + + # Hostname: user-provided via poap.hostname is the default; if the + # bootstrap API returns a different value, the API wins and we warn. + user_hostname = poap_cfg.hostname + api_hostname = bs.get("hostname", "") + if api_hostname and api_hostname != user_hostname: + log.warning( + "Bootstrap (%s): API hostname '%s' overrides user-provided hostname '%s'. Using API value.", + serial_number, + api_hostname, + user_hostname, + ) + hostname = api_hostname + else: + hostname = user_hostname + + # Role: switch_cfg.role is user-provided; if the bootstrap API carries a + # role and it differs, the API value wins and we warn. + api_role_raw = bs.get("switchRole") or bs_data.get("switchRole") + if api_role_raw: + try: + api_role = SwitchRole.normalize(api_role_raw) + if api_role and api_role != switch_role: + log.warning( + "Bootstrap (%s): API role '%s' overrides user-provided role '%s'. 
Using API value.", + serial_number, + api_role_raw, + switch_role, + ) + switch_role = api_role + except Exception: + pass + + # Build the data block from resolved values (replaces build_poap_data_block) + data_block: Optional[Dict[str, Any]] = None + if gateway_ip_mask or data_models: + data_block = {} + if gateway_ip_mask: + data_block["gatewayIpMask"] = gateway_ip_mask + if data_models: + data_block["models"] = data_models + + # Bootstrap API response fields + fingerprint = bs.get("fingerPrint") or bs.get("fingerprint", "") + public_key = bs.get("publicKey", "") + re_add = bs.get("reAdd", False) + in_inventory = bs.get("inInventory", False) + + bootstrap_model = BootstrapImportSwitchModel( + serialNumber=serial_number, + model=model, + hostname=hostname, + ip=ip, + password=password, + discoveryAuthProtocol=auth_proto, + discoveryUsername=discovery_username, + discoveryPassword=discovery_password, + data=data_block, + fingerprint=fingerprint, + publicKey=public_key, + reAdd=re_add, + inInventory=in_inventory, + imagePolicy=image_policy or "", + switchRole=switch_role, + softwareVersion=version, + gatewayIpMask=gateway_ip_mask, + ) + + log.debug("EXIT: _build_bootstrap_import_model() -> %s", bootstrap_model.serial_number) + return bootstrap_model + + def _import_bootstrap_switches( + self, + models: List[BootstrapImportSwitchModel], + ) -> None: + """Submit bootstrap import models. + + Args: + models: ``BootstrapImportSwitchModel`` objects to submit. + + Returns: + None. 
+ """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: _import_bootstrap_switches()") + + endpoint = EpManageFabricsSwitchActionsImportBootstrapPost() + endpoint.fabric_name = self.ctx.fabric + + request_model = ImportBootstrapSwitchesRequestModel(switches=models) + payload = request_model.to_payload() + + log.debug("importBootstrap endpoint: %s", endpoint.path) + log.debug("importBootstrap payload (masked): %s", mask_password(payload)) + log.info( + "Importing %s bootstrap switch(es): %s", + len(models), + [m.serial_number for m in models], + ) + + try: + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) + except Exception as e: + msg = f"importBootstrap API call failed for " f"{[m.serial_number for m in models]}: {e}" + log.error(msg) + nd.module.fail_json(msg=msg) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + ApiDataChecker.check(response.get("DATA", {}), f"importBootstrap for {[m.serial_number for m in models]}", log, nd.module.fail_json) + + results.action = "bootstrap" + results.operation_type = OperationType.CREATE + results.response_current = response + results.result_current = result + results.diff_current = payload + results.register_api_call() + + if not result.get("success"): + msg = f"importBootstrap failed for " f"{[m.serial_number for m in models]}: {response}" + log.error(msg) + nd.module.fail_json(msg=msg) + + log.info("importBootstrap API response success: %s", result.get("success")) + log.debug("EXIT: _import_bootstrap_switches()") + + def _build_preprovision_model( + self, + switch_cfg: SwitchConfigModel, + preprov_cfg: "PreprovisionConfigModel", + ) -> PreProvisionSwitchModel: + """Build a pre-provision model from PreprovisionConfigModel configuration. + + Args: + switch_cfg: Parent switch config. + preprov_cfg: Pre-provision config entry. + + Returns: + Completed ``PreProvisionSwitchModel`` for API submission. 
+ """ + log = self.ctx.log + log.debug("ENTER: _build_preprovision_model(serial=%s)", preprov_cfg.serial_number) + + serial_number = preprov_cfg.serial_number + hostname = preprov_cfg.hostname + ip = switch_cfg.seed_ip + model_name = preprov_cfg.model + version = preprov_cfg.version + image_policy = preprov_cfg.image_policy + gateway_ip_mask = preprov_cfg.config_data.gateway + switch_role = switch_cfg.role + password = switch_cfg.password + auth_proto = SnmpV3AuthProtocol.MD5 # Pre-provision always uses MD5 + + discovery_username = getattr(preprov_cfg, "discovery_username", None) + discovery_password = getattr(preprov_cfg, "discovery_password", None) + + # Build data block from mandatory config_data + data_block = build_poap_data_block(preprov_cfg) + + preprov_model = PreProvisionSwitchModel( + serialNumber=serial_number, + hostname=hostname, + ip=ip, + model=model_name, + softwareVersion=version, + gatewayIpMask=gateway_ip_mask, + password=password, + discoveryAuthProtocol=auth_proto, + discoveryUsername=discovery_username, + discoveryPassword=discovery_password, + data=data_block, + imagePolicy=image_policy or None, + switchRole=switch_role, + ) + + log.debug("EXIT: _build_preprovision_model() -> %s", preprov_model.serial_number) + return preprov_model + + def _preprovision_switches( + self, + models: List[PreProvisionSwitchModel], + ) -> None: + """Submit pre-provision switch models. + + Args: + models: ``PreProvisionSwitchModel`` objects to submit. + + Returns: + None. 
+ """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + + log.debug("ENTER: _preprovision_switches()") + + endpoint = EpManageFabricsSwitchActionsPreProvisionPost() + endpoint.fabric_name = self.ctx.fabric + + request_model = PreProvisionSwitchesRequestModel(switches=models) + payload = request_model.to_payload() + + log.debug("preProvision endpoint: %s", endpoint.path) + log.debug("preProvision payload (masked): %s", mask_password(payload)) + log.info( + "Pre-provisioning %s switch(es): %s", + len(models), + [m.serial_number for m in models], + ) + + try: + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) + except Exception as e: + msg = f"preProvision API call failed for " f"{[m.serial_number for m in models]}: {e}" + log.error(msg) + nd.module.fail_json(msg=msg) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + ApiDataChecker.check(response.get("DATA", {}), f"preProvision for {[m.serial_number for m in models]}", log, nd.module.fail_json) + + results.action = "preprovision" + results.operation_type = OperationType.CREATE + results.response_current = response + results.result_current = result + results.diff_current = payload + results.register_api_call() + + if not result.get("success"): + msg = f"preProvision failed for " f"{[m.serial_number for m in models]}: {response}" + log.error(msg) + nd.module.fail_json(msg=msg) + + log.info("preProvision API response success: %s", result.get("success")) + log.debug("EXIT: _preprovision_switches()") + + def _handle_poap_swap( + self, + swap_entries: List[Tuple[SwitchConfigModel, POAPConfigModel, "PreprovisionConfigModel"]], + existing: List[SwitchDataModel], + ) -> None: + """Process POAP serial-swap entries. + + Args: + swap_entries: ``(SwitchConfigModel, POAPConfigModel, PreprovisionConfigModel)`` + swap triples where poap carries the new serial and preprovision + carries the old (pre-provisioned) serial. 
+ existing: Current fabric inventory snapshot. + + Returns: + None. + """ + nd = self.ctx.nd + log = self.ctx.log + results = self.ctx.results + fabric = self.ctx.fabric + + log.debug("ENTER: _handle_poap_swap()") + log.info("Processing %s POAP swap entries", len(swap_entries)) + + # ------------------------------------------------------------------ + # Step 1: Validate preprovision serials exist in fabric inventory + # ------------------------------------------------------------------ + fabric_index: Dict[str, Dict[str, Any]] = {sw.switch_id: sw.model_dump(by_alias=True) for sw in existing if sw.switch_id} + log.debug( + "Fabric inventory contains %s switch(es): %s", + len(fabric_index), + list(fabric_index.keys()), + ) + + for switch_cfg, poap_cfg, preprov_cfg in swap_entries: + old_serial = preprov_cfg.serial_number + if old_serial not in fabric_index: + msg = ( + f"Pre-provisioned serial '{old_serial}' not found in " + f"fabric '{fabric}' inventory. The switch must be " + f"pre-provisioned before a swap can be performed." + ) + log.error(msg) + nd.module.fail_json(msg=msg) + log.info( + "Validated: pre-provisioned serial '%s' exists in fabric inventory", + old_serial, + ) + + # ------------------------------------------------------------------ + # Step 2: Validate new serials exist in bootstrap list + # ------------------------------------------------------------------ + bootstrap_switches = query_bootstrap_switches(nd, fabric, log) + bootstrap_index = build_bootstrap_index(bootstrap_switches) + log.debug( + "Bootstrap list contains %s switch(es): %s", + len(bootstrap_index), + list(bootstrap_index.keys()), + ) + + for switch_cfg, poap_cfg, preprov_cfg in swap_entries: + new_serial = poap_cfg.serial_number + if new_serial not in bootstrap_index: + msg = ( + f"New serial '{new_serial}' not found in the bootstrap " + f"(POAP) list for fabric '{fabric}'. The physical " + f"switch must be in the POAP loop before a swap can be " + f"performed." 
+ ) + log.error(msg) + nd.module.fail_json(msg=msg) + log.info("Validated: new serial '%s' exists in bootstrap list", new_serial) + + # ------------------------------------------------------------------ + # Step 3: Call changeSwitchSerialNumber for each swap entry + # ------------------------------------------------------------------ + for switch_cfg, poap_cfg, preprov_cfg in swap_entries: + old_serial = preprov_cfg.serial_number + new_serial = poap_cfg.serial_number + + log.info( + "Swapping serial for pre-provisioned switch: %s → %s", + old_serial, + new_serial, + ) + + endpoint = EpManageFabricsSwitchChangeSerialNumberPost() + endpoint.fabric_name = fabric + endpoint.switch_sn = old_serial + + request_body = ChangeSwitchSerialNumberRequestModel(newSwitchId=new_serial) + payload = request_body.to_payload() + + log.debug("changeSwitchSerialNumber endpoint: %s", endpoint.path) + log.debug("changeSwitchSerialNumber payload: %s", payload) + + try: + nd.request(path=endpoint.path, verb=endpoint.verb, data=payload) + except Exception as e: + msg = f"changeSwitchSerialNumber API call failed for " f"{old_serial} → {new_serial}: {e}" + log.error(msg) + nd.module.fail_json(msg=msg) + + response = nd.rest_send.response_current + result = nd.rest_send.result_current + ApiDataChecker.check(response.get("DATA", {}), f"changeSwitchSerialNumber {old_serial} → {new_serial}", log, nd.module.fail_json) + + results.action = "swap_serial" + results.operation_type = OperationType.UPDATE + results.response_current = response + results.result_current = result + results.diff_current = { + "old_serial": old_serial, + "new_serial": new_serial, + } + results.register_api_call() + + if not result.get("success"): + msg = f"Failed to swap serial number from {old_serial} " f"to {new_serial}: {response}" + log.error(msg) + nd.module.fail_json(msg=msg) + + log.info("Serial number swap successful: %s → %s", old_serial, new_serial) + # 
------------------------------------------------------------------ + # Step 4: Re-query bootstrap API for post-swap data + # ------------------------------------------------------------------ + post_swap_bootstrap = query_bootstrap_switches(nd, fabric, log) + post_swap_index = build_bootstrap_index(post_swap_bootstrap) + log.debug("Post-swap bootstrap list contains %s switch(es)", len(post_swap_index)) + + # ------------------------------------------------------------------ + # Step 5: Build BootstrapImportSwitchModels and POST importBootstrap + # ------------------------------------------------------------------ + import_models: List[BootstrapImportSwitchModel] = [] + for switch_cfg, poap_cfg, preprov_cfg in swap_entries: + new_serial = poap_cfg.serial_number + bootstrap_data = post_swap_index.get(new_serial) + + if not bootstrap_data: + msg = ( + f"Serial '{new_serial}' not found in bootstrap API " + f"response after swap. The controller may not have " + f"updated the bootstrap list yet." 
                )
                log.error(msg)
                nd.module.fail_json(msg=msg)

            model = self._build_bootstrap_import_model(switch_cfg, poap_cfg, bootstrap_data)
            import_models.append(model)
            log.info(
                "Built bootstrap model for swapped serial=%s, hostname=%s, ip=%s",
                new_serial,
                model.hostname,
                model.ip,
            )

        if not import_models:
            log.warning("No bootstrap import models built after swap")
            log.debug("EXIT: _handle_poap_swap()")
            return

        try:
            self._import_bootstrap_switches(import_models)
        except Exception as e:
            msg = f"importBootstrap failed after serial swap: {e}"
            log.error(msg)
            nd.module.fail_json(msg=msg)

        # ------------------------------------------------------------------
        # Step 6: Wait for manageability, save credentials, finalize
        # ------------------------------------------------------------------
        switch_actions: List[Tuple[str, SwitchConfigModel]] = []
        for switch_cfg, poap_cfg, preprov_cfg in swap_entries:
            switch_actions.append((poap_cfg.serial_number, switch_cfg))

        self.fabric_ops.post_add_processing(
            switch_actions,
            wait_utils=self.wait_utils,
            context="swap",
            skip_greenfield_check=True,
        )

        log.info(
            "POAP swap completed successfully for %s switch(es): %s",
            len(swap_entries),
            [sn for sn, _cfg in switch_actions],
        )
        log.debug("EXIT: _handle_poap_swap()")


# =========================================================================
# RMA Handler (Return Material Authorization)
# =========================================================================


class RMAHandler:
    """Handle RMA workflows for switch replacement."""

    def __init__(
        self,
        ctx: SwitchServiceContext,
        fabric_ops: SwitchFabricOps,
        wait_utils: SwitchWaitUtils,
    ):
        """Initialize the RMA workflow handler.

        Args:
            ctx: Shared service context.
            fabric_ops: Fabric operation service.
            wait_utils: Switch wait utility service.

        Returns:
            None.
        """
        self.ctx = ctx
        self.fabric_ops = fabric_ops
        self.wait_utils = wait_utils

    def handle(
        self,
        proposed_config: List[SwitchConfigModel],
        existing: List[SwitchDataModel],
    ) -> None:
        """Execute RMA processing for the provided switch configs.

        Args:
            proposed_config: Validated switch configs for RMA operations.
            existing: Current fabric inventory snapshot.

        Returns:
            None.
        """
        nd = self.ctx.nd
        log = self.ctx.log
        results = self.ctx.results

        log.debug("ENTER: RMAHandler.handle()")
        log.info("Processing RMA for %s switch config(s)", len(proposed_config))

        # Check mode — preview only
        if nd.module.check_mode:
            log.info("Check mode: would run RMA provision")
            results.action = "rma"
            results.operation_type = OperationType.CREATE
            results.response_current = {"MESSAGE": "check mode — skipped"}
            results.result_current = {"success": True, "changed": False}
            results.diff_current = {"rma_switches": [pc.seed_ip for pc in proposed_config]}
            results.register_api_call()
            return

        # Collect (SwitchConfigModel, RMAConfigModel) pairs
        rma_entries: List[Tuple[SwitchConfigModel, RMAConfigModel]] = []
        for switch_cfg in proposed_config:
            if not switch_cfg.rma:
                log.warning(
                    "Switch config for %s has no RMA block — skipping",
                    switch_cfg.seed_ip,
                )
                continue
            for rma_cfg in switch_cfg.rma:
                rma_entries.append((switch_cfg, rma_cfg))

        if not rma_entries:
            log.warning("No RMA entries found — nothing to process")
            results.action = "rma"
            results.operation_type = OperationType.QUERY
            results.response_current = {"MESSAGE": "no switches to process"}
            results.result_current = {"success": True, "changed": False}
            results.diff_current = {}
            results.register_api_call()
            return

        log.info("Found %s RMA entry/entries to process", len(rma_entries))

        # Validate old switches exist and are in correct state; look up by seed_ip
        old_switch_info = self._validate_prerequisites(rma_entries, existing)

        # Query bootstrap API for new switch data
        bootstrap_switches = query_bootstrap_switches(nd, self.ctx.fabric, log)
        bootstrap_idx = build_bootstrap_index(bootstrap_switches)
        log.debug(
            "Bootstrap index contains %s switch(es): %s",
            len(bootstrap_idx),
            list(bootstrap_idx.keys()),
        )

        # Build and submit each RMA request
        switch_actions: List[Tuple[str, SwitchConfigModel]] = []
        for switch_cfg, rma_cfg in rma_entries:
            new_serial = rma_cfg.new_serial_number
            old_serial = old_switch_info[switch_cfg.seed_ip]["old_serial"]
            bootstrap_data = bootstrap_idx.get(new_serial)

            if not bootstrap_data:
                msg = (
                    f"New switch serial {new_serial} not found in "
                    f"bootstrap API response. The switch is not in the "
                    f"POAP loop. Ensure the replacement switch is powered "
                    f"on and POAP/DHCP is enabled in the fabric."
                )
                log.error(msg)
                # fail_json exits the module, so the loop does not continue past here.
                nd.module.fail_json(msg=msg)

            rma_model = self._build_rma_model(
                switch_cfg,
                rma_cfg,
                bootstrap_data,
                old_switch_info[switch_cfg.seed_ip],
            )
            log.info(
                "Built RMA model: replacing %s with %s",
                old_serial,
                rma_model.new_switch_id,
            )

            self._provision_rma_switch(rma_model)
            switch_actions.append((rma_model.new_switch_id, switch_cfg))

        # Post-processing: wait for RMA switches to become ready, then
        # save credentials and finalize. RMA switches come up via POAP
        # bootstrap and never enter migration mode, so we use the
        # RMA-specific wait (unreachable → ok) instead of the generic
        # wait_for_switch_manageable which would time out on the
        # migration-mode phase.
        all_new_serials = [sn for sn, _cfg in switch_actions]
        log.info(
            "Waiting for %s RMA replacement switch(es) to become ready: %s",
            len(all_new_serials),
            all_new_serials,
        )
        success = self.wait_utils.wait_for_rma_switch_ready(all_new_serials)
        if not success:
            msg = f"One or more RMA replacement switches failed to become " f"discoverable in fabric '{self.ctx.fabric}'. " f"Switches: {all_new_serials}"
            log.error(msg)
            nd.module.fail_json(msg=msg)

        self.fabric_ops.bulk_save_credentials(switch_actions)

        try:
            self.fabric_ops.finalize(serial_numbers=all_new_serials)
        except Exception as e:
            msg = f"Failed to finalize (config-save/deploy) for RMA " f"switches {all_new_serials}: {e}"
            log.error(msg)
            nd.module.fail_json(msg=msg)

        log.debug("EXIT: RMAHandler.handle()")

    def _validate_prerequisites(
        self,
        rma_entries: List[Tuple[SwitchConfigModel, RMAConfigModel]],
        existing: List[SwitchDataModel],
    ) -> Dict[str, Dict[str, Any]]:
        """Validate RMA prerequisites for each requested replacement.

        Looks up the switch to be replaced by ``seed_ip`` (the fabric management
        IP). The serial number of the old switch is derived from inventory —
        it is not required in the playbook config.

        Args:
            rma_entries: ``(SwitchConfigModel, RMAConfigModel)`` pairs.
            existing: Current fabric inventory snapshot.

        Returns:
            Dict keyed by ``seed_ip`` with prerequisite metadata including
            ``old_serial``, ``hostname``, and ``switch_data``.
        """
        nd = self.ctx.nd
        log = self.ctx.log

        log.debug("ENTER: _validate_prerequisites()")

        existing_by_ip: Dict[str, SwitchDataModel] = {sw.fabric_management_ip: sw for sw in existing if sw.fabric_management_ip}

        result: Dict[str, Dict[str, Any]] = {}

        for switch_cfg, _rma_cfg in rma_entries:
            seed_ip = switch_cfg.seed_ip

            old_switch = existing_by_ip.get(seed_ip)
            if old_switch is None:
                nd.module.fail_json(
                    msg=(
                        f"RMA: seed_ip '{seed_ip}' not found in "
                        f"fabric '{self.ctx.fabric}' inventory. The switch "
                        f"being replaced must exist in the fabric."
                    )
                )

            # Fall back to switch_id when serial_number is absent from inventory.
            old_serial = old_switch.serial_number or old_switch.switch_id
            if not old_serial:
                nd.module.fail_json(msg=(f"RMA: Switch at '{seed_ip}' has no serial number in " f"the inventory response."))

            ad = old_switch.additional_data
            if ad is None:
                nd.module.fail_json(
                    msg=(
                        f"RMA: Switch at '{seed_ip}' (serial '{old_serial}') has no "
                        f"additional data in the inventory response. Cannot verify "
                        f"discovery status and system mode."
                    )
                )

            if ad.discovery_status != DiscoveryStatus.UNREACHABLE:
                nd.module.fail_json(
                    msg=(
                        f"RMA: Switch at '{seed_ip}' (serial '{old_serial}') has discovery status "
                        f"'{getattr(ad.discovery_status, 'value', ad.discovery_status) if ad.discovery_status else 'unknown'}', "
                        f"expected 'unreachable'. The old switch must be "
                        f"unreachable before RMA can proceed."
                    )
                )

            if ad.system_mode != SystemMode.MAINTENANCE:
                nd.module.fail_json(
                    msg=(
                        f"RMA: Switch at '{seed_ip}' (serial '{old_serial}') is in "
                        f"'{getattr(ad.system_mode, 'value', ad.system_mode) if ad.system_mode else 'unknown'}' "
                        f"mode, expected 'maintenance'. Put the switch in "
                        f"maintenance mode before initiating RMA."
                    )
                )

            result[seed_ip] = {
                "old_serial": old_serial,
                "hostname": old_switch.hostname or "",
                "switch_data": old_switch,
            }
            log.info(
                "RMA prerequisite check passed for '%s' (serial=%s, discovery=%s, mode=%s)",
                seed_ip,
                old_serial,
                ad.discovery_status,
                ad.system_mode,
            )

        log.debug("EXIT: _validate_prerequisites()")
        return result

    def _build_rma_model(
        self,
        switch_cfg: SwitchConfigModel,
        rma_cfg: RMAConfigModel,
        bootstrap_data: Dict[str, Any],
        old_switch_info: Dict[str, Any],
    ) -> RMASwitchModel:
        """Build an RMA model from config and bootstrap data.

        All switch properties (model, version, gateway, modules) are sourced
        exclusively from the bootstrap API response. Only the new serial number,
        optional image policy, and optional discovery credentials come from the
        playbook config.

        Args:
            switch_cfg: Parent switch config.
            rma_cfg: RMA config entry.
            bootstrap_data: Bootstrap response entry for the replacement switch.
            old_switch_info: Prerequisite metadata keyed from _validate_prerequisites.

        Returns:
            Completed ``RMASwitchModel`` for API submission.
        """
        log = self.ctx.log
        old_serial = old_switch_info["old_serial"]
        log.debug(
            "ENTER: _build_rma_model(new=%s, old=%s)",
            rma_cfg.new_serial_number,
            old_serial,
        )

        bs_data = bootstrap_data.get("data") or {}

        # gatewayIpMask may appear at the top level or nested under "data";
        # prefer the top-level value when both are present.
        gateway_ip_mask = bootstrap_data.get("gatewayIpMask") or bs_data.get("gatewayIpMask", "")
        data_models = bs_data.get("models", [])
        model = bootstrap_data.get("model", "")
        software_version = bootstrap_data.get("softwareVersion", "")
        public_key = bootstrap_data.get("publicKey", "")
        # Controller responses vary in key casing for the fingerprint field.
        finger_print = bootstrap_data.get("fingerPrint") or bootstrap_data.get("fingerprint", "")

        rma_model = RMASwitchModel(
            gatewayIpMask=gateway_ip_mask,
            model=model,
            softwareVersion=software_version,
            imagePolicy=rma_cfg.image_policy,
            switchRole=switch_cfg.role,
            password=switch_cfg.password,
            # NOTE(review): auth protocol is hard-coded to MD5 and does not use
            # switch_cfg.auth_proto — confirm this is intentional for RMA.
            discoveryAuthProtocol=SnmpV3AuthProtocol.MD5,
            discoveryUsername=rma_cfg.discovery_username,
            discoveryPassword=rma_cfg.discovery_password,
            hostname=old_switch_info.get("hostname", ""),
            ip=switch_cfg.seed_ip,
            newSwitchId=rma_cfg.new_serial_number,
            oldSwitchId=old_serial,
            publicKey=public_key,
            fingerPrint=finger_print,
            data=({"gatewayIpMask": gateway_ip_mask, "models": data_models} if (gateway_ip_mask or data_models) else None),
        )

        log.debug("EXIT: _build_rma_model() -> newSwitchId=%s, oldSwitchId=%s", rma_model.new_switch_id, old_serial)
        return rma_model

    def _provision_rma_switch(
        self,
        rma_model: RMASwitchModel,
    ) -> None:
        """Submit an RMA provisioning request for one switch.

        The old and new switch IDs are embedded in the payload via
        ``oldSwitchId`` and ``newSwitchId`` fields on the model.

        Args:
            rma_model: RMA model for the replacement switch.

        Returns:
            None.
        """
        nd = self.ctx.nd
        log = self.ctx.log
        results = self.ctx.results

        log.debug("ENTER: _provision_rma_switch()")

        endpoint = EpManageFabricsSwitchProvisionRMAPost()
        endpoint.fabric_name = self.ctx.fabric
        endpoint.switch_sn = rma_model.old_switch_id

        payload = rma_model.to_payload()

        log.info("RMA: Replacing %s with %s", rma_model.old_switch_id, rma_model.new_switch_id)
        log.debug("RMA endpoint: %s", endpoint.path)
        # Mask credentials before the payload reaches debug logs.
        log.debug("RMA payload (masked): %s", mask_password(payload))

        try:
            nd.request(path=endpoint.path, verb=endpoint.verb, data=payload)
        except Exception as e:
            msg = f"RMA provision API call failed for {rma_model.old_switch_id} → {rma_model.new_switch_id}: {e}"
            log.error(msg)
            nd.module.fail_json(msg=msg)

        response = nd.rest_send.response_current
        result = nd.rest_send.result_current
        ApiDataChecker.check(response.get("DATA", {}), f"RMA provision {rma_model.old_switch_id} → {rma_model.new_switch_id}", log, nd.module.fail_json)

        results.action = "rma"
        results.operation_type = OperationType.CREATE
        results.response_current = response
        results.result_current = result
        results.diff_current = {
            "old_switch_id": rma_model.old_switch_id,
            "new_switch_id": rma_model.new_switch_id,
        }
        results.register_api_call()

        if not result.get("success"):
            msg = f"RMA provision failed for {rma_model.old_switch_id} → {rma_model.new_switch_id}: {response}"
            log.error(msg)
            nd.module.fail_json(msg=msg)

        log.info("RMA provision API response success: %s", result.get("success"))
        log.debug("EXIT: _provision_rma_switch()")


# =========================================================================
# Orchestrator (Thin State Router)
# =========================================================================

class NDSwitchResourceModule:
    """Orchestrate switch lifecycle management across supported states."""

    # =====================================================================
    # Initialization & Lifecycle
    # =====================================================================

    def __init__(
        self,
        nd: NDModule,
        results: Results,
        logger: Optional[logging.Logger] = None,
    ):
        """Initialize module state, services, and inventory snapshots.

        Args:
            nd: ND module wrapper.
            results: Shared results aggregator.
            logger: Optional logger instance.

        Returns:
            None.
        """
        log = logger or logging.getLogger("nd.NDSwitchResourceModule")
        self.log = log
        self.nd = nd
        self.module = nd.module
        self.results = results

        # Module parameters
        self.config = self.module.params.get("config", {})
        self.fabric = self.module.params.get("fabric")
        self.state = self.module.params.get("state")

        # Shared context for service classes
        config_actions = self.module.params.get("config_actions") or {}

        # Configure RestSend once: fix timeout to request_retry_count so all
        # API calls use a single retry iteration instead of the default 300s loop.
        # check_mode is NOT overridden globally — read-only calls that must reach
        # the controller override it locally via save_settings()/restore_settings().
        self.request_retry_count: int = 1
        self.nd.rest_send_timeout = self.request_retry_count

        self.ctx = SwitchServiceContext(
            nd=nd,
            results=results,
            fabric=self.fabric,
            log=log,
            save_config=config_actions.get("save", True),
            deploy_config=config_actions.get("deploy", True),
            deploy_type=config_actions.get("type", "switch"),
        )

        # Switch collections
        try:
            self.proposed: NDConfigCollection = NDConfigCollection(model_class=SwitchDataModel)
            self.existing: NDConfigCollection = NDConfigCollection.from_api_response(
                response_data=self._query_all_switches(),
                model_class=SwitchDataModel,
            )
            self.before: NDConfigCollection = self.existing.copy()
            self.sent: NDConfigCollection = NDConfigCollection(model_class=SwitchDataModel)
            self.sent_adds: List[SwitchConfigModel] = []
            self.proposed_cfgs: List[SwitchConfigModel] = []
            # Plan stored here after compute_changes so check-mode output can use it
            self._plan: Optional[SwitchPlan] = None
        except Exception as e:
            msg = f"Failed to query fabric '{self.fabric}' inventory " f"during initialization: {e}"
            log.error(msg)
            nd.module.fail_json(msg=msg)

        # Operation tracking
        self.nd_logs: List[Dict[str, Any]] = []
        self.msg: str = ""
        self.output: NDOutput = NDOutput(output_level=self.module.params.get("output_level", "normal"))
        self.output.assign(before=self.before, after=self.existing)

        # Utility instances (SwitchWaitUtils / FabricUtils depend on self)
        self.fabric_utils = FabricUtils(self.nd, self.fabric, log)
        self.wait_utils = SwitchWaitUtils(self, self.fabric, log, fabric_utils=self.fabric_utils)

        # Service instances (Dependency Injection)
        self.discovery = SwitchDiscoveryService(self.ctx)
        self.fabric_ops = SwitchFabricOps(self.ctx, self.fabric_utils)
        self.poap_handler = POAPHandler(self.ctx, self.fabric_ops, self.wait_utils)
        self.rma_handler = RMAHandler(self.ctx, self.fabric_ops, self.wait_utils)

        log.info("Initialized NDSwitchResourceModule for fabric: %s", self.fabric)

    def _inventory_to_config_list(self, collection: "NDConfigCollection") -> List[Dict[str, Any]]:
        """Convert an inventory collection (SwitchDataModel) to gathered-format config dicts.

        Produces the same shape as gathered state output: seed_ip, role, auth_proto,
        preserve_config, username/password placeholders. Built directly from
        SwitchDataModel fields to avoid re-running Pydantic validators.
        """
        result = []
        for sw in collection:
            if not sw.fabric_management_ip:
                continue
            role = sw.switch_role
            result.append(
                {
                    "seed_ip": sw.fabric_management_ip,
                    "role": getattr(role, "value", str(role)) if role else "leaf",
                    "auth_proto": "MD5",
                    "preserve_config": False,
                    "username": "",
                    "password": "",
                }
            )
        return result

    def _proposed_to_config_list(self, configs: List["SwitchConfigModel"]) -> List[Dict[str, Any]]:
        """Serialize proposed configs for output, stripping internal fields and masking passwords."""
        result = []
        for cfg in configs:
            try:
                entry = cfg.to_config()
                entry.pop("platform_type", None)
                entry.pop("operation_type", None)
                entry["password"] = ""
                result.append(entry)
            except Exception as exc:
                self.log.warning("Could not convert config %s for output: %s", cfg.seed_ip, exc)
        return result

    def _build_check_mode_output(self) -> Dict[str, Any]:
        """Build before/after/diff/changed output for check mode.

        Since no API writes are issued in check mode, ``self.sent`` and
        ``self.sent_adds`` are always empty. This method derives the same
        information directly from the action plan (``self._plan``) and the
        real pre-operation inventory snapshot (``self.before``).

        For ``deleted`` state the plan may be ``None`` (no config supplied),
        so the entire existing inventory is treated as the deletion target.

        Returns:
            Dict suitable for merging into the final ``exit_json`` payload,
            containing ``before``, ``after``, ``diff``, and ``changed``.
        """
        before_list = self._inventory_to_config_list(self.before)
        existing_by_ip = {sw.fabric_management_ip: sw for sw in self.before}
        diff_list: List[Dict[str, Any]] = []

        if self._plan is not None:
            plan = self._plan

            # Switches that would be deleted
            deleted_sws: List[SwitchDataModel] = list(plan.to_delete) + list(plan.to_delete_existing)
            if self.state == "deleted":
                # _handle_deleted_state fills plan.to_delete only for
                # overridden; for state=deleted the deletions come from the
                # handler's own switch-by-switch loop which we replicate here.
                deleted_sws = [
                    sw for sw in self.before if sw.fabric_management_ip in {cfg.seed_ip for cfg in (self.proposed_cfgs or [])} or not self.proposed_cfgs
                ]
            for sw in deleted_sws:
                if not sw.fabric_management_ip:
                    continue
                role = sw.switch_role
                diff_list.append(
                    {
                        "seed_ip": sw.fabric_management_ip,
                        "role": getattr(role, "value", str(role)) if role else "leaf",
                        "_action": "deleted",
                    }
                )

            # Switches that would be added (normal to_add + POAP/preprov/rma)
            adds: List[SwitchConfigModel] = (
                list(plan.to_add) + list(plan.normal_readd) + list(plan.to_bootstrap) + list(plan.to_preprovision) + list(plan.to_swap) + list(plan.to_rma)
            )
            for cfg in adds:
                try:
                    entry = cfg.to_config()
                    entry.pop("platform_type", None)
                    entry.pop("operation_type", None)
                    entry["password"] = ""
                    entry["_action"] = "added"
                    diff_list.append(entry)
                except Exception as exc:
                    self.log.warning("check_mode diff: could not convert %s: %s", cfg.seed_ip, exc)

            # Switches whose role would be updated (overridden/replaced)
            for cfg in plan.to_update:
                try:
                    entry = cfg.to_config()
                    entry.pop("platform_type", None)
                    entry.pop("operation_type", None)
                    entry["password"] = ""
                    entry["_action"] = "updated"
                    diff_list.append(entry)
                except Exception as exc:
                    self.log.warning("check_mode diff: could not convert %s: %s", cfg.seed_ip, exc)

            # Simulate the post-operation inventory for "after":
            # start from before, remove deletions, add additions as stubs
            deleted_ips = {sw.fabric_management_ip for sw in deleted_sws}
            after_list = [e for e in before_list if e.get("seed_ip") not in deleted_ips]
            for cfg in adds:
                # Mirror the format produced by _inventory_to_config_list — no
                # poap/preprovision sub-blocks since those reflect the user's
                # desired discovery method, not the resulting inventory state.
                role = cfg.role
                after_list.append(
                    {
                        "seed_ip": cfg.seed_ip,
                        "role": getattr(role, "value", str(role)) if role else "leaf",
                        "auth_proto": "MD5",
                        "preserve_config": bool(getattr(cfg, "preserve_config", False)),
                        "username": "",
                        "password": "",
                    }
                )
            # Apply role updates in-place
            update_role_map = {cfg.seed_ip: cfg for cfg in plan.to_update}
            for entry in after_list:
                ip = entry.get("seed_ip")
                if ip in update_role_map:
                    role = update_role_map[ip].role
                    entry["role"] = getattr(role, "value", str(role)) if role else entry.get("role")
        else:
            # deleted state with no config — would delete everything
            after_list = []
            for sw in self.before:
                if not sw.fabric_management_ip:
                    continue
                role = sw.switch_role
                diff_list.append(
                    {
                        "seed_ip": sw.fabric_management_ip,
                        "role": getattr(role, "value", str(role)) if role else "leaf",
                        "_action": "deleted",
                    }
                )

        changed = bool(diff_list)
        output_level = self.module.params.get("output_level", "normal")
        result: Dict[str, Any] = {
            "output_level": output_level,
            "changed": changed,
            "before": before_list,
            "after": after_list,
            "diff": diff_list,
        }
        if output_level in ("info", "debug"):
            result["proposed"] = self._proposed_to_config_list(self.proposed_cfgs)
        return result

    def exit_json(self) -> None:
        """Finalize collected results and exit the Ansible module.

        Includes operation logs and previous/current inventory snapshots in the
        final response payload.

        Returns:
            None.
        """
        self.results.build_final_result()
        final = self.results.final_result

        if self.state == "gathered":
            # gathered: expose the already-queried inventory in config shape.
            # No re-query needed — nothing was changed.
            gathered = []
            for sw in self.existing:
                # Fix: the original `except (ValueError, Exception)` was redundant —
                # ValueError is a subclass of Exception, so the tuple collapses to
                # a plain Exception catch.
                try:
                    gathered.append(SwitchConfigModel.from_switch_data(sw).to_gathered_dict())
                except Exception as exc:
                    msg = f"Failed to convert switch {sw.switch_id!r} to gathered format: {exc}"
                    self.log.error(msg)
                    self.nd.module.fail_json(msg=msg)
            self.output.assign(after=self.existing)
            final.update(self.output.format(gathered=gathered))
        elif self.nd.module.check_mode:
            final.update(self._build_check_mode_output())
        else:
            # Re-query the fabric to get the actual post-operation inventory so
            # that "after" reflects real state rather than the pre-op snapshot.
            if True not in self.results.failed:
                self.existing = NDConfigCollection.from_api_response(
                    response_data=self._query_all_switches(),
                    model_class=SwitchDataModel,
                )
            # Build diff: deletes (from self.sent) + adds (from self.sent_adds)
            diff_list: List[Dict[str, Any]] = []
            for sw in self.sent:
                if not sw.fabric_management_ip:
                    continue
                role = sw.switch_role
                entry = {
                    "seed_ip": sw.fabric_management_ip,
                    "role": getattr(role, "value", str(role)) if role else "leaf",
                    "auth_proto": "MD5",
                    "preserve_config": False,
                    "username": "",
                    "password": "",
                    "_action": "deleted",
                }
                diff_list.append(entry)
            for cfg in self.sent_adds:
                try:
                    entry = cfg.to_config()
                    entry.pop("platform_type", None)
                    entry.pop("operation_type", None)
                    entry["password"] = ""
                    entry["_action"] = "added"
                    diff_list.append(entry)
                except Exception as exc:
                    self.log.warning("Could not convert added config for diff: %s", exc)
            output_level = self.module.params.get("output_level", "normal")
            fmt_kwargs: Dict[str, Any] = {
                "before": self._inventory_to_config_list(self.before),
                "after": self._inventory_to_config_list(self.existing),
                "diff": diff_list,
            }
            if output_level in ("info", "debug"):
                fmt_kwargs["proposed"] = self._proposed_to_config_list(self.proposed_cfgs)
            self.output.assign(before=self.before, after=self.existing)
            final.update(self.output.format(**fmt_kwargs))

        if self.msg:
            final["msg"] = self.msg
        if True in self.results.failed:
            self.nd.module.fail_json(**final)
        self.nd.module.exit_json(**final)

    # =====================================================================
    # Public API – State Management
    # =====================================================================

    def manage_state(self) -> None:
        """Dispatch the requested module state to the appropriate workflow.

        Unified entry point for all states. The flow is:

        1. Validate and route simple states (gathered, deleted).
        2. Validate the full config, enforce state constraints.
        3. Call ``compute_changes`` with **all** configs in one pass — this
           classifies normal, POAP/preprovision, swap, and RMA configs against
           the current fabric inventory and handles idempotency.
        4. Discover all switches that need it in **one combined call**.
        5. Delegate to the appropriate state handler with the populated plan
           and the single ``discovered_data`` dict.

        Returns:
            None.
        """
        self.log.info("Managing state: %s", self.state)

        # gathered — read-only, no config accepted
        if self.state == "gathered":
            if self.config:
                self.nd.module.fail_json(msg="'config' must not be provided for 'gathered' state.")
            return self._handle_gathered_state()

        # deleted — config is optional; handled separately (lighter path)
        if self.state == "deleted":
            proposed_config = SwitchDiffEngine.validate_configs(self.config, self.state, self.nd, self.log) if self.config else None
            return self._handle_deleted_state(proposed_config)

        # merged / replaced — config required
        if self.state in ("merged", "replaced") and not self.config:
            self.nd.module.fail_json(msg=f"'config' is required for '{self.state}' state.")

        # overridden with no/empty config — delete everything
        if self.state == "overridden" and not self.config:
            self.log.info("Overridden state with no config — deleting all switches from fabric")
            return self._handle_deleted_state(None)

        # --- Validate & classify ------------------------------------------------
        proposed_config = SwitchDiffEngine.validate_configs(self.config, self.state, self.nd, self.log)

        # Enforce state constraints (removed an unused poap_configs local that
        # was computed here but never read).
        rma_configs = [c for c in proposed_config if c.operation_type == "rma"]
        if rma_configs and self.state != "merged":
            self.nd.module.fail_json(msg="RMA configs are only supported with state=merged")

        # Capture all proposed configs for NDOutput
        output_proposed: NDConfigCollection = NDConfigCollection(model_class=SwitchConfigModel)
        for cfg in proposed_config:
            output_proposed.add(cfg)
        self.output.assign(proposed=output_proposed)
        self.proposed_cfgs = list(proposed_config)

        # Classify all configs in one pass — idempotency included
        plan = SwitchDiffEngine.compute_changes(proposed_config, list(self.existing), self.log)
        self._plan = plan

        # --- Single combined discovery pass -------------------------------------
        # Discover every switch that is not yet in the fabric:
        #   • plan.to_add — normal switches not in inventory
        #   • plan.normal_readd — POAP/preprov mismatches that are reachable
        # Switches already in the fabric (to_update, migration_mode) are
        # skipped here; overridden will re-discover them after deletion.
        #
        # In check mode, discovery is skipped entirely: new switches are not
        # yet reachable/enrolled so shallow discovery would fail or return no
        # data. The per-state check-mode guards handle reporting via the diff.
        configs_to_discover = plan.to_add + plan.normal_readd
        if configs_to_discover:
            if self.nd.module.check_mode:
                self.log.info(
                    "Check mode: skipping discovery for %s switch(es) (%s normal-add, %s poap-readd) — assuming to_add",
                    len(configs_to_discover),
                    len(plan.to_add),
                    len(plan.normal_readd),
                )
                discovered_data = {}
            else:
                self.log.info(
                    "Discovering %s switch(es): %s normal-add, %s poap-readd",
                    len(configs_to_discover),
                    len(plan.to_add),
                    len(plan.normal_readd),
                )
                discovered_data = self.discovery.discover(configs_to_discover)
        else:
            self.log.info("No switches need discovery in this run")
            discovered_data = {}

        # Build proposed SwitchDataModel collection for normal switches only
        # (needed for the self.proposed reference used in check-mode reporting).
        # Skipped in check mode since discovered_data is empty for new switches.
        normal_configs = [c for c in proposed_config if c.operation_type == "normal"]
        if normal_configs and not self.nd.module.check_mode:
            built = self.discovery.build_proposed(normal_configs, discovered_data, list(self.existing))
            self.proposed = NDConfigCollection(model_class=SwitchDataModel, items=built)

        # --- Dispatch -----------------------------------------------------------
        if self.state == "merged":
            self._handle_merged_state(plan, discovered_data)
        elif self.state == "replaced":
            self._handle_replaced_state(plan, discovered_data)
        elif self.state == "overridden":
            self._handle_overridden_state(plan, discovered_data)
        else:
            self.nd.module.fail_json(msg=f"Unsupported state: {self.state}")

    # =====================================================================
    # State Handlers (orchestration only — delegate to services)
    # =====================================================================

    def _check_idempotent_sync(
        self,
        plan: "SwitchPlan",
        existing_by_ip: Dict[str, "SwitchDataModel"],
    ) -> bool:
        """Return True if any non-preprovision idempotent switch is out of config-sync.

        Pre-provisioned switches are placeholder entries that are never
        in-sync by design and are excluded from this check. Only relevant
        when deploy is enabled; returns False immediately otherwise.

        Args:
            plan: Action plan from :meth:`SwitchDiffEngine.compute_changes`.
            existing_by_ip: Existing switches keyed by fabric management IP.

        Returns:
            True if finalize should run for idempotent switches, False otherwise.
        """
        if not self.ctx.deploy_config:
            return False
        for cfg in plan.idempotent:
            if cfg.operation_type == "preprovision":
                continue
            sw = existing_by_ip.get(cfg.seed_ip)
            status = sw.additional_data.config_sync_status if sw and sw.additional_data else None
            if status != ConfigSyncStatus.IN_SYNC:
                self.log.info(
                    "Switch %s is idempotent but configSyncStatus='%s' — will finalize",
                    cfg.seed_ip,
                    getattr(status, "value", status) if status else "unknown",
                )
                return True
        return False

    def _handle_merged_state(
        self,
        plan: "SwitchPlan",
        discovered_data: Dict[str, Dict[str, Any]],
    ) -> None:
        """Handle merged-state workflows for all operation types.

        Processes normal adds, migration-mode switches, POAP bootstrap,
        pre-provision, swap, normal re-adds, and RMA in a single pass.
        Normal switches that require field-level updates fail fast; use
        ``overridden`` state for in-place updates.

        Args:
            plan: Unified action plan from :meth:`SwitchDiffEngine.compute_changes`.
            discovered_data: Discovery data keyed by seed IP for all switches
                that required discovery this run.

        Returns:
            None.
        """
        self.log.debug("ENTER: _handle_merged_state()")
        self.log.info("Handling merged state")

        # Fail if any normal switches need field-level updates
        if plan.to_update:
            ips = [cfg.seed_ip for cfg in plan.to_update]
            self.nd.module.fail_json(
                msg=(
                    f"Switches require role updates not supported in merged state. "
                    f"Use 'overridden' state for in-place updates. "
                    f"Affected switches: {ips}"
                )
            )

        # Fail if any POAP/preprovision switches already in fabric differ on
        # one or more of: serial, role, model, version, hostname —
        # delete+re-provision is destructive and only permitted in overridden state.
+ if plan.to_delete_existing: + ips = [sw.fabric_management_ip for sw in plan.to_delete_existing] + self.nd.module.fail_json( + msg=( + f"POAP/preprovision switches already in fabric have a " + f"field mismatch (serial, role, model, version, or hostname) " + f"and require delete + re-provision. " + f"Use 'overridden' state to apply this change. " + f"Affected switches: {ips}" + ) + ) + + # Check whether any idempotent switch (normal or POAP) is out of + # config-sync and needs a deploy without a re-add. + # Pre-provisioned switches are placeholder entries that are never + # in-sync by design, so they are excluded from this check. Only relevant when deploy is enabled. + existing_by_ip = {sw.fabric_management_ip: sw for sw in self.existing} + idempotent_save_req = self._check_idempotent_sync(plan, existing_by_ip) + + has_work = bool( + plan.to_add + or plan.migration_mode + or plan.to_bootstrap + or plan.normal_readd + or plan.to_preprovision + or plan.to_swap + or plan.to_rma + or idempotent_save_req + ) + if not has_work: + self.log.info("merged: nothing to do — all switches idempotent") + self.msg = "No switches to merge — fabric already matches desired config" + return + + # Check mode + if self.nd.module.check_mode: + self.log.info( + "Check mode: add=%s, migrate=%s, bootstrap=%s, readd=%s, preprov=%s, swap=%s, rma=%s, save_deploy=%s", + len(plan.to_add), + len(plan.migration_mode), + len(plan.to_bootstrap), + len(plan.normal_readd), + len(plan.to_preprovision), + len(plan.to_swap), + len(plan.to_rma), + idempotent_save_req, + ) + self.results.action = "merge" + self.results.state = self.state + self.results.operation_type = OperationType.CREATE + self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} + self.results.result_current = {"success": True, "changed": False} + self.results.diff_current = { + "to_add": [c.seed_ip for c in plan.to_add], + "migration_mode": [c.seed_ip for c in plan.migration_mode], + "bootstrap": 
[c.seed_ip for c in plan.to_bootstrap], + "normal_readd": [c.seed_ip for c in plan.normal_readd], + "preprovision": [c.seed_ip for c in plan.to_preprovision], + "swap": [c.seed_ip for c in plan.to_swap], + "rma": [c.seed_ip for c in plan.to_rma], + "save_deploy_required": idempotent_save_req, + } + self.results.register_api_call() + return + + # --- Normal + normal_readd bulk_add (one combined pass) ----------------- + add_configs = plan.to_add + plan.normal_readd + switch_actions: List[Tuple[str, SwitchConfigModel]] = [] + have_migration = bool(plan.migration_mode) + + if add_configs and discovered_data: + credential_groups = group_switches_by_credentials(add_configs, self.log) + for group_key, group_switches in credential_groups.items(): + username, _pw_hash, auth_proto, platform_type, preserve_config = group_key + password = group_switches[0].password + pairs = [(cfg, discovered_data[cfg.seed_ip]) for cfg in group_switches if cfg.seed_ip in discovered_data] + if not pairs: + self.log.warning( + "No discovery data for group %s — skipping bulk_add", + [cfg.seed_ip for cfg in group_switches], + ) + continue + self.fabric_ops.bulk_add( + switches=pairs, + username=username, + password=password, + auth_proto=auth_proto, + platform_type=platform_type, + preserve_config=preserve_config, + ) + for cfg, disc in pairs: + sn = disc.get("serialNumber") + if sn: + switch_actions.append((sn, cfg)) + self._log_operation("add", cfg.seed_ip) + self.sent_adds.append(cfg) + + # Migration-mode switches — no add needed, but role + finalize applies + for cfg in plan.migration_mode: + sw = existing_by_ip.get(cfg.seed_ip) + if sw and sw.switch_id: + switch_actions.append((sw.switch_id, cfg)) + self._log_operation("migrate", cfg.seed_ip) + self.sent_adds.append(cfg) + + if switch_actions: + all_preserve_config = all(cfg.preserve_config for _sn, cfg in switch_actions) + if all_preserve_config: + self.log.info("All switches brownfield (preserve_config=True) — reload detection skipped") + 
self.fabric_ops.post_add_processing( + switch_actions, + wait_utils=self.wait_utils, + context="merged", + all_preserve_config=all_preserve_config, + update_roles=have_migration, + ) + elif idempotent_save_req: + self.log.info("No adds/migrations but config-sync required — running finalize") + sync_serials = [ + existing_by_ip[cfg.seed_ip].switch_id for cfg in plan.idempotent if cfg.seed_ip in existing_by_ip and existing_by_ip[cfg.seed_ip].switch_id + ] + self.fabric_ops.finalize(serial_numbers=sync_serials) + + # --- POAP / preprovision / swap / RMA ----------------------------------- + # normal_readd was already processed via bulk_add above. + # Only route the pure POAP-workflow configs to the handler. + poap_workflow_configs = plan.to_bootstrap + plan.to_preprovision + plan.to_swap + if poap_workflow_configs: + self.sent_adds.extend(poap_workflow_configs) + self.poap_handler.handle(poap_workflow_configs, list(self.existing)) + if plan.to_rma: + self.sent_adds.extend(plan.to_rma) + self.rma_handler.handle(plan.to_rma, list(self.existing)) + + self.log.debug("EXIT: _handle_merged_state()") + + def _handle_overridden_state( + self, + plan: "SwitchPlan", + discovered_data: Dict[str, Dict[str, Any]], + ) -> None: + """Handle overridden-state reconciliation for the fabric. + + Reconciles the fabric to match exactly the desired config. Switches + in the fabric that have no config entry are deleted. POAP/preprovision + switches at ``plan.poap_ips`` are excluded from the cleanup sweep. + Normal switches with field differences are deleted and re-added. + + Args: + plan: Unified action plan from :meth:`SwitchDiffEngine.compute_changes`. + discovered_data: Discovery data keyed by seed IP. + + Returns: + None. 
+ """ + self.log.debug("ENTER: _handle_overridden_state()") + self.log.info("Handling overridden state") + + existing_by_ip = {sw.fabric_management_ip: sw for sw in self.existing} + idempotent_save_req = self._check_idempotent_sync(plan, existing_by_ip) + + has_work = bool( + plan.to_add + or plan.to_update + or plan.to_delete + or plan.migration_mode + or plan.to_bootstrap + or plan.normal_readd + or plan.to_preprovision + or plan.to_swap + or idempotent_save_req + ) + if not has_work: + self.log.info("overridden: nothing to do") + self.msg = "No switches to override — fabric already matches desired config" + return + + # Check mode + if self.nd.module.check_mode: + self.log.info( + "Check mode: delete_orphans=%s, update=%s, add=%s, migrate=%s, bootstrap=%s, readd=%s, preprov=%s, swap=%s, save_deploy=%s", + len(plan.to_delete), + len(plan.to_update), + len(plan.to_add), + len(plan.migration_mode), + len(plan.to_bootstrap), + len(plan.normal_readd), + len(plan.to_preprovision), + len(plan.to_swap), + idempotent_save_req, + ) + self.results.action = "override" + self.results.state = self.state + self.results.operation_type = OperationType.CREATE + self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} + self.results.result_current = {"success": True, "changed": False} + self.results.diff_current = { + "to_delete": len(plan.to_delete) + len(plan.to_delete_existing), + "to_update": len(plan.to_update), + "to_add": len(plan.to_add), + "migration_mode": len(plan.migration_mode), + "bootstrap": len(plan.to_bootstrap), + "normal_readd": len(plan.normal_readd), + "preprovision": len(plan.to_preprovision), + "swap": len(plan.to_swap), + "save_deploy_required": idempotent_save_req, + } + self.results.register_api_call() + return + + # --- Phase 1: Combined delete ------------------------------------------- + # Merge three sources of deletions into one bulk_delete call: + # a) Orphans (in fabric, not in any config) + # b) POAP/preprovision 
mismatches (to_delete_existing from compute_changes) + # c) Normal switches that need field updates (to_update) + switches_to_delete: List[SwitchDataModel] = list(plan.to_delete) + for sw in plan.to_delete: + self._log_operation("delete", sw.fabric_management_ip) + + for sw in plan.to_delete_existing: + self.log.info("Deleting POAP/preprovision mismatch %s before re-add", sw.fabric_management_ip) + switches_to_delete.append(sw) + self._log_operation("delete", sw.fabric_management_ip) + + update_ips: set = set() + for cfg in plan.to_update: + sw = existing_by_ip.get(cfg.seed_ip) + if sw: + self.log.info("Deleting normal switch %s for field update re-add", cfg.seed_ip) + switches_to_delete.append(sw) + update_ips.add(cfg.seed_ip) + self._log_operation("delete_for_update", cfg.seed_ip) + + if switches_to_delete: + try: + self.fabric_ops.bulk_delete(switches_to_delete) + except SwitchOperationError as e: + msg = f"Failed to delete switches during overridden state: {e}" + self.log.error(msg) + self.nd.module.fail_json(msg=msg) + for sw in switches_to_delete: + self.sent.add(sw) + + # --- Phase 2: Re-discover updated normal switches ----------------------- + # to_update configs were already discovered (they were in-fabric) but + # we deleted them; re-discover so bulk_add has current data. 
+ re_discover_configs = [cfg for cfg in plan.to_update if cfg.seed_ip in update_ips] + if re_discover_configs: + self.log.info( + "Re-discovering %s updated switch(es) after deletion", + len(re_discover_configs), + ) + fresh = self.discovery.discover(re_discover_configs) + discovered_data = {**discovered_data, **fresh} + + # --- Phase 3: Combined add (normal to_add + to_update + normal_readd) --- + add_configs = plan.to_add + plan.to_update + plan.normal_readd + switch_actions: List[Tuple[str, SwitchConfigModel]] = [] + have_migration = bool(plan.migration_mode) + + if add_configs and discovered_data: + credential_groups = group_switches_by_credentials(add_configs, self.log) + for group_key, group_switches in credential_groups.items(): + username, _pw_hash, auth_proto, platform_type, preserve_config = group_key + password = group_switches[0].password + pairs = [(cfg, discovered_data[cfg.seed_ip]) for cfg in group_switches if cfg.seed_ip in discovered_data] + if not pairs: + self.log.warning( + "No discovery data for group %s — skipping", + [cfg.seed_ip for cfg in group_switches], + ) + continue + self.fabric_ops.bulk_add( + switches=pairs, + username=username, + password=password, + auth_proto=auth_proto, + platform_type=platform_type, + preserve_config=preserve_config, + ) + for cfg, disc in pairs: + sn = disc.get("serialNumber") + if sn: + switch_actions.append((sn, cfg)) + self._log_operation("add", cfg.seed_ip) + self.sent_adds.append(cfg) + + for cfg in plan.migration_mode: + sw = existing_by_ip.get(cfg.seed_ip) + if sw and sw.switch_id: + switch_actions.append((sw.switch_id, cfg)) + self._log_operation("migrate", cfg.seed_ip) + self.sent_adds.append(cfg) + + if switch_actions: + all_preserve_config = all(cfg.preserve_config for _sn, cfg in switch_actions) + self.fabric_ops.post_add_processing( + switch_actions, + wait_utils=self.wait_utils, + context="overridden", + all_preserve_config=all_preserve_config, + update_roles=have_migration, + ) + elif 
idempotent_save_req: + self.log.info("No adds/migrations but config-sync required — running finalize") + sync_serials = [ + existing_by_ip[cfg.seed_ip].switch_id for cfg in plan.idempotent if cfg.seed_ip in existing_by_ip and existing_by_ip[cfg.seed_ip].switch_id + ] + self.fabric_ops.finalize(serial_numbers=sync_serials) + + # --- Phase 4: POAP workflows (bootstrap / preprovision / swap) ---------- + # plan.to_delete_existing was deleted in Phase 1. + # Route pure POAP-workflow configs to the handler. + poap_workflow_configs = plan.to_bootstrap + plan.to_preprovision + plan.to_swap + if poap_workflow_configs: + self.sent_adds.extend(poap_workflow_configs) + self.poap_handler.handle(poap_workflow_configs, list(self.existing)) + + self.log.debug("EXIT: _handle_overridden_state()") + + def _handle_replaced_state( + self, + plan: "SwitchPlan", + discovered_data: Dict[str, Any], + ) -> None: + """Handle replaced-state reconciliation for the fabric. + + Reconciles only the switches listed in the desired config. Field + differences trigger delete and re-add, and POAP/preprovision mismatches + are also re-provisioned. + + Args: + plan: Unified action plan from :meth:`SwitchDiffEngine.compute_changes`. + discovered_data: Discovery data keyed by seed IP. + + Returns: + None. 
+ """ + self.log.debug("ENTER: _handle_replaced_state()") + self.log.info("Handling replaced state") + + existing_by_ip = {sw.fabric_management_ip: sw for sw in self.existing} + idempotent_save_req = self._check_idempotent_sync(plan, existing_by_ip) + + has_work = bool( + plan.to_add + or plan.to_update + or plan.to_delete_existing + or plan.migration_mode + or plan.to_bootstrap + or plan.normal_readd + or plan.to_preprovision + or plan.to_swap + or idempotent_save_req + ) + if not has_work: + self.log.info("replaced: nothing to do") + self.msg = "No switches to replace — fabric already matches desired config" + return + + # Check mode + if self.nd.module.check_mode: + self.log.info( + "Check mode: poap_mismatch_delete=%s, update=%s, add=%s, migrate=%s, bootstrap=%s, readd=%s, preprov=%s, swap=%s, save_deploy=%s", + len(plan.to_delete_existing), + len(plan.to_update), + len(plan.to_add), + len(plan.migration_mode), + len(plan.to_bootstrap), + len(plan.normal_readd), + len(plan.to_preprovision), + len(plan.to_swap), + idempotent_save_req, + ) + self.results.action = "replace" + self.results.state = self.state + self.results.operation_type = OperationType.CREATE + self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} + self.results.result_current = {"success": True, "changed": False} + self.results.diff_current = { + "to_delete": len(plan.to_delete_existing), + "to_update": len(plan.to_update), + "to_add": len(plan.to_add), + "migration_mode": len(plan.migration_mode), + "bootstrap": len(plan.to_bootstrap), + "normal_readd": len(plan.normal_readd), + "preprovision": len(plan.to_preprovision), + "swap": len(plan.to_swap), + "save_deploy_required": idempotent_save_req, + } + self.results.register_api_call() + return + + # --- Phase 1: Combined delete ------------------------------------------- + # Two sources of deletions (orphans intentionally excluded): + # a) POAP/preprovision mismatches (to_delete_existing from compute_changes) + # 
b) Normal switches that need field updates (to_update) + switches_to_delete: List[SwitchDataModel] = [] + + for sw in plan.to_delete_existing: + self.log.info("Deleting POAP/preprovision mismatch %s before re-add", sw.fabric_management_ip) + switches_to_delete.append(sw) + self._log_operation("delete", sw.fabric_management_ip) + + update_ips: set = set() + for cfg in plan.to_update: + sw = existing_by_ip.get(cfg.seed_ip) + if sw: + self.log.info("Deleting normal switch %s for field update re-add", cfg.seed_ip) + switches_to_delete.append(sw) + update_ips.add(cfg.seed_ip) + self._log_operation("delete_for_update", cfg.seed_ip) + + if switches_to_delete: + try: + self.fabric_ops.bulk_delete(switches_to_delete) + except SwitchOperationError as e: + msg = f"Failed to delete switches during replaced state: {e}" + self.log.error(msg) + self.nd.module.fail_json(msg=msg) + for sw in switches_to_delete: + self.sent.add(sw) + + # --- Phase 2: Re-discover updated normal switches ----------------------- + re_discover_configs = [cfg for cfg in plan.to_update if cfg.seed_ip in update_ips] + if re_discover_configs: + self.log.info( + "Re-discovering %s updated switch(es) after deletion", + len(re_discover_configs), + ) + fresh = self.discovery.discover(re_discover_configs) + discovered_data = {**discovered_data, **fresh} + + # --- Phase 3: Combined add (normal to_add + to_update + normal_readd) --- + add_configs = plan.to_add + plan.to_update + plan.normal_readd + switch_actions: List[Tuple[str, SwitchConfigModel]] = [] + have_migration = bool(plan.migration_mode) + + if add_configs and discovered_data: + credential_groups = group_switches_by_credentials(add_configs, self.log) + for group_key, group_switches in credential_groups.items(): + username, _pw_hash, auth_proto, platform_type, preserve_config = group_key + password = group_switches[0].password + pairs = [(cfg, discovered_data[cfg.seed_ip]) for cfg in group_switches if cfg.seed_ip in discovered_data] + if not pairs: + 
self.log.warning( + "No discovery data for group %s — skipping", + [cfg.seed_ip for cfg in group_switches], + ) + continue + self.fabric_ops.bulk_add( + switches=pairs, + username=username, + password=password, + auth_proto=auth_proto, + platform_type=platform_type, + preserve_config=preserve_config, + ) + for cfg, disc in pairs: + sn = disc.get("serialNumber") + if sn: + switch_actions.append((sn, cfg)) + self._log_operation("add", cfg.seed_ip) + self.sent_adds.append(cfg) + + for cfg in plan.migration_mode: + sw = existing_by_ip.get(cfg.seed_ip) + if sw and sw.switch_id: + switch_actions.append((sw.switch_id, cfg)) + self._log_operation("migrate", cfg.seed_ip) + self.sent_adds.append(cfg) + + if switch_actions: + all_preserve_config = all(cfg.preserve_config for _sn, cfg in switch_actions) + self.fabric_ops.post_add_processing( + switch_actions, + wait_utils=self.wait_utils, + context="replaced", + all_preserve_config=all_preserve_config, + update_roles=have_migration, + ) + elif idempotent_save_req: + self.log.info("No adds/migrations but config-sync required — running finalize") + sync_serials = [ + existing_by_ip[cfg.seed_ip].switch_id for cfg in plan.idempotent if cfg.seed_ip in existing_by_ip and existing_by_ip[cfg.seed_ip].switch_id + ] + self.fabric_ops.finalize(serial_numbers=sync_serials) + + # --- Phase 4: POAP workflows (bootstrap / preprovision / swap) ---------- + poap_workflow_configs = plan.to_bootstrap + plan.to_preprovision + plan.to_swap + if poap_workflow_configs: + self.sent_adds.extend(poap_workflow_configs) + self.poap_handler.handle(poap_workflow_configs, list(self.existing)) + + self.log.debug("EXIT: _handle_replaced_state()") + + def _handle_gathered_state(self) -> None: + """Handle gathered-state read of the fabric inventory. + + No API writes are performed. The existing inventory is serialised into + SwitchConfigModel shape by exit_json(). This method only records the + result metadata so that Results aggregation works correctly. 
+ + Returns: + None. + """ + self.log.debug("ENTER: _handle_gathered_state()") + self.log.info("Gathering inventory for fabric '%s'", self.fabric) + + if not self.existing: + self.log.info("Fabric '%s' has no switches in inventory", self.fabric) + + self.results.action = "gathered" + self.results.state = self.state + self.results.operation_type = OperationType.QUERY + self.results.response_current = {"MESSAGE": "gathered", "RETURN_CODE": 200} + self.results.result_current = {"success": True, "changed": False} + self.results.diff_current = {} + self.results.register_api_call() + + self.log.info( + "Gathered %s switch(es) from fabric '%s'", + len(list(self.existing)), + self.fabric, + ) + self.log.debug("EXIT: _handle_gathered_state()") + + def _handle_deleted_state( + self, + proposed_config: Optional[List[SwitchConfigModel]] = None, + ) -> None: + """Handle deleted-state switch removal. + + Matches switches to delete by ``seed_ip`` and optionally ``role``. + POAP/preprovision sub-config blocks (``poap``, ``preprovision``) are + ignored; only ``seed_ip`` and ``role`` matter. When no config is + provided, all switches in the fabric are deleted. + + Args: + proposed_config: Optional config list that limits deletion scope. + Pass ``None`` to delete all switches. + + Returns: + None. 
+ """ + self.log.debug("ENTER: _handle_deleted_state()") + self.log.info("Handling deleted state") + + if proposed_config is None: + switches_to_delete = list(self.existing) + self.log.info( + "No proposed config — targeting all %s existing switch(es) for deletion", + len(switches_to_delete), + ) + for sw in switches_to_delete: + self._log_operation("delete", sw.fabric_management_ip) + else: + existing_by_ip = {sw.fabric_management_ip: sw for sw in self.existing} + switches_to_delete: List[SwitchDataModel] = [] + for cfg in proposed_config: + existing_sw = existing_by_ip.get(cfg.seed_ip) + if not existing_sw: + self.log.info("deleted: switch %s not in fabric — skipping", cfg.seed_ip) + continue + # Role filter: if config specifies a role, only delete if it matches + if cfg.role is not None and cfg.role != existing_sw.switch_role: + self.log.info( + "deleted: switch %s role mismatch (config=%s, fabric=%s) — skipping", + cfg.seed_ip, + cfg.role, + existing_sw.switch_role, + ) + continue + self.log.info( + "deleted: marking %s (%s) for deletion", + cfg.seed_ip, + existing_sw.switch_id, + ) + switches_to_delete.append(existing_sw) + self._log_operation("delete", cfg.seed_ip) + + self.log.info("Total switches marked for deletion: %s", len(switches_to_delete)) + if not switches_to_delete: + self.log.info("No switches to delete") + self.msg = "No switches to delete - fabric already matches desired config" + return + + # Check mode + if self.nd.module.check_mode: + self.log.info("Check mode: would delete %s switch(es)", len(switches_to_delete)) + self.results.action = "delete" + self.results.state = self.state + self.results.operation_type = OperationType.DELETE + self.results.response_current = {"MESSAGE": "check mode — skipped", "RETURN_CODE": 200} + self.results.result_current = {"success": True, "changed": False} + self.results.diff_current = { + "to_delete": [sw.fabric_management_ip for sw in switches_to_delete], + } + self.results.register_api_call() + return + + 
self.log.info("Proceeding to delete %s switch(es) from fabric", len(switches_to_delete)) + self.fabric_ops.bulk_delete(switches_to_delete) + for sw in switches_to_delete: + self.sent.add(sw) + self.log.debug("EXIT: _handle_deleted_state()") + + # ===================================================================== + # Query Helpers + # ===================================================================== + + def _query_all_switches(self) -> List[Dict[str, Any]]: + """Query all switches from the fabric inventory API. + + Returns: + List of raw switch dictionaries returned by the controller. + """ + endpoint = EpManageFabricsSwitchesGet() + endpoint.fabric_name = self.fabric + self.log.debug("Querying all switches with endpoint: %s", endpoint.path) + self.log.debug("Query verb: %s", endpoint.verb) + + # GETs must reach the real controller even when Ansible runs with --check. + # Temporarily override check_mode to False so RestSend sends the real + # request instead of returning a simulated response, then restore it. + in_check_mode = self.nd.module.check_mode + if in_check_mode: + self.nd.rest_send_check_mode = False + try: + result = self.nd.request(path=endpoint.path, verb=endpoint.verb) + except Exception as e: + msg = f"Failed to query switches from fabric '{self.fabric}': {e}" + self.log.error(msg) + self.nd.module.fail_json(msg=msg) + finally: + if in_check_mode: + self.nd.rest_send_check_mode = True + + # nd.request() returns response["DATA"] directly. For a 404, the + # controller embeds the error as {"code": 404, "message": "Fabric not found"} + # inside DATA. RestSend treats GET 404 as success=True/found=False so no + # exception is raised — detect it here from the returned data itself. 
+ ApiDataChecker.check(result, f"Query switches from fabric '{self.fabric}'", self.log, self.nd.module.fail_json) + + if isinstance(result, list): + switches = result + elif isinstance(result, dict): + switches = result.get("switches", []) + else: + switches = [] + + self.log.debug("Queried %s switches from fabric %s", len(switches), self.fabric) + return switches + + # ===================================================================== + # Operation Tracking + # ===================================================================== + + def _log_operation(self, operation: str, identifier: str) -> None: + """Append a successful operation record to the module log. + + Args: + operation: Operation label. + identifier: Switch identifier for the operation. + + Returns: + None. + """ + self.nd_logs.append( + { + "operation": operation, + "identifier": identifier, + "status": "success", + } + ) diff --git a/plugins/module_utils/manage_switches/utils.py b/plugins/module_utils/manage_switches/utils.py new file mode 100644 index 000000000..d7094adde --- /dev/null +++ b/plugins/module_utils/manage_switches/utils.py @@ -0,0 +1,886 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Utility helpers for nd_manage_switches: exceptions, fabric operations, +payload construction, credential grouping, bootstrap queries, and +multi-phase switch wait utilities. 
+""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import logging +import time +from copy import deepcopy +from typing import Any, Dict, List, Optional, Tuple + +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_bootstrap import ( + EpManageFabricsBootstrapGet, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_inventory import ( + EpManageFabricsInventoryDiscoverGet, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_switches import ( + EpManageFabricsSwitchesGet, +) +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_switchactions import ( + EpManageFabricsSwitchActionsRediscoverPost, +) +from ansible_collections.cisco.nd.plugins.module_utils.utils import ( + FabricUtils, + SwitchOperationError, +) + +# ========================================================================= +# Payload Utilities +# ========================================================================= + + +def mask_password(payload: Dict[str, Any]) -> Dict[str, Any]: + """Return a deep copy of *payload* with password fields masked. + + Useful for safe logging of API payloads that contain credentials. + + Args: + payload: API payload dict (may contain ``password`` keys). + + Returns: + Copy with every ``password`` value replaced by ``"********"``. + """ + masked = deepcopy(payload) + if "password" in masked: + masked["password"] = "********" + if isinstance(masked.get("switches"), list): + for switch in masked["switches"]: + if isinstance(switch, dict) and "password" in switch: + switch["password"] = "********" + return masked + + +class PayloadUtils: + """Stateless helper for building ND Switch Resource API request payloads.""" + + def __init__(self, logger: Optional[logging.Logger] = None): + """Initialize PayloadUtils. 
+ + Args: + logger: Optional logger; defaults to ``nd.PayloadUtils``. + """ + self.log = logger or logging.getLogger("nd.PayloadUtils") + + def build_credentials_payload( + self, + serial_numbers: List[str], + username: str, + password: str, + ) -> Dict[str, Any]: + """Build payload for saving switch credentials. + + Args: + serial_numbers: Switch serial numbers. + username: Switch username. + password: Switch password. + + Returns: + Credentials API payload dict. + """ + return { + "switchIds": serial_numbers, + "username": username, + "password": password, + } + + def build_switch_ids_payload( + self, + serial_numbers: List[str], + ) -> Dict[str, Any]: + """Build payload with switch IDs for remove / batch operations. + + Args: + serial_numbers: Switch serial numbers. + + Returns: + ``{"switchIds": [...]}`` payload dict. + """ + return {"switchIds": serial_numbers} + + +# ========================================================================= +# Switch Helpers +# ========================================================================= + + +def get_switch_field( + switch, + field_names: List[str], +) -> Optional[Any]: + """Extract a field value from a switch config, trying multiple names. + + Supports Pydantic models and plain dicts with both snake_case and + camelCase key lookups. + + Args: + switch: Switch model or dict to extract from. + field_names: Candidate field names to try, in priority order. + + Returns: + First non-``None`` value found, or ``None``. 
+ """ + for name in field_names: + if hasattr(switch, name): + value = getattr(switch, name) + if value is not None: + return value + elif isinstance(switch, dict): + if name in switch and switch[name] is not None: + return switch[name] + # Try camelCase variant + camel = "".join(word.capitalize() if i > 0 else word for i, word in enumerate(name.split("_"))) + if camel in switch and switch[camel] is not None: + return switch[camel] + return None + + +def determine_operation_type(switch) -> str: + """Determine the operation type from switch configuration. + + Args: + switch: A ``SwitchConfigModel``, ``SwitchDiscoveryModel``, + or raw dict. + + Returns: + ``'normal'``, ``'poap'``, or ``'rma'``. + """ + # Pydantic model with .operation_type attribute + if hasattr(switch, "operation_type"): + return switch.operation_type + + if isinstance(switch, dict): + if "poap" in switch or "bootstrap" in switch: + return "poap" + if "rma" in switch or "old_serial" in switch or "oldSerial" in switch: + return "rma" + + return "normal" + + +def group_switches_by_credentials( + switches, + log: logging.Logger, +) -> Dict[Tuple, list]: + """Group switches by shared credentials for bulk API operations. + + Args: + switches: Validated ``SwitchConfigModel`` instances. + log: Logger. + + Returns: + Dict mapping a ``(username, password_hash, auth_proto, + platform_type, preserve_config)`` tuple to the list of switches + sharing those credentials. 
+ """ + groups: Dict[Tuple, list] = {} + + for switch in switches: + password_hash = hash(switch.password) + group_key = ( + switch.username, + password_hash, + switch.auth_proto, + switch.platform_type, + switch.preserve_config, + ) + groups.setdefault(group_key, []).append(switch) + + log.info("Grouped %s switches into %s credential group(s)", len(switches), len(groups)) + + for idx, (key, group_switches) in enumerate(groups.items(), 1): + username, _pw_hash, auth_proto, platform_type, preserve_config = key + auth_value = auth_proto.value if hasattr(auth_proto, "value") else str(auth_proto) + platform_value = platform_type.value if hasattr(platform_type, "value") else str(platform_type) + log.debug( + "Group %s: %s switches with username=%s, auth=%s, platform=%s, preserve_config=%s", + idx, + len(group_switches), + username, + auth_value, + platform_value, + preserve_config, + ) + + return groups + + +# ========================================================================= +# Bootstrap Utilities +# ========================================================================= + + +def query_bootstrap_switches( + nd, + fabric: str, + log: logging.Logger, +) -> List[Dict[str, Any]]: + """GET switches currently in the bootstrap (POAP / PnP) loop. + + Args: + nd: NDModule instance (REST client). + fabric: Fabric name. + log: Logger. + + Returns: + List of raw switch dicts from the bootstrap API. 
+ """ + log.debug("ENTER: query_bootstrap_switches()") + + endpoint = EpManageFabricsBootstrapGet() + endpoint.fabric_name = fabric + log.debug("Bootstrap endpoint: %s", endpoint.path) + + try: + result = nd.request( + path=endpoint.path, + verb=endpoint.verb, + ) + except Exception as e: + msg = f"Failed to query bootstrap switches for " f"fabric '{fabric}': {e}" + log.error(msg) + nd.module.fail_json(msg=msg) + + if isinstance(result, dict): + switches = result.get("switches", []) + elif isinstance(result, list): + switches = result + else: + switches = [] + + log.info("Bootstrap API returned %s switch(es) in POAP loop", len(switches)) + log.debug("EXIT: query_bootstrap_switches()") + return switches + + +def build_bootstrap_index( + bootstrap_switches: List[Dict[str, Any]], +) -> Dict[str, Dict[str, Any]]: + """Build a serial-number-keyed index from bootstrap API data. + + Args: + bootstrap_switches: Raw switch dicts from the bootstrap API. + + Returns: + Dict mapping ``serial_number`` -> switch dict. + """ + return {sw.get("serialNumber", sw.get("serial_number", "")): sw for sw in bootstrap_switches} + + +def build_poap_data_block(poap_cfg) -> Optional[Dict[str, Any]]: + """Build optional data block for bootstrap and pre-provision models. + + Args: + poap_cfg: ``POAPConfigModel`` from the user playbook. + + Returns: + Data block dict, or ``None`` if no ``config_data`` is present. + """ + if not poap_cfg.config_data: + return None + data_block: Dict[str, Any] = {} + gateway = poap_cfg.config_data.gateway + if gateway: + data_block["gatewayIpMask"] = gateway + if poap_cfg.config_data.models: + data_block["models"] = poap_cfg.config_data.models + return data_block or None + + +# ========================================================================= +# Switch Wait Utilities +# ========================================================================= + + +class SwitchWaitUtils: + """Multi-phase wait utilities for switch lifecycle operations. 
+
+    Polls the fabric switches API until target switches reach a manageable state,
+    handling migration mode, greenfield/brownfield shortcuts, and rediscovery.
+    """
+
+    # Default wait parameters
+    DEFAULT_MAX_ATTEMPTS: int = 300
+    DEFAULT_WAIT_INTERVAL: int = 10  # seconds
+
+    # Status values indicating the switch is ready
+    MANAGEABLE_STATUSES = frozenset({"ok", "manageable"})
+
+    # Status values indicating an operation is still in progress
+    IN_PROGRESS_STATUSES = frozenset(
+        {
+            "inProgress",
+            "migration",
+            "discovering",
+            "rediscovering",
+        }
+    )
+
+    # Status values indicating failure
+    FAILED_STATUSES = frozenset(
+        {
+            "failed",
+            "unreachable",
+            "authenticationFailed",
+            "timeout",
+            "discoveryTimeout",
+            "notReacheable",  # Note: typo matches the API spec
+            "notAuthorized",
+            "unknownUserPassword",
+            "connectionError",
+            "sshSessionError",
+        }
+    )
+
+    # Sleep multipliers for each phase
+    _MIGRATION_SLEEP_FACTOR: float = 2.0
+    _REDISCOVERY_SLEEP_FACTOR: float = 3.5
+
+    def __init__(
+        self,
+        nd_module,
+        fabric: str,
+        logger: Optional[logging.Logger] = None,
+        max_attempts: Optional[int] = None,
+        wait_interval: Optional[int] = None,
+        fabric_utils: Optional["FabricUtils"] = None,
+    ):
+        """Initialize SwitchWaitUtils.
+
+        Args:
+            nd_module: Parent module instance (must expose ``.nd``).
+            fabric: Fabric name.
+            logger: Optional logger; defaults to ``nd.SwitchWaitUtils``.
+            max_attempts: Max polling iterations (default ``300``).
+                Note: ``0`` falls back to the default (``or`` semantics).
+            wait_interval: Override interval in seconds (default ``10``,
+                per ``DEFAULT_WAIT_INTERVAL``).
+            fabric_utils: Optional ``FabricUtils`` instance for fabric
+                info queries. Created internally if not provided.
+ """ + self.nd = nd_module.nd + self.fabric = fabric + self.log = logger or logging.getLogger("nd.SwitchWaitUtils") + self.max_attempts = max_attempts or self.DEFAULT_MAX_ATTEMPTS + self.wait_interval = wait_interval or self.DEFAULT_WAIT_INTERVAL + self.fabric_utils = fabric_utils or FabricUtils(nd_module, fabric, self.log) + + # Pre-configure endpoints + self.ep_switches_get = EpManageFabricsSwitchesGet() + self.ep_switches_get.fabric_name = fabric + + self.ep_inventory_discover = EpManageFabricsInventoryDiscoverGet() + self.ep_inventory_discover.fabric_name = fabric + + self.ep_rediscover = EpManageFabricsSwitchActionsRediscoverPost() + self.ep_rediscover.fabric_name = fabric + + # Cached greenfield flag + self._greenfield_debug_enabled: Optional[bool] = None + + # ===================================================================== + # Public API – Wait Methods + # ===================================================================== + + def wait_for_switch_manageable( + self, + serial_numbers: List[str], + all_preserve_config: bool = False, + skip_greenfield_check: bool = False, + ) -> bool: + """Wait for switches to exit migration mode and become manageable. + + Runs a multi-phase poll: migration-mode exit, normal-mode entry, + brownfield shortcut, greenfield shortcut, unreachable detection, + and final rediscovery to ok status. + + Args: + serial_numbers: Switch serial numbers to monitor. + all_preserve_config: Set to ``True`` when all switches in the + batch are brownfield (``preserve_config=True``). Skips + reload detection, as brownfield switches never reload. + skip_greenfield_check: Set to ``True`` to bypass the greenfield + debug flag shortcut (required for POAP bootstrap where + the device always reboots). + + Returns: + ``True`` if all switches are manageable, ``False`` on timeout. 
+        """
+        self.log.info("Waiting for switches to become manageable: %s", serial_numbers)
+
+        # Phase 1 + 2: migration → normal
+        if not self._wait_for_system_mode(serial_numbers):
+            return False
+
+        # Phase 3: brownfield shortcut — no reload expected
+        if all_preserve_config:
+            self.log.info("All switches are brownfield (preserve_config=True) — skipping reload detection (phases 5-6)")
+            return True
+
+        # Phase 4: greenfield shortcut (skipped for POAP bootstrap)
+        if not skip_greenfield_check and self._is_greenfield_debug_enabled():
+            self.log.info("Greenfield debug flag enabled — skipping reload detection")
+            return True
+
+        if skip_greenfield_check:
+            self.log.info("Greenfield debug check skipped (POAP bootstrap — device always reboots)")
+
+        # Phase 5: wait for "unreachable" (switch is reloading)
+        if not self._wait_for_discovery_state(serial_numbers, "unreachable"):
+            return False
+
+        # Phase 6: wait for "ok" (switch is ready)
+        return self._wait_for_discovery_state(serial_numbers, "ok")
+
+    def wait_for_rma_switch_ready(
+        self,
+        serial_numbers: List[str],
+    ) -> bool:
+        """Wait for RMA replacement switches to become manageable.
+
+        RMA replacement switches come up via POAP bootstrap and never enter
+        migration mode. Two phases are run in order:
+
+        1. Wait for each new serial to appear in the fabric inventory.
+           The controller registers the switch after ``provisionRMA``
+           completes, but it may take a few polling cycles.
+        2. Wait for discovery status ``ok``.
+
+        Args:
+            serial_numbers: New (replacement) switch serial numbers to monitor.
+
+        Returns:
+            ``True`` if all switches reach ``ok`` status, ``False`` on timeout.
+        """
+        self.log.info(
+            "Waiting for RMA replacement switch(es) to become ready (skipping migration-mode phase): %s",
+            serial_numbers,
+        )
+
+        # Phase 1: wait until all new serials appear in the fabric inventory.
+        # Rediscovery triggers will 400 until the switch is registered.
+        if not self._wait_for_switches_in_fabric(serial_numbers):
+            return False
+
+        # Phase 2: wait for ok discovery status.
+        return self._wait_for_discovery_state(serial_numbers, "ok")
+
+    def wait_for_discovery(
+        self,
+        seed_ip: str,
+        max_attempts: Optional[int] = None,
+        wait_interval: Optional[int] = None,
+    ) -> Optional[Dict[str, Any]]:
+        """Poll until a single switch discovery completes.
+
+        Args:
+            seed_ip: IP address of the switch being discovered.
+            max_attempts: Override max attempts (default ``30``).
+            wait_interval: Override interval in seconds (defaults to the
+                instance ``wait_interval``, i.e. ``10`` unless overridden).
+
+        Returns:
+            Discovery data dict on success, ``None`` on failure or timeout.
+        """
+        attempts = max_attempts or 30
+        interval = wait_interval or self.wait_interval
+
+        self.log.info("Waiting for discovery of: %s", seed_ip)
+
+        for attempt in range(attempts):
+            status = self._get_discovery_status(seed_ip)
+
+            # NOTE(review): the status value is compared case-sensitively here,
+            # while _filter_by_discovery_status lowercases before comparing —
+            # confirm the discovery API's status casing matches these frozensets.
+            if status and status.get("status") in self.MANAGEABLE_STATUSES:
+                self.log.info("Discovery completed for %s", seed_ip)
+                return status
+
+            if status and status.get("status") in self.FAILED_STATUSES:
+                self.log.error("Discovery failed for %s: %s", seed_ip, status)
+                return None
+
+            self.log.debug("Discovery attempt %s/%s for %s", attempt + 1, attempts, seed_ip)
+            time.sleep(interval)
+
+        self.log.warning("Discovery timeout for %s", seed_ip)
+        return None
+
+    # =====================================================================
+    # Phase Helpers – System Mode
+    # =====================================================================
+
+    def _wait_for_system_mode(self, serial_numbers: List[str]) -> bool:
+        """Poll until all switches transition from migration mode to normal mode.
+
+        Args:
+            serial_numbers: Switch serial numbers to monitor.
+
+        Returns:
+            ``True`` when all switches are in ``normal`` mode,
+            ``False`` on timeout or API failure.
+ """ + # Sub-phase A: exit "migration" mode + pending = self._poll_system_mode( + serial_numbers, + target_mode="migration", + expect_match=True, + ) + if pending is None: + return False + + # Sub-phase B: enter "normal" mode + pending = self._poll_system_mode( + serial_numbers, + target_mode="normal", + expect_match=False, + ) + if pending is None: + return False + + self.log.info("All switches in normal system mode — proceeding to discovery checks") + return True + + def _poll_system_mode( + self, + serial_numbers: List[str], + target_mode: str, + expect_match: bool, + ) -> Optional[List[str]]: + """Poll until no switches remain in (or outside) ``target_mode``. + + Args: + serial_numbers: Switches to check. + target_mode: System mode string (e.g. ``"migration"``). + expect_match: When ``True``, waits for switches to leave + ``target_mode``. When ``False``, waits for + switches to enter ``target_mode``. + + Returns: + Empty list on success, ``None`` on timeout or API error. + """ + pending = list(serial_numbers) + label = f"exit '{target_mode}'" if expect_match else f"enter '{target_mode}'" + + for attempt in range(1, self.max_attempts + 1): + if not pending: + return pending + + switch_data = self._fetch_switch_data() + if switch_data is None: + return None + + remaining = self._filter_by_system_mode(pending, switch_data, target_mode, expect_match) + + if not remaining: + self.log.info("All switches %s mode (attempt %s)", label, attempt) + return remaining + + pending = remaining + self.log.debug( + "Attempt %s/%s: %s switch(es) waiting to %s: %s", + attempt, + self.max_attempts, + len(pending), + label, + pending, + ) + time.sleep(self.wait_interval * self._MIGRATION_SLEEP_FACTOR) + + self.log.warning("Timeout waiting for switches to %s: %s", label, pending) + return None + + # ===================================================================== + # Filtering (static, pure-logic helpers) + # ===================================================================== 
+ + @staticmethod + def _filter_by_system_mode( + serial_numbers: List[str], + switch_data: List[Dict[str, Any]], + target_mode: str, + expect_match: bool, + ) -> List[str]: + """Return serial numbers that have NOT yet satisfied the mode check. + + Args: + serial_numbers: Switches to inspect. + switch_data: Raw switch dicts from the GET API. + target_mode: e.g. ``"migration"`` or ``"normal"``. + expect_match: When ``True``, waits for switches to leave + ``target_mode``. When ``False``, waits for + switches to enter ``target_mode``. + + Returns: + Serial numbers still waiting. + """ + switch_index = {sw.get("serialNumber"): sw for sw in switch_data} + remaining: List[str] = [] + for sn in serial_numbers: + sw = switch_index.get(sn) + if sw is None: + remaining.append(sn) + continue + mode = sw.get("additionalData", {}).get("systemMode", "").lower() + # expect_match=True: "still in target_mode" → not done + # expect_match=False: "not yet in target_mode" → not done + still_waiting = (mode == target_mode) if expect_match else (mode != target_mode) + if still_waiting: + remaining.append(sn) + return remaining + + @staticmethod + def _filter_by_discovery_status( + serial_numbers: List[str], + switch_data: List[Dict[str, Any]], + target_state: str, + ) -> List[str]: + """Return serial numbers not yet at ``target_state``. + + Args: + serial_numbers: Switches to inspect. + switch_data: Raw switch dicts from the GET API. + target_state: e.g. ``"unreachable"`` or ``"ok"``. + + Returns: + Serial numbers still waiting. 
+ """ + switch_index = {sw.get("serialNumber"): sw for sw in switch_data} + remaining: List[str] = [] + for sn in serial_numbers: + sw = switch_index.get(sn) + if sw is None: + remaining.append(sn) + continue + status = sw.get("additionalData", {}).get("discoveryStatus", "").lower() + if status != target_state: + remaining.append(sn) + return remaining + + # ===================================================================== + # Phase Helpers – Discovery Status + # ===================================================================== + + def _wait_for_discovery_state( + self, + serial_numbers: List[str], + target_state: str, + ) -> bool: + """Poll until all switches reach the given discovery status. + + Triggers rediscovery on each iteration for switches that have not + yet reached the target state. + + Args: + serial_numbers: Switch serial numbers to monitor. + target_state: Expected discovery status, e.g. ``"unreachable"`` + or ``"ok"``. + + Returns: + ``True`` when all switches reach ``target_state``, + ``False`` on timeout. 
+ """ + pending = list(serial_numbers) + + for attempt in range(1, self.max_attempts + 1): + if not pending: + return True + + switch_data = self._fetch_switch_data() + if switch_data is None: + return False + + pending = self._filter_by_discovery_status(pending, switch_data, target_state) + + if not pending: + self.log.info( + "All switches reached '%s' state (attempt %s)", + target_state, + attempt, + ) + return True + + self._trigger_rediscovery(pending) + self.log.debug( + "Attempt %s/%s: %s switch(es) not yet '%s': %s", + attempt, + self.max_attempts, + len(pending), + target_state, + pending, + ) + time.sleep(self.wait_interval * self._REDISCOVERY_SLEEP_FACTOR) + + self.log.warning("Timeout waiting for '%s' state: %s", target_state, serial_numbers) + return False + + # ===================================================================== + # API Helpers + # ===================================================================== + + def _wait_for_switches_in_fabric( + self, + serial_numbers: List[str], + ) -> bool: + """Poll until all serial numbers appear in the fabric inventory. + + After ``provisionRMA`` the controller registers the new switch + asynchronously. Rediscovery requests will fail with 400 + "Switch not found" until the switch is registered, so we must + wait for it to appear before triggering any rediscovery. + + Args: + serial_numbers: Switch serial numbers to wait for. + + Returns: + ``True`` when all serials are present, ``False`` on timeout. 
+ """ + pending = list(serial_numbers) + self.log.info( + "Waiting for %s switch(es) to appear in fabric inventory: %s", + len(pending), + pending, + ) + + for attempt in range(1, self.max_attempts + 1): + if not pending: + return True + + switch_data = self._fetch_switch_data() + if switch_data is None: + # API error — keep waiting + time.sleep(self.wait_interval) + continue + + known_serials = {sw.get("serialNumber") for sw in switch_data} + pending = [sn for sn in pending if sn not in known_serials] + + if not pending: + self.log.info( + "All RMA switch(es) now visible in fabric inventory (attempt %s)", + attempt, + ) + return True + + self.log.debug( + "Attempt %s/%s: %s switch(es) not yet in fabric: %s", + attempt, + self.max_attempts, + len(pending), + pending, + ) + time.sleep(self.wait_interval) + + self.log.warning("Timeout waiting for switches to appear in fabric: %s", pending) + return False + + def _fetch_switch_data( + self, + ) -> Optional[List[Dict[str, Any]]]: + """GET current switch data for the fabric. + + Returns: + List of switch dicts, or ``None`` on failure. + """ + try: + response = self.nd.request( + self.ep_switches_get.path, + verb=self.ep_switches_get.verb, + ) + switch_data = response.get("switches", []) + if not switch_data: + self.log.error("No switch data returned for fabric") + return None + return switch_data + except Exception as e: + self.log.error("Failed to fetch switch data: %s", e) + return None + + def _trigger_rediscovery(self, serial_numbers: List[str]) -> None: + """POST a rediscovery request for the given switches. + + Args: + serial_numbers: Switch serial numbers to rediscover. 
+ """ + if not serial_numbers: + return + + payload = {"switchIds": serial_numbers} + self.log.info("Triggering rediscovery for: %s", serial_numbers) + try: + self.nd.request( + self.ep_rediscover.path, + verb=self.ep_rediscover.verb, + data=payload, + ) + except Exception as e: + self.log.warning("Failed to trigger rediscovery: %s", e) + + def _get_discovery_status( + self, + seed_ip: str, + ) -> Optional[Dict[str, Any]]: + """GET discovery status for a single switch by IP. + + Args: + seed_ip: IP address of the switch. + + Returns: + Switch dict from the discovery API, or ``None``. + """ + try: + response = self.nd.request( + self.ep_inventory_discover.path, + verb=self.ep_inventory_discover.verb, + ) + for switch in response.get("switches", []): + if switch.get("ip") == seed_ip or switch.get("ipaddr") == seed_ip: + return switch + return None + except Exception as e: + self.log.debug("Discovery status check failed: %s", e) + return None + + def _is_greenfield_debug_enabled(self) -> bool: + """Check whether the fabric has the greenfield debug flag enabled. + + Uses the ``FabricUtils`` instance. Result is cached for the + lifetime of the instance. + + Returns: + ``True`` if the flag is ``"enable"``, ``False`` otherwise. 
+ """ + if self._greenfield_debug_enabled is not None: + return self._greenfield_debug_enabled + + try: + fabric_info = self.fabric_utils.get_fabric_info() + self.log.debug("Fabric info retrieved for greenfield check: %s", fabric_info) + flag = fabric_info.get("management", {}).get("greenfieldDebugFlag", "").lower() + self.log.debug("Greenfield debug flag value: '%s'", flag) + self._greenfield_debug_enabled = flag == "enable" + except Exception as e: + self.log.debug("Failed to get greenfield debug flag: %s", e) + self._greenfield_debug_enabled = False + + return self._greenfield_debug_enabled + + +__all__ = [ + "SwitchOperationError", + "PayloadUtils", + "FabricUtils", + "SwitchWaitUtils", + "mask_password", + "get_switch_field", + "determine_operation_type", + "group_switches_by_credentials", + "query_bootstrap_switches", + "build_bootstrap_index", + "build_poap_data_block", +] diff --git a/plugins/module_utils/models/__init__.py b/plugins/module_utils/models/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/plugins/module_utils/models/manage_switches/__init__.py b/plugins/module_utils/models/manage_switches/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/plugins/module_utils/models/manage_switches/bootstrap_models.py b/plugins/module_utils/models/manage_switches/bootstrap_models.py new file mode 100644 index 000000000..7ff47ea45 --- /dev/null +++ b/plugins/module_utils/models/manage_switches/bootstrap_models.py @@ -0,0 +1,291 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Bootstrap (POAP) switch models for import operations. + +Based on OpenAPI schema for Nexus Dashboard Manage APIs v1.1.332. 
+""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from typing import Any, Dict, List, Optional, ClassVar, Literal + +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, + computed_field, + field_validator, + model_validator, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.models.nested import ( + NDNestedModel, +) + +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( + RemoteCredentialStore, + SnmpV3AuthProtocol, + SwitchRole, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import ( + SwitchValidators, +) + + +class BootstrapBaseData(NDNestedModel): + """ + Device-reported data embedded in a bootstrap API entry. + """ + + identifiers: ClassVar[List[str]] = [] + gateway_ip_mask: Optional[str] = Field(default=None, alias="gatewayIpMask", description="Gateway IP address with mask") + models: Optional[List[str]] = Field(default=None, description="Supported models for switch") + + @field_validator("gateway_ip_mask", mode="before") + @classmethod + def validate_gateway(cls, v: Optional[str]) -> Optional[str]: + return SwitchValidators.validate_cidr_optional(v) + + +class BootstrapBaseModel(NDBaseModel): + """ + Common hardware and policy properties shared across bootstrap, pre-provision, and RMA operations. 
+ """ + + identifiers: ClassVar[List[str]] = [] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + gateway_ip_mask: Optional[str] = Field(default=None, alias="gatewayIpMask", description="Gateway IP address with mask") + model: Optional[str] = Field(default=None, description="Model of the bootstrap switch") + software_version: Optional[str] = Field( + default=None, + alias="softwareVersion", + description="Software version of the bootstrap switch", + ) + image_policy: Optional[str] = Field( + default=None, + alias="imagePolicy", + description="Image policy associated with the switch during bootstrap", + ) + switch_role: Optional[SwitchRole] = Field(default=None, alias="switchRole") + data: Optional[BootstrapBaseData] = Field(default=None, description="Additional bootstrap data") + + @field_validator("gateway_ip_mask", mode="before") + @classmethod + def validate_gateway(cls, v: Optional[str]) -> Optional[str]: + return SwitchValidators.validate_cidr_optional(v) + + +class BootstrapCredentialModel(NDBaseModel): + """ + Credential properties for a switch bootstrap or pre-provision operation. + + When useNewCredentials is true, separate discovery credentials are used for + post-bootstrap switch discovery instead of the admin password. 
+ """ + + identifiers: ClassVar[List[str]] = [] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] + password: str = Field(description="Switch password to be set during bootstrap for admin user") + discovery_auth_protocol: SnmpV3AuthProtocol = Field(alias="discoveryAuthProtocol") + use_new_credentials: bool = Field( + default=False, + alias="useNewCredentials", + description="If True, use discoveryUsername and discoveryPassword", + ) + discovery_username: Optional[str] = Field( + default=None, + alias="discoveryUsername", + description="Username to be used for switch discovery post bootstrap", + ) + discovery_password: Optional[str] = Field( + default=None, + alias="discoveryPassword", + description="Password associated with the corresponding switch discovery user", + ) + remote_credential_store: RemoteCredentialStore = Field( + default=RemoteCredentialStore.LOCAL, + alias="remoteCredentialStore", + description="Type of credential store for discovery credentials", + ) + remote_credential_store_key: Optional[str] = Field( + default=None, + alias="remoteCredentialStoreKey", + description="Remote credential store key for discovery credentials", + ) + + @model_validator(mode="after") + def validate_credentials(self) -> "BootstrapCredentialModel": + """Validate credential configuration logic.""" + if self.use_new_credentials: + if self.remote_credential_store == RemoteCredentialStore.CYBERARK: + if not self.remote_credential_store_key: + raise ValueError("remote_credential_store_key is required when remote_credential_store is 'cyberark'") + elif self.remote_credential_store == RemoteCredentialStore.LOCAL: + if not self.discovery_username or not self.discovery_password: + raise ValueError( + "discovery_username and discovery_password are required when remote_credential_store is 'local' and use_new_credentials is True" + ) + 
return self + + +class BootstrapImportSpecificModel(NDBaseModel): + """ + Switch-identifying fields returned by the bootstrap GET API prior to import. + """ + + identifiers: ClassVar[List[str]] = ["serial_number"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + hostname: str = Field(description="Hostname of the bootstrap switch") + ip: str = Field(description="IP address of the bootstrap switch") + serial_number: str = Field(alias="serialNumber", description="Serial number of the bootstrap switch") + in_inventory: bool = Field( + alias="inInventory", + description="True if the bootstrap switch is in inventory", + ) + public_key: str = Field(alias="publicKey", description="Public Key") + finger_print: str = Field(alias="fingerPrint", description="Fingerprint") + dhcp_bootstrap_ip: Optional[str] = Field( + default=None, + alias="dhcpBootstrapIp", + description="This is used for device day-0 bring-up when using inband reachability", + ) + seed_switch: bool = Field(default=False, alias="seedSwitch", description="Use as seed switch") + + @field_validator("hostname", mode="before") + @classmethod + def validate_host(cls, v: str) -> str: + return SwitchValidators.require_hostname(v) + + @field_validator("ip", "dhcp_bootstrap_ip", mode="before") + @classmethod + def validate_ip(cls, v: Optional[str]) -> Optional[str]: + return SwitchValidators.validate_ip_address(v) + + @field_validator("serial_number", mode="before") + @classmethod + def validate_serial(cls, v: str) -> str: + return SwitchValidators.require_serial_number(v) + + +class BootstrapImportSwitchModel(NDBaseModel): + """ + Request payload for importing a single POAP bootstrap switch into the fabric. 
+ + Path: POST /fabrics/{fabricName}/switchActions/importBootstrap + """ + + identifiers: ClassVar[List[str]] = ["serial_number"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] + + serial_number: str = Field(alias="serialNumber", description="Serial number of the bootstrap switch") + model: Optional[str] = Field(default=None, description="Model of the bootstrap switch") + software_version: Optional[str] = Field( + default=None, + alias="softwareVersion", + description="Software version of the bootstrap switch", + ) + hostname: str = Field(description="Hostname of the bootstrap switch") + ip: str = Field(description="IP address of the bootstrap switch") + password: str = Field(description="Switch password to be set during bootstrap for admin user") + discovery_auth_protocol: SnmpV3AuthProtocol = Field(alias="discoveryAuthProtocol") + discovery_username: Optional[str] = Field(default=None, alias="discoveryUsername") + discovery_password: Optional[str] = Field(default=None, alias="discoveryPassword") + remote_credential_store: RemoteCredentialStore = Field( + default=RemoteCredentialStore.LOCAL, + alias="remoteCredentialStore", + description="Type of credential store for discovery credentials", + ) + remote_credential_store_key: Optional[str] = Field( + default=None, + alias="remoteCredentialStoreKey", + description="Remote credential store key for discovery credentials", + ) + data: Optional[Dict[str, Any]] = Field( + default=None, + description="Bootstrap configuration data block (gatewayIpMask, models)", + ) + fingerprint: str = Field( + default="", + alias="fingerPrint", + description="SSH fingerprint from bootstrap GET API", + ) + public_key: str = Field( + default="", + alias="publicKey", + description="SSH public key from bootstrap GET API", + ) + re_add: bool = Field( + default=False, + alias="reAdd", + 
description="Whether to re-add an already-seen switch", + ) + in_inventory: bool = Field(default=False, alias="inInventory") + image_policy: Optional[str] = Field( + default=None, + alias="imagePolicy", + description="Image policy associated with the switch during bootstrap", + ) + switch_role: Optional[SwitchRole] = Field(default=None, alias="switchRole") + gateway_ip_mask: Optional[str] = Field(default=None, alias="gatewayIpMask", description="Gateway IP address with mask") + + @field_validator("ip", mode="before") + @classmethod + def validate_ip_field(cls, v: str) -> str: + return SwitchValidators.require_ip_address(v) + + @field_validator("hostname", mode="before") + @classmethod + def validate_host(cls, v: str) -> str: + return SwitchValidators.require_hostname(v) + + @field_validator("serial_number", mode="before") + @classmethod + def validate_serial(cls, v: str) -> str: + return SwitchValidators.require_serial_number(v) + + @computed_field(alias="useNewCredentials") + @property + def use_new_credentials(self) -> bool: + """Derive useNewCredentials from discoveryUsername and discoveryPassword.""" + return bool(self.discovery_username and self.discovery_password) + + def to_payload(self) -> Dict[str, Any]: + """Convert to API payload format matching importBootstrap spec.""" + return self.model_dump(by_alias=True, exclude_none=True) + + @classmethod + def from_response(cls, response: Dict[str, Any]) -> "BootstrapImportSwitchModel": + """Create model instance from API response.""" + return cls.model_validate(response) + + +class ImportBootstrapSwitchesRequestModel(NDBaseModel): + """ + Request body wrapping a list of bootstrap switch payloads for bulk POAP import. 
+ + Path: POST /fabrics/{fabricName}/switchActions/importBootstrap + """ + + identifiers: ClassVar[List[str]] = [] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + switches: List[BootstrapImportSwitchModel] = Field(description="PowerOn Auto Provisioning switches") + + def to_payload(self) -> Dict[str, Any]: + """Convert to API payload format.""" + return {"switches": [s.to_payload() for s in self.switches]} + + +__all__ = [ + "BootstrapBaseData", + "BootstrapBaseModel", + "BootstrapCredentialModel", + "BootstrapImportSpecificModel", + "BootstrapImportSwitchModel", + "ImportBootstrapSwitchesRequestModel", +] diff --git a/plugins/module_utils/models/manage_switches/config_models.py b/plugins/module_utils/models/manage_switches/config_models.py new file mode 100644 index 000000000..6e44571e5 --- /dev/null +++ b/plugins/module_utils/models/manage_switches/config_models.py @@ -0,0 +1,678 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Ansible playbook configuration models. + +These models represent the user-facing configuration schema used in Ansible +playbooks for normal switch addition, POAP, and RMA operations. 
+ +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import socket +from ipaddress import ip_address +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, + ValidationInfo, + computed_field, + field_validator, + model_validator, +) +from typing import Any, Dict, List, Optional, ClassVar, Literal, Union + +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.models.nested import ( + NDNestedModel, +) + +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( + PlatformType, + SnmpV3AuthProtocol, + SwitchRole, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import ( + SwitchValidators, +) + + +class ConfigDataModel(NDNestedModel): + """ + Hardware and gateway network data required for POAP and RMA operations. + + Maps to config.poap.config_data and config.rma.config_data in the playbook. + """ + + identifiers: ClassVar[List[str]] = [] + + models: List[str] = Field( + alias="models", + min_length=1, + description="List of model of modules in switch to Bootstrap/Pre-provision/RMA", + ) + gateway: str = Field(description="Gateway IP with mask for the switch (e.g., 192.168.0.1/24)") + + @field_validator("models", mode="before") + @classmethod + def validate_models_list(cls, v: Any) -> List[str]: + """Validate models is a non-empty list of strings.""" + if v is None: + raise ValueError("'models' is required in config_data. Provide a list of module model strings, e.g. models: [N9K-X9364v, N9K-vSUP]") + if not isinstance(v, list): + raise ValueError(f"'models' must be a list of module model strings, got: {type(v).__name__}. e.g. models: [N9K-X9364v, N9K-vSUP]") + if len(v) == 0: + raise ValueError("'models' list cannot be empty. Provide at least one module model string, e.g. 
models: [N9K-X9364v, N9K-vSUP]") + return v + + @field_validator("gateway", mode="before") + @classmethod + def validate_gateway(cls, v: str) -> str: + """Validate gateway is a valid CIDR.""" + if not v or not v.strip(): + raise ValueError("gateway cannot be empty") + return SwitchValidators.validate_cidr(v) + + +class POAPConfigModel(NDNestedModel): + """Bootstrap POAP config for a single switch. + + Used when ``poap`` is specified alone (bootstrap-only operation). + ``serial_number`` and ``hostname`` are mandatory; all other fields are optional. + Model, version, and config data are sourced from the bootstrap API at runtime. + If the bootstrap API reports a different hostname or role, the API value overrides + the user-provided value and a warning is logged. + """ + + identifiers: ClassVar[List[str]] = [] + + # Mandatory + serial_number: str = Field( + alias="serialNumber", + min_length=1, + description="Serial number of the physical switch to Bootstrap", + ) + hostname: str = Field(description="Hostname for the switch during bootstrap") + + # Optional + discovery_username: Optional[str] = Field( + default=None, + alias="discoveryUsername", + description="Username for device discovery during POAP", + ) + discovery_password: Optional[str] = Field( + default=None, + alias="discoveryPassword", + description="Password for device discovery during POAP", + ) + image_policy: Optional[str] = Field( + default=None, + alias="imagePolicy", + description="Name of the image policy to be applied on switch", + ) + + @field_validator("hostname", mode="before") + @classmethod + def validate_hostname_field(cls, v: str) -> str: + """Validate hostname is not empty and well-formed.""" + return SwitchValidators.require_hostname(v) + + @model_validator(mode="after") + def validate_discovery_credentials_pair(self) -> "POAPConfigModel": + """Validate that discovery_username and discovery_password are both set or both absent.""" + 
SwitchValidators.check_discovery_credentials_pair(self.discovery_username, self.discovery_password) + return self + + @field_validator("serial_number", mode="before") + @classmethod + def validate_serial_number_field(cls, v: str) -> str: + """Validate serial_number is not empty.""" + return SwitchValidators.require_serial_number(v) + + +class PreprovisionConfigModel(NDNestedModel): + """Pre-provision config for a single switch. + + Used when ``preprovision`` is specified alone. + All five fields — ``serial_number``, ``model``, ``version``, ``hostname``, + and ``config_data`` — are mandatory because the controller has no physical + switch to pull these values from. + """ + + identifiers: ClassVar[List[str]] = [] + + # Mandatory + serial_number: str = Field( + alias="serialNumber", + min_length=1, + description="Serial number of switch to Pre-provision", + ) + model: str = Field(min_length=1, description="Model of switch to Pre-provision") + version: str = Field(min_length=1, description="Software version of switch to Pre-provision") + hostname: str = Field(description="Hostname for the switch during pre-provision") + config_data: ConfigDataModel = Field( + alias="configData", + description=("Basic config data of switch to Pre-provision. 
" "'models' (list of module models) and 'gateway' (IP with mask) are mandatory."), + ) + + # Optional + discovery_username: Optional[str] = Field( + default=None, + alias="discoveryUsername", + description="Username for device discovery during pre-provision", + ) + discovery_password: Optional[str] = Field( + default=None, + alias="discoveryPassword", + description="Password for device discovery during pre-provision", + ) + image_policy: Optional[str] = Field( + default=None, + alias="imagePolicy", + description="Image policy to apply during pre-provision", + ) + + @field_validator("hostname", mode="before") + @classmethod + def validate_hostname_field(cls, v: str) -> str: + """Validate hostname is not empty and well-formed.""" + return SwitchValidators.require_hostname(v) + + @model_validator(mode="after") + def validate_discovery_credentials_pair(self) -> "PreprovisionConfigModel": + """Validate that discovery_username and discovery_password are both set or both absent.""" + SwitchValidators.check_discovery_credentials_pair(self.discovery_username, self.discovery_password) + return self + + @field_validator("serial_number", mode="before") + @classmethod + def validate_serial_number_field(cls, v: str) -> str: + """Validate serial_number is not empty.""" + return SwitchValidators.require_serial_number(v) + + +class RMAConfigModel(NDNestedModel): + """ + RMA configuration entry for replacing a single switch via bootstrap. + + The old switch is identified from the fabric inventory using ``seed_ip``. + All switch properties (model, version, gateway, modules) are sourced from + the bootstrap API at runtime — only the new serial number is required. + + The switch being replaced must be in maintenance mode and either shut down + or disconnected from the network before initiating the RMA operation. 
+ """ + + identifiers: ClassVar[List[str]] = [] + + # Required + new_serial_number: str = Field( + alias="newSerialNumber", + min_length=1, + description="Serial number of the replacement switch to bootstrap for RMA", + ) + + # Optional + image_policy: Optional[str] = Field( + default=None, + alias="imagePolicy", + description="Name of the image policy to be applied on the replacement switch", + ) + discovery_username: Optional[str] = Field( + default=None, + alias="discoveryUsername", + description="Username for device discovery during RMA bootstrap", + ) + discovery_password: Optional[str] = Field( + default=None, + alias="discoveryPassword", + description="Password for device discovery during RMA bootstrap", + ) + + @field_validator("new_serial_number", mode="before") + @classmethod + def validate_serial_numbers(cls, v: str) -> str: + """Validate new_serial_number is not empty.""" + return SwitchValidators.require_serial_number(v, "new_serial_number") + + @model_validator(mode="after") + def validate_discovery_credentials_pair(self) -> "RMAConfigModel": + """Validate that discovery_username and discovery_password are both set or both absent.""" + SwitchValidators.check_discovery_credentials_pair(self.discovery_username, self.discovery_password) + return self + + +class SwitchConfigModel(NDBaseModel): + """ + Per-switch configuration entry in the Ansible playbook config list. + + Supports normal switch addition, POAP (Bootstrap), Pre-provision, Swap + (both poap+preprovision), and RMA operations. The operation type is derived + from the presence of poap, preprovision, and/or rma fields. 
+ """ + + identifiers: ClassVar[List[str]] = ["seed_ip"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + + # Fields excluded from diff — only seed_ip + role are compared + exclude_from_diff: ClassVar[List[str]] = [ + "username", + "password", + "auth_proto", + "preserve_config", + "platform_type", + "poap", + "preprovision", + "rma", + "operation_type", + ] + + # Required fields + seed_ip: str = Field( + alias="seedIp", + min_length=1, + description="Seed IP address or DNS name of the switch", + ) + + # Optional fields — required for merged/overridden, optional for query/deleted + username: Optional[str] = Field( + default=None, + alias="userName", + description="Login username to the switch (required for merged/overridden states)", + ) + password: Optional[str] = Field( + default=None, + description="Login password to the switch (required for merged/overridden states)", + ) + # Optional fields with defaults + auth_proto: SnmpV3AuthProtocol = Field( + default=SnmpV3AuthProtocol.MD5, + alias="authProto", + description="Authentication protocol to use", + ) + role: Optional[SwitchRole] = Field( + default=None, + description="Role to assign to the switch. 
None means not specified (uses controller default).", + ) + preserve_config: bool = Field( + default=False, + alias="preserveConfig", + description="Set to false for greenfield, true for brownfield deployment", + ) + platform_type: PlatformType = Field( + default=PlatformType.NX_OS, + alias="platformType", + description="Platform type of the switch (nx-os, ios-xe, etc.)", + ) + + # POAP, Pre-provision and RMA configurations + poap: Optional[POAPConfigModel] = Field( + default=None, + description="Bootstrap POAP config (serial_number + hostname mandatory)", + ) + preprovision: Optional[PreprovisionConfigModel] = Field( + default=None, + description="Pre-provision config (serial_number, model, version, hostname, config_data all mandatory)", + ) + rma: Optional[List[RMAConfigModel]] = Field( + default=None, + description="RMA (Return Material Authorization) configurations for switch replacement", + ) + + # Computed fields + + @computed_field + @property + def operation_type( + self, + ) -> Literal["normal", "poap", "preprovision", "swap", "rma"]: + """Determine the operation type from this config. + + Returns: + ``'swap'`` if both poap and preprovision are present, + ``'poap'`` if only bootstrap poap is present, + ``'preprovision'`` if only preprovision is present, + ``'rma'`` if RMA configs are present, + ``'normal'`` otherwise. + """ + if self.poap and self.preprovision: + return "swap" + if self.poap: + return "poap" + if self.preprovision: + return "preprovision" + if self.rma: + return "rma" + return "normal" + + def to_config_dict(self) -> Dict[str, Any]: + """Return the playbook config as a dict with all credentials stripped. + + Returns: + Dict of config fields with ``username``, ``password``, + ``discovery_username``, and ``discovery_password`` excluded. 
+ """ + return self.to_config( + exclude={ + "username": True, + "password": True, + "poap": {"discovery_username": True, "discovery_password": True}, + "preprovision": { + "discovery_username": True, + "discovery_password": True, + }, + "rma": {"__all__": {"discovery_username": True, "discovery_password": True}}, + } + ) + + @model_validator(mode="after") + def reject_auth_proto_for_special_ops(self) -> "SwitchConfigModel": + """Reject non-MD5 auth_proto when POAP, Pre-provision, Swap or RMA is configured. + + These operations always use MD5 internally. By validating mode='after', + all inputs have already been coerced by Pydantic into a typed + SnmpV3AuthProtocol value, so a direct enum comparison is safe. + """ + if (self.poap or self.preprovision or self.rma) and self.auth_proto != SnmpV3AuthProtocol.MD5: + if self.poap or self.preprovision: + op = "POAP/Pre-provision" + else: + op = "RMA" + raise ValueError( + f"'auth_proto' must not be specified for {op} operations. " + f"The authentication protocol is always MD5 and is set " + f"automatically. Received: '{self.auth_proto.value}'" + ) + return self + + @model_validator(mode="after") + def validate_special_ops_exclusion(self) -> "SwitchConfigModel": + """Validate mutually exclusive operation combinations. 
+ + Allowed: + - poap only (Bootstrap) + - preprovision only (Pre-provision) + - poap + preprovision (Swap) + - rma (RMA) + Not allowed: + - rma combined with poap or preprovision + """ + if self.rma and (self.poap or self.preprovision): + raise ValueError("Cannot specify 'rma' together with 'poap' or 'preprovision' for the same switch") + return self + + @model_validator(mode="after") + def validate_special_ops_credentials(self) -> "SwitchConfigModel": + """Validate credentials for POAP, Pre-provision, Swap and RMA operations.""" + if self.poap or self.preprovision or self.rma: + if not self.username or not self.password: + raise ValueError("For POAP, Pre-provision, and RMA operations, username and password are required") + if self.username != "admin": + raise ValueError("For POAP, Pre-provision, and RMA operations, username should be 'admin'") + return self + + @model_validator(mode="after") + def apply_state_defaults(self, info: ValidationInfo) -> "SwitchConfigModel": + """Apply state-aware defaults and enforcement using validation context. + + When ``context={"state": "merged"}`` (or ``"overridden"``) is passed + to ``model_validate()``, the model: + - Defaults ``role`` to ``SwitchRole.LEAF`` when not specified. + - Enforces that ``username`` and ``password`` are provided. + + For ``query`` / ``deleted`` (or no context), fields remain as-is. 
+ """ + state = (info.context or {}).get("state") if info else None + + # RMA only allowed with merged + if self.rma and state not in (None, "merged"): + raise ValueError(f"RMA operations require 'merged' state, " f"got '{state}' (switch: {self.seed_ip})") + + if state in ("merged", "overridden", "replaced"): + if self.role is None: + self.role = SwitchRole.LEAF + if not self.username or not self.password: + raise ValueError(f"username and password are required " f"for '{state}' state " f"(switch: {self.seed_ip})") + return self + + @field_validator("seed_ip", mode="before") + @classmethod + def validate_seed_ip(cls, v: str) -> str: + """Resolve seed_ip to an IP address. + + Accepts IPv4, IPv6, or a DNS name / hostname. When the input + is not a valid IP address a DNS lookup is performed and the + resolved IPv4 address is returned so that downstream code + always works with a clean IP. + """ + if not v or not v.strip(): + raise ValueError("seed_ip cannot be empty") + + v = v.strip() + + # Fast path: already a valid IP address + try: + ip_address(v) + return v + except ValueError: + pass + + # Not an IP — attempt DNS resolution (IPv4 first, then IPv6) + for family in (socket.AF_INET, socket.AF_INET6): + try: + addr_info = socket.getaddrinfo(v, None, family) + if addr_info: + return addr_info[0][4][0] + except socket.gaierror: + continue + + raise ValueError(f"'{v}' is not a valid IP address and could not be resolved via DNS") + + @field_validator("rma", mode="before") + @classmethod + def validate_rma_list_not_empty(cls, v: Optional[List]) -> Optional[List]: + """Validate that if RMA list is provided, it is not empty.""" + if v is not None and len(v) == 0: + raise ValueError("RMA list cannot be empty if provided") + return v + + @field_validator("auth_proto", mode="before") + @classmethod + def normalize_auth_proto(cls, v: Union[str, SnmpV3AuthProtocol, None]) -> SnmpV3AuthProtocol: + """Normalize auth_proto to handle case-insensitive input (MD5, md5, etc.).""" + 
return SnmpV3AuthProtocol.normalize(v) + + @field_validator("role", mode="before") + @classmethod + def normalize_role(cls, v: Union[str, SwitchRole, None]) -> Optional[SwitchRole]: + """Normalize role for case-insensitive and underscore-to-camelCase matching. + Returns None when not specified (distinguishes from explicit 'leaf').""" + if v is None: + return None + return SwitchRole.normalize(v) + + @field_validator("platform_type", mode="before") + @classmethod + def normalize_platform_type(cls, v: Union[str, PlatformType, None]) -> PlatformType: + """Normalize platform_type for case-insensitive matching (NX_OS, nx-os, etc.).""" + return PlatformType.normalize(v) + + def to_payload(self) -> Dict[str, Any]: + """Convert to API payload format.""" + return self.model_dump( + by_alias=True, + exclude_none=True, + ) + + @classmethod + def from_switch_data(cls, sw: Any) -> "SwitchConfigModel": + """Build a config-shaped entry from a live inventory record. + + Only the fields recoverable from the ND inventory API are populated. + Credentials (username, password) are intentionally omitted. + + Args: + sw: A SwitchDataModel instance from the fabric inventory. + + Returns: + SwitchConfigModel instance with seed_ip, role, and platform_type + populated from live data. + + Raises: + ValueError: If the inventory record is missing a management IP, + making it impossible to construct a valid config entry. 
+ """ + if not sw.fabric_management_ip: + raise ValueError(f"Switch {sw.switch_id!r} has no fabric_management_ip — " "cannot build a gathered config entry without a seed IP.") + + platform_type = sw.additional_data.platform_type if sw.additional_data and hasattr(sw.additional_data, "platform_type") else None + + data: Dict[str, Any] = {"seed_ip": sw.fabric_management_ip} + if sw.switch_role is not None: + data["role"] = sw.switch_role + if platform_type is not None: + data["platform_type"] = platform_type + + return cls.model_validate(data) + + def to_gathered_dict(self) -> Dict[str, Any]: + """Return a config dict suitable for gathered output. + + platform_type is excluded (internal detail not needed by the user). + username and password are replaced with placeholders so the returned + data is immediately usable as ``config:`` input after substituting + real credentials. + + Returns: + Dict with seed_ip, role, auth_proto, preserve_config, + username set to ``""``, password set to ``""``. 
+ """ + result = self.to_config() + for key in ("platform_type", "poap", "preprovision", "rma", "operation_type"): + result.pop(key, None) + result["username"] = "" + result["password"] = "" + return result + + @classmethod + def get_argument_spec(cls) -> Dict[str, Any]: + """Return the Ansible argument spec for nd_manage_switches.""" + return dict( + fabric=dict(type="str", required=True), + state=dict( + type="str", + default="merged", + choices=["merged", "replaced", "overridden", "deleted", "gathered"], + ), + config_actions=dict( + type="dict", + options=dict( + save=dict(type="bool", default=True), + deploy=dict(type="bool", default=True), + type=dict( + type="str", + default="switch", + choices=["switch", "global"], + ), + ), + ), + config=dict( + type="list", + elements="dict", + options=dict( + seed_ip=dict(type="str", required=True), + username=dict(type="str"), + password=dict(type="str", no_log=True), + auth_proto=dict( + type="str", + default="MD5", + choices=[ + "MD5", + "SHA", + "MD5_DES", + "MD5_AES", + "SHA_DES", + "SHA_AES", + ], + ), + role=dict( + type="str", + default="leaf", + choices=[ + "leaf", + "spine", + "border", + "border_spine", + "border_gateway", + "border_gateway_spine", + "super_spine", + "border_super_spine", + "border_gateway_super_spine", + "access", + "aggregation", + "edge_router", + "core_router", + "tor", + ], + ), + preserve_config=dict(type="bool", default=False), + poap=dict( + type="dict", + options=dict( + serial_number=dict(type="str", required=True), + hostname=dict(type="str", required=True), + discovery_username=dict(type="str"), + discovery_password=dict(type="str", no_log=True), + image_policy=dict(type="str"), + ), + ), + preprovision=dict( + type="dict", + options=dict( + serial_number=dict(type="str", required=True), + model=dict(type="str", required=True), + version=dict(type="str", required=True), + hostname=dict(type="str", required=True), + discovery_username=dict(type="str"), + 
discovery_password=dict(type="str", no_log=True), + image_policy=dict(type="str"), + config_data=dict( + type="dict", + required=True, + options=dict( + models=dict( + type="list", + elements="str", + required=True, + ), + gateway=dict(type="str", required=True), + ), + ), + ), + ), + rma=dict( + type="list", + elements="dict", + options=dict( + new_serial_number=dict(type="str", required=True), + discovery_username=dict(type="str"), + discovery_password=dict(type="str", no_log=True), + image_policy=dict(type="str"), + ), + ), + ), + ), + ) + + +__all__ = [ + "ConfigDataModel", + "POAPConfigModel", + "PreprovisionConfigModel", + "RMAConfigModel", + "SwitchConfigModel", +] diff --git a/plugins/module_utils/models/manage_switches/discovery_models.py b/plugins/module_utils/models/manage_switches/discovery_models.py new file mode 100644 index 000000000..2875ce46d --- /dev/null +++ b/plugins/module_utils/models/manage_switches/discovery_models.py @@ -0,0 +1,220 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Switch discovery models for shallow discovery and fabric add operations. + +Based on OpenAPI schema for Nexus Dashboard Manage APIs v1.1.332. 
+""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from typing import Any, Dict, List, Optional, ClassVar, Literal, Union + +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, + field_validator, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel + +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( + PlatformType, + RemoteCredentialStore, + ShallowDiscoveryPlatformType, + SnmpV3AuthProtocol, + SwitchRole, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import ( + SwitchValidators, +) + + +class ShallowDiscoveryRequestModel(NDBaseModel): + """ + Initiates a shallow CDP/LLDP-based discovery from one or more seed IP addresses. + + Path: POST /fabrics/{fabricName}/actions/shallowDiscovery + """ + + identifiers: ClassVar[List[str]] = [] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + exclude_from_diff: ClassVar[List[str]] = ["password"] + seed_ip_collection: List[str] = Field( + alias="seedIpCollection", + min_length=1, + description="Seed switch IP collection", + ) + max_hop: int = Field(default=2, alias="maxHop", ge=0, le=7, description="Max hop") + platform_type: ShallowDiscoveryPlatformType = Field( + default=ShallowDiscoveryPlatformType.NX_OS, + alias="platformType", + description="Switch platform type (apic is not supported for shallow discovery)", + ) + snmp_v3_auth_protocol: SnmpV3AuthProtocol = Field( + default=SnmpV3AuthProtocol.MD5, + alias="snmpV3AuthProtocol", + description="SNMPv3 authentication protocols", + ) + username: Optional[str] = Field(default=None, description="User name for switch login") + password: Optional[str] = Field(default=None, description="User password for switch login") + remote_credential_store: Optional[RemoteCredentialStore] = Field( + 
default=None, + alias="remoteCredentialStore", + description="Type of credential store", + ) + remote_credential_store_key: Optional[str] = Field( + default=None, + alias="remoteCredentialStoreKey", + description="Remote credential store key", + ) + + @field_validator("seed_ip_collection", mode="before") + @classmethod + def validate_seed_ips(cls, v: List[str]) -> List[str]: + """Validate all seed IPs.""" + if not v: + raise ValueError("At least one seed IP is required") + validated = [] + for ip in v: + result = SwitchValidators.validate_ip_address(ip) + if result: + validated.append(result) + if not validated: + raise ValueError("No valid seed IPs provided") + return validated + + @field_validator("snmp_v3_auth_protocol", mode="before") + @classmethod + def normalize_snmp_auth(cls, v: Union[str, SnmpV3AuthProtocol, None]) -> SnmpV3AuthProtocol: + """Normalize SNMP auth protocol (case-insensitive).""" + return SnmpV3AuthProtocol.normalize(v) + + @field_validator("platform_type", mode="before") + @classmethod + def normalize_platform(cls, v: Union[str, ShallowDiscoveryPlatformType, None]) -> ShallowDiscoveryPlatformType: + """Normalize platform type (case-insensitive).""" + return ShallowDiscoveryPlatformType.normalize(v) + + +class SwitchDiscoveryModel(NDBaseModel): + """ + Discovery data for a single switch returned by the shallow discovery API. + + For N7K user VDC deployments, the serial number format is serialNumber:vDCName. 
+ """ + + identifiers: ClassVar[List[str]] = ["serial_number"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + hostname: str = Field(description="Switch host name") + ip: str = Field(description="Switch IPv4/v6 address") + serial_number: str = Field(alias="serialNumber", description="Switch serial number") + model: str = Field(description="Switch model") + software_version: Optional[str] = Field(default=None, alias="softwareVersion", description="Switch software version") + vdc_id: Optional[int] = Field( + default=None, + alias="vdcId", + ge=0, + description="N7K VDC ID. Mandatory for N7K switch discovery", + ) + vdc_mac: Optional[str] = Field( + default=None, + alias="vdcMac", + description="N7K VDC Mac address. Mandatory for N7K switch discovery", + ) + switch_role: Optional[SwitchRole] = Field(default=None, alias="switchRole", description="Switch role") + + @field_validator("hostname", mode="before") + @classmethod + def validate_host(cls, v: str) -> str: + return SwitchValidators.require_hostname(v) + + @field_validator("ip", mode="before") + @classmethod + def validate_ip(cls, v: str) -> str: + return SwitchValidators.require_ip_address(v) + + @field_validator("serial_number", mode="before") + @classmethod + def validate_serial(cls, v: str) -> str: + return SwitchValidators.require_serial_number(v) + + @field_validator("vdc_mac", mode="before") + @classmethod + def validate_mac(cls, v: Optional[str]) -> Optional[str]: + return SwitchValidators.validate_mac_address(v) + + +class AddSwitchesRequestModel(NDBaseModel): + """ + Imports one or more previously discovered switches into a fabric. 
+ + Path: POST /fabrics/{fabricName}/switches + """ + + identifiers: ClassVar[List[str]] = [] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + exclude_from_diff: ClassVar[List[str]] = ["password"] + switches: List[SwitchDiscoveryModel] = Field(min_length=1, description="The list of switches to be imported") + platform_type: PlatformType = Field( + default=PlatformType.NX_OS, + alias="platformType", + description="Switch platform type", + ) + preserve_config: bool = Field( + default=True, + alias="preserveConfig", + description="Flag to preserve the switch configuration after import", + ) + snmp_v3_auth_protocol: SnmpV3AuthProtocol = Field( + default=SnmpV3AuthProtocol.MD5, + alias="snmpV3AuthProtocol", + description="SNMPv3 authentication protocols", + ) + use_credential_for_write: Optional[bool] = Field( + default=None, + alias="useCredentialForWrite", + description="Flag to use the discovery credential as LAN credential", + ) + username: Optional[str] = Field(default=None, description="User name for switch login") + password: Optional[str] = Field(default=None, description="User password for switch login") + remote_credential_store: Optional[RemoteCredentialStore] = Field( + default=None, + alias="remoteCredentialStore", + description="Type of credential store", + ) + remote_credential_store_key: Optional[str] = Field( + default=None, + alias="remoteCredentialStoreKey", + description="Remote credential store key", + ) + + def to_payload(self) -> Dict[str, Any]: + """Convert to API payload format.""" + payload = self.model_dump(by_alias=True, exclude_none=True) + # Convert nested switches to payload format + if "switches" in payload: + payload["switches"] = [s.to_payload() if hasattr(s, "to_payload") else s for s in self.switches] + return payload + + @field_validator("snmp_v3_auth_protocol", mode="before") + @classmethod + def normalize_snmp_auth(cls, v: Union[str, SnmpV3AuthProtocol, None]) -> 
SnmpV3AuthProtocol: + """Normalize SNMP auth protocol (case-insensitive: MD5, md5, etc.).""" + return SnmpV3AuthProtocol.normalize(v) + + @field_validator("platform_type", mode="before") + @classmethod + def normalize_platform_type(cls, v: Union[str, PlatformType, None]) -> PlatformType: + """Normalize platform type (case-insensitive: NX_OS, nx-os, etc.).""" + return PlatformType.normalize(v) + + +__all__ = [ + "ShallowDiscoveryRequestModel", + "SwitchDiscoveryModel", + "AddSwitchesRequestModel", +] diff --git a/plugins/module_utils/models/manage_switches/enums.py b/plugins/module_utils/models/manage_switches/enums.py new file mode 100644 index 000000000..23a54d010 --- /dev/null +++ b/plugins/module_utils/models/manage_switches/enums.py @@ -0,0 +1,379 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Enumerations for Switch and Inventory Operations. + +Extracted from OpenAPI schema (manage.json) for Nexus Dashboard Manage APIs v1.1.332. +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from enum import Enum +from typing import List, Union + +# ============================================================================= +# ENUMS - Extracted from OpenAPI Schema components/schemas +# ============================================================================= + + +class SwitchRole(str, Enum): + """ + Switch role enumeration. 
+ + Based on: components/schemas/switchRole + Description: The role of the switch, meta is a read-only switch role + """ + + BORDER = "border" + BORDER_GATEWAY = "borderGateway" + BORDER_GATEWAY_SPINE = "borderGatewaySpine" + BORDER_GATEWAY_SUPER_SPINE = "borderGatewaySuperSpine" + BORDER_SPINE = "borderSpine" + BORDER_SUPER_SPINE = "borderSuperSpine" + LEAF = "leaf" + SPINE = "spine" + SUPER_SPINE = "superSpine" + TIER2_LEAF = "tier2Leaf" + TOR = "tor" + ACCESS = "access" + AGGREGATION = "aggregation" + CORE_ROUTER = "coreRouter" + EDGE_ROUTER = "edgeRouter" + META = "meta" # read-only + NEIGHBOR = "neighbor" + + @classmethod + def choices(cls) -> List[str]: + """Return list of valid choices.""" + return [e.value for e in cls] + + @classmethod + def from_user_input(cls, value: str) -> "SwitchRole": + """ + Convert user-friendly input to enum value. + Accepts underscore-separated values like 'border_gateway' -> 'borderGateway' + """ + if not value: + return cls.LEAF + # Try direct match first + try: + return cls(value) + except ValueError: + pass + # Try converting underscore to camelCase + parts = value.lower().split("_") + camel_case = parts[0] + "".join(word.capitalize() for word in parts[1:]) + try: + return cls(camel_case) + except ValueError: + raise ValueError(f"Invalid switch role: {value}. Valid options: {cls.choices()}") + + @classmethod + def normalize(cls, value: Union[str, "SwitchRole", None]) -> "SwitchRole": + """ + Normalize input to enum value (case-insensitive). + Accepts: LEAF, leaf, border_gateway, borderGateway, etc. 
+ """ + if value is None: + return cls.LEAF + if isinstance(value, cls): + return value + if isinstance(value, str): + v_lower = value.lower() + # Try direct match with lowercase + for role in cls: + if role.value.lower() == v_lower: + return role + # Try converting underscore to camelCase + parts = v_lower.split("_") + if len(parts) > 1: + camel_case = parts[0] + "".join(word.capitalize() for word in parts[1:]) + for role in cls: + if role.value == camel_case: + return role + raise ValueError(f"Invalid SwitchRole: {value}. Valid: {cls.choices()}") + + +class SystemMode(str, Enum): + """ + System mode enumeration. + + Based on: components/schemas/systemMode + """ + + NORMAL = "normal" + MAINTENANCE = "maintenance" + MIGRATION = "migration" + INCONSISTENT = "inconsistent" + WAITING = "waiting" + NOT_APPLICABLE = "notApplicable" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + +class PlatformType(str, Enum): + """ + Switch platform type enumeration. + + Used for POST /fabrics/{fabricName}/switches (AddSwitches). + Includes all platform types supported by the add-switches endpoint. + Based on: components/schemas + """ + + NX_OS = "nx-os" + OTHER = "other" + IOS_XE = "ios-xe" + IOS_XR = "ios-xr" + SONIC = "sonic" + APIC = "apic" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + @classmethod + def normalize(cls, value: Union[str, "PlatformType", None]) -> "PlatformType": + """ + Normalize input to enum value (case-insensitive). + Accepts: NX_OS, nx-os, NX-OS, ios_xe, ios-xe, etc. + """ + if value is None: + return cls.NX_OS + if isinstance(value, cls): + return value + if isinstance(value, str): + v_normalized = value.lower().replace("_", "-") + for pt in cls: + if pt.value == v_normalized: + return pt + raise ValueError(f"Invalid PlatformType: {value}. Valid: {cls.choices()}") + + +class ShallowDiscoveryPlatformType(str, Enum): + """ + Platform type for shallow discovery. 
+ + Used for POST /fabrics/{fabricName}/actions/shallowDiscovery only. + Excludes 'apic' which is not supported by the shallowDiscovery endpoint. + Based on: components/schemas/shallowDiscoveryRequest.platformType + """ + + NX_OS = "nx-os" + OTHER = "other" + IOS_XE = "ios-xe" + IOS_XR = "ios-xr" + SONIC = "sonic" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + @classmethod + def normalize(cls, value: Union[str, "ShallowDiscoveryPlatformType", None]) -> "ShallowDiscoveryPlatformType": + """ + Normalize input to enum value (case-insensitive). + Accepts: NX_OS, nx-os, NX-OS, ios_xe, ios-xe, etc. + """ + if value is None: + return cls.NX_OS + if isinstance(value, cls): + return value + if isinstance(value, str): + v_normalized = value.lower().replace("_", "-") + for pt in cls: + if pt.value == v_normalized: + return pt + raise ValueError(f"Invalid ShallowDiscoveryPlatformType: {value}. Valid: {cls.choices()}") + + +class SnmpV3AuthProtocol(str, Enum): + """ + SNMPv3 authentication protocols. + + Based on: components/schemas/snmpV3AuthProtocol and schemas-snmpV3AuthProtocol + """ + + MD5 = "md5" + SHA = "sha" + MD5_DES = "md5-des" + MD5_AES = "md5-aes" + SHA_AES = "sha-aes" + SHA_DES = "sha-des" + SHA_AES_256 = "sha-aes-256" + SHA_224 = "sha-224" + SHA_224_AES = "sha-224-aes" + SHA_224_AES_256 = "sha-224-aes-256" + SHA_256 = "sha-256" + SHA_256_AES = "sha-256-aes" + SHA_256_AES_256 = "sha-256-aes-256" + SHA_384 = "sha-384" + SHA_384_AES = "sha-384-aes" + SHA_384_AES_256 = "sha-384-aes-256" + SHA_512 = "sha-512" + SHA_512_AES = "sha-512-aes" + SHA_512_AES_256 = "sha-512-aes-256" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + @classmethod + def normalize(cls, value: Union[str, "SnmpV3AuthProtocol", None]) -> "SnmpV3AuthProtocol": + """ + Normalize input to enum value (case-insensitive). + Accepts: MD5, md5, MD5_DES, md5-des, etc. 
+ """ + if value is None: + return cls.MD5 + if isinstance(value, cls): + return value + if isinstance(value, str): + v_normalized = value.lower().replace("_", "-") + for proto in cls: + if proto.value == v_normalized: + return proto + raise ValueError(f"Invalid SnmpV3AuthProtocol: {value}. Valid: {cls.choices()}") + + +class DiscoveryStatus(str, Enum): + """ + Switch discovery status. + + Based on: components/schemas/additionalSwitchData.discoveryStatus + """ + + OK = "ok" + DISCOVERING = "discovering" + REDISCOVERING = "rediscovering" + DEVICE_SHUTTING_DOWN = "deviceShuttingDown" + UNREACHABLE = "unreachable" + IP_ADDRESS_CHANGE = "ipAddressChange" + DISCOVERY_TIMEOUT = "discoveryTimeout" + RETRYING = "retrying" + SSH_SESSION_ERROR = "sshSessionError" + TIMEOUT = "timeout" + UNKNOWN_USER_PASSWORD = "unknownUserPassword" + CONNECTION_ERROR = "connectionError" + NOT_APPLICABLE = "notApplicable" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + +class ConfigSyncStatus(str, Enum): + """ + Configuration sync status. + + Based on: components/schemas/switchConfigSyncStatus + """ + + DEPLOYED = "deployed" + DEPLOYMENT_IN_PROGRESS = "deploymentInProgress" + FAILED = "failed" + IN_PROGRESS = "inProgress" + IN_SYNC = "inSync" + NOT_APPLICABLE = "notApplicable" + OUT_OF_SYNC = "outOfSync" + PENDING = "pending" + PREVIEW_IN_PROGRESS = "previewInProgress" + SUCCESS = "success" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + +class VpcRole(str, Enum): + """ + VPC role enumeration. + + Based on: components/schemas/schemas-vpcRole + """ + + PRIMARY = "primary" + SECONDARY = "secondary" + OPERATIONAL_PRIMARY = "operationalPrimary" + OPERATIONAL_SECONDARY = "operationalSecondary" + NONE_ESTABLISHED = "noneEstablished" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + +class RemoteCredentialStore(str, Enum): + """ + Remote credential store type. 
+ + Based on: components/schemas/remoteCredentialStore + """ + + LOCAL = "local" + CYBERARK = "cyberark" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + +class AnomalyLevel(str, Enum): + """ + Anomaly level classification. + + Based on: components/schemas/anomalyLevel + """ + + CRITICAL = "critical" + MAJOR = "major" + MINOR = "minor" + WARNING = "warning" + HEALTHY = "healthy" + NOT_APPLICABLE = "notApplicable" + UNKNOWN = "unknown" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + +class AdvisoryLevel(str, Enum): + """ + Advisory level classification. + + Based on: components/schemas/advisoryLevel + """ + + CRITICAL = "critical" + MAJOR = "major" + MINOR = "minor" + WARNING = "warning" + HEALTHY = "healthy" + NONE = "none" + NOT_APPLICABLE = "notApplicable" + + @classmethod + def choices(cls) -> List[str]: + return [e.value for e in cls] + + +__all__ = [ + "SwitchRole", + "SystemMode", + "PlatformType", + "ShallowDiscoveryPlatformType", + "SnmpV3AuthProtocol", + "DiscoveryStatus", + "ConfigSyncStatus", + "VpcRole", + "RemoteCredentialStore", + "AnomalyLevel", + "AdvisoryLevel", +] diff --git a/plugins/module_utils/models/manage_switches/preprovision_models.py b/plugins/module_utils/models/manage_switches/preprovision_models.py new file mode 100644 index 000000000..597eebab3 --- /dev/null +++ b/plugins/module_utils/models/manage_switches/preprovision_models.py @@ -0,0 +1,192 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Pre-provision switch models. + +Based on OpenAPI schema for Nexus Dashboard Manage APIs v1.1.332. 
+""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from typing import Any, Dict, List, Optional, ClassVar, Literal + +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, + computed_field, + field_validator, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel + +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( + RemoteCredentialStore, + SnmpV3AuthProtocol, + SwitchRole, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import ( + SwitchValidators, +) + + +class PreProvisionSwitchModel(NDBaseModel): + """ + Request payload for pre-provisioning a single switch in the fabric. + + Path: POST /fabrics/{fabricName}/switchActions/preProvision + """ + + identifiers: ClassVar[List[str]] = ["serial_number"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] + + # --- preProvisionSpecific fields (required) --- + serial_number: str = Field( + alias="serialNumber", + description="Serial number of the switch to pre-provision", + ) + hostname: str = Field( + description="Hostname of the switch to pre-provision", + ) + ip: str = Field( + description="IP address of the switch to pre-provision", + ) + + # --- preProvisionSpecific fields (optional) --- + dhcp_bootstrap_ip: Optional[str] = Field( + default=None, + alias="dhcpBootstrapIp", + description="Used for device day-0 bring-up when using inband reachability", + ) + seed_switch: bool = Field( + default=False, + alias="seedSwitch", + description="Use as seed switch", + ) + + # --- bootstrapBase fields (required) --- + model: str = Field( + description="Model of the switch to pre-provision", + ) + software_version: str = Field( + alias="softwareVersion", + description="Software 
version of the switch to pre-provision", + ) + gateway_ip_mask: str = Field( + alias="gatewayIpMask", + description="Gateway IP address with mask (e.g., 10.23.244.1/24)", + ) + + # --- bootstrapBase fields (optional) --- + image_policy: Optional[str] = Field( + default=None, + alias="imagePolicy", + description="Image policy associated with the switch during pre-provision", + ) + switch_role: Optional[SwitchRole] = Field( + default=None, + alias="switchRole", + description="Role to assign to the switch", + ) + data: Optional[Dict[str, Any]] = Field( + default=None, + description="Pre-provision configuration data block (gatewayIpMask, models)", + ) + + # --- bootstrapCredential fields (required) --- + password: str = Field( + description="Switch password to be set during pre-provision for admin user", + ) + discovery_auth_protocol: SnmpV3AuthProtocol = Field( + alias="discoveryAuthProtocol", + description="SNMP authentication protocol for discovery", + ) + + # --- bootstrapCredential fields (optional) --- + discovery_username: Optional[str] = Field( + default=None, + alias="discoveryUsername", + description="Username for switch discovery post pre-provision", + ) + discovery_password: Optional[str] = Field( + default=None, + alias="discoveryPassword", + description="Password for switch discovery post pre-provision", + ) + remote_credential_store: RemoteCredentialStore = Field( + default=RemoteCredentialStore.LOCAL, + alias="remoteCredentialStore", + description="Type of credential store for discovery credentials", + ) + remote_credential_store_key: Optional[str] = Field( + default=None, + alias="remoteCredentialStoreKey", + description="Remote credential store key for discovery credentials", + ) + + # --- Validators --- + + @field_validator("ip", "dhcp_bootstrap_ip", mode="before") + @classmethod + def validate_ip(cls, v: Optional[str]) -> Optional[str]: + return SwitchValidators.validate_ip_address(v) + + @field_validator("hostname", mode="before") + @classmethod + 
def validate_host(cls, v: str) -> str: + return SwitchValidators.require_hostname(v) + + @field_validator("serial_number", mode="before") + @classmethod + def validate_serial(cls, v: str) -> str: + return SwitchValidators.require_serial_number(v) + + @field_validator("gateway_ip_mask", mode="before") + @classmethod + def validate_gateway(cls, v: str) -> str: + result = SwitchValidators.validate_cidr(v) + if result is None: + raise ValueError("gatewayIpMask must include subnet mask (e.g., 10.23.244.1/24)") + return result + + @computed_field(alias="useNewCredentials") + @property + def use_new_credentials(self) -> bool: + """Derive useNewCredentials from discoveryUsername and discoveryPassword.""" + return bool(self.discovery_username and self.discovery_password) + + def to_payload(self) -> Dict[str, Any]: + """Convert to API payload format matching preProvision spec.""" + return self.model_dump(by_alias=True, exclude_none=True) + + @classmethod + def from_response(cls, response: Dict[str, Any]) -> "PreProvisionSwitchModel": + """Create model instance from API response.""" + return cls.model_validate(response) + + +class PreProvisionSwitchesRequestModel(NDBaseModel): + """ + Request body wrapping a list of pre-provision payloads for bulk switch pre-provisioning. 
+ + Path: POST /fabrics/{fabricName}/switchActions/preProvision + """ + + identifiers: ClassVar[List[str]] = [] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + switches: List[PreProvisionSwitchModel] = Field( + description="PowerOn Auto Provisioning switches", + ) + + def to_payload(self) -> Dict[str, Any]: + """Convert to API payload format.""" + return {"switches": [s.to_payload() for s in self.switches]} + + +__all__ = [ + "PreProvisionSwitchModel", + "PreProvisionSwitchesRequestModel", +] diff --git a/plugins/module_utils/models/manage_switches/rma_models.py b/plugins/module_utils/models/manage_switches/rma_models.py new file mode 100644 index 000000000..48b2d89b0 --- /dev/null +++ b/plugins/module_utils/models/manage_switches/rma_models.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""RMA (Return Material Authorization) switch models. + +Based on OpenAPI schema for Nexus Dashboard Manage APIs v1.1.332. +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from typing import Any, Dict, List, Optional, ClassVar, Literal + +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, + computed_field, + field_validator, + model_validator, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel + +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( + RemoteCredentialStore, + SnmpV3AuthProtocol, + SwitchRole, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import ( + SwitchValidators, +) + + +class RMASwitchModel(NDBaseModel): + """ + Request payload for provisioning a replacement (RMA) switch via bootstrap. 
+ + Path: POST /fabrics/{fabricName}/switches/{switchId}/actions/provisionRMA + """ + + identifiers: ClassVar[List[str]] = ["new_switch_id"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + exclude_from_diff: ClassVar[List[str]] = ["password", "discovery_password"] + # From bootstrapBase (all sourced from bootstrap API, not user config) + gateway_ip_mask: Optional[str] = Field(default=None, alias="gatewayIpMask", description="Gateway IP address with mask") + model: Optional[str] = Field(default=None, description="Model of the bootstrap switch") + software_version: Optional[str] = Field( + default=None, + alias="softwareVersion", + description="Software version of the bootstrap switch", + ) + image_policy: Optional[str] = Field( + default=None, + alias="imagePolicy", + description="Image policy associated with the switch during bootstrap", + ) + switch_role: Optional[SwitchRole] = Field(default=None, alias="switchRole") + + # From bootstrapCredential + password: str = Field(description="Switch password to be set during bootstrap for admin user") + discovery_auth_protocol: SnmpV3AuthProtocol = Field(alias="discoveryAuthProtocol") + discovery_username: Optional[str] = Field(default=None, alias="discoveryUsername") + discovery_password: Optional[str] = Field(default=None, alias="discoveryPassword") + remote_credential_store: RemoteCredentialStore = Field(default=RemoteCredentialStore.LOCAL, alias="remoteCredentialStore") + remote_credential_store_key: Optional[str] = Field(default=None, alias="remoteCredentialStoreKey") + + # From RMASpecific + hostname: str = Field(description="Hostname of the switch") + ip: str = Field(description="IP address of the switch") + new_switch_id: str = Field(alias="newSwitchId", description="SwitchId (serial number) of the replacement switch") + old_switch_id: str = Field(alias="oldSwitchId", description="SwitchId (serial number) of the switch being replaced") + 
public_key: str = Field(alias="publicKey", description="Public Key") + finger_print: str = Field(alias="fingerPrint", description="Fingerprint") + dhcp_bootstrap_ip: Optional[str] = Field(default=None, alias="dhcpBootstrapIp") + seed_switch: bool = Field(default=False, alias="seedSwitch") + data: Optional[Dict[str, Any]] = Field( + default=None, + description="Bootstrap configuration data block (gatewayIpMask, models)", + ) + + @field_validator("gateway_ip_mask", mode="before") + @classmethod + def validate_gateway(cls, v: Optional[str]) -> Optional[str]: + return SwitchValidators.validate_cidr_optional(v) + + @field_validator("hostname", mode="before") + @classmethod + def validate_host(cls, v: str) -> str: + return SwitchValidators.require_hostname(v) + + @field_validator("ip", "dhcp_bootstrap_ip", mode="before") + @classmethod + def validate_ip(cls, v: Optional[str]) -> Optional[str]: + return SwitchValidators.validate_ip_address(v) + + @field_validator("new_switch_id", mode="before") + @classmethod + def validate_serial(cls, v: str) -> str: + return SwitchValidators.require_serial_number(v, "new_switch_id") + + @field_validator("old_switch_id", mode="before") + @classmethod + def validate_old_serial(cls, v: str) -> str: + return SwitchValidators.require_serial_number(v, "old_switch_id") + + @computed_field(alias="useNewCredentials") + @property + def use_new_credentials(self) -> bool: + """Derive useNewCredentials from discoveryUsername and discoveryPassword.""" + return bool(self.discovery_username and self.discovery_password) + + @model_validator(mode="after") + def validate_rma_credentials(self) -> "RMASwitchModel": + """Validate RMA credential configuration logic.""" + if self.use_new_credentials: + if self.remote_credential_store == RemoteCredentialStore.CYBERARK: + if not self.remote_credential_store_key: + raise ValueError("remote_credential_store_key is required when remote_credential_store is 'cyberark'") + elif self.remote_credential_store == 
RemoteCredentialStore.LOCAL: + if not self.discovery_username or not self.discovery_password: + raise ValueError( + "discovery_username and discovery_password are required when remote_credential_store is 'local' and use_new_credentials is True" + ) + return self + + def to_payload(self) -> Dict[str, Any]: + """Convert to API payload format.""" + return self.model_dump(by_alias=True, exclude_none=True) + + @classmethod + def from_response(cls, response: Dict[str, Any]) -> "RMASwitchModel": + """Create model instance from API response.""" + return cls.model_validate(response) + + +__all__ = [ + "RMASwitchModel", +] diff --git a/plugins/module_utils/models/manage_switches/switch_actions_models.py b/plugins/module_utils/models/manage_switches/switch_actions_models.py new file mode 100644 index 000000000..608cc2abd --- /dev/null +++ b/plugins/module_utils/models/manage_switches/switch_actions_models.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Switch action models (serial number change, IDs list, credentials). + +Based on OpenAPI schema for Nexus Dashboard Manage APIs v1.1.332. +""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from typing import List, Literal, Optional, ClassVar + +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, + field_validator, + model_validator, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel + +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.validators import ( + SwitchValidators, +) + + +class SwitchCredentialsRequestModel(NDBaseModel): + """ + Request body to save LAN credentials for one or more fabric switches. + + Supports local credentials or remote credential store (such as CyberArk). 
+ Path: POST /api/v1/manage/credentials/switches + """ + + identifiers: ClassVar[List[str]] = [] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "singleton" + + switch_ids: List[str] = Field( + alias="switchIds", + min_length=1, + description="List of switch serial numbers", + ) + switch_username: Optional[str] = Field(default=None, alias="switchUsername", description="Switch username") + switch_password: Optional[str] = Field(default=None, alias="switchPassword", description="Switch password") + remote_credential_store_key: Optional[str] = Field( + default=None, + alias="remoteCredentialStoreKey", + description="Remote credential store key (e.g. CyberArk path)", + ) + remote_credential_store_type: Optional[str] = Field( + default=None, + alias="remoteCredentialStoreType", + description="Remote credential store type (e.g. 'cyberark')", + ) + + @field_validator("switch_ids", mode="before") + @classmethod + def validate_switch_ids(cls, v: List[str]) -> List[str]: + """Validate all switch IDs.""" + if not v: + raise ValueError("At least one switch ID is required") + validated = [] + for serial in v: + result = SwitchValidators.validate_serial_number(serial) + if result: + validated.append(result) + if not validated: + raise ValueError("No valid switch IDs provided") + return validated + + @model_validator(mode="after") + def validate_credentials(self) -> "SwitchCredentialsRequestModel": + """Ensure either local or remote credentials are provided.""" + has_local = self.switch_username is not None and self.switch_password is not None + has_remote = self.remote_credential_store_key is not None and self.remote_credential_store_type is not None + if not has_local and not has_remote: + raise ValueError( + "Either local credentials (switchUsername + switchPassword) " + "or remote credentials (remoteCredentialStoreKey + remoteCredentialStoreType) must be provided" + ) + return self + + +class 
ChangeSwitchSerialNumberRequestModel(NDBaseModel): + """ + Request body to update the serial number of an existing fabric switch. + + Path: POST /fabrics/{fabricName}/switches/{switchId}/actions/changeSwitchSerialNumber + """ + + identifiers: ClassVar[List[str]] = ["new_switch_id"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + new_switch_id: str = Field(alias="newSwitchId", description="New switchId") + + @field_validator("new_switch_id", mode="before") + @classmethod + def validate_serial(cls, v: str) -> str: + result = SwitchValidators.validate_serial_number(v) + if result is None: + raise ValueError("new_switch_id cannot be empty") + return result + + +__all__ = [ + "SwitchCredentialsRequestModel", + "ChangeSwitchSerialNumberRequestModel", +] diff --git a/plugins/module_utils/models/manage_switches/switch_data_models.py b/plugins/module_utils/models/manage_switches/switch_data_models.py new file mode 100644 index 000000000..d6e8b4f7f --- /dev/null +++ b/plugins/module_utils/models/manage_switches/switch_data_models.py @@ -0,0 +1,342 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Switch inventory data models (API response representations). + +Based on OpenAPI schema for Nexus Dashboard Manage APIs v1.1.332. 
+""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +from typing import Any, Dict, List, Optional, ClassVar, Literal, Union + +from ansible_collections.cisco.nd.plugins.module_utils.common.pydantic_compat import ( + Field, + field_validator, +) +from ansible_collections.cisco.nd.plugins.module_utils.models.base import NDBaseModel +from ansible_collections.cisco.nd.plugins.module_utils.models.nested import ( + NDNestedModel, +) + +from ansible_collections.cisco.nd.plugins.module_utils.models.manage_switches.enums import ( + AdvisoryLevel, + AnomalyLevel, + ConfigSyncStatus, + DiscoveryStatus, + PlatformType, + RemoteCredentialStore, + SwitchRole, + SystemMode, + VpcRole, +) +from .validators import SwitchValidators + + +class TelemetryIpCollection(NDNestedModel): + """ + Inband and out-of-band telemetry IP addresses for a switch. + """ + + identifiers: ClassVar[List[str]] = [] + inband_ipv4_address: Optional[str] = Field(default=None, alias="inbandIpV4Address", description="Inband IPv4 address") + inband_ipv6_address: Optional[str] = Field(default=None, alias="inbandIpV6Address", description="Inband IPv6 address") + out_of_band_ipv4_address: Optional[str] = Field( + default=None, + alias="outOfBandIpV4Address", + description="Out of band IPv4 address", + ) + out_of_band_ipv6_address: Optional[str] = Field( + default=None, + alias="outOfBandIpV6Address", + description="Out of band IPv6 address", + ) + + +class VpcData(NDNestedModel): + """ + vPC pair configuration and operational status for a switch. 
+ """ + + identifiers: ClassVar[List[str]] = [] + vpc_domain: int = Field(alias="vpcDomain", ge=1, le=1000, description="vPC domain ID") + peer_switch_id: str = Field(alias="peerSwitchId", description="vPC peer switch serial number") + consistent_status: Optional[bool] = Field( + default=None, + alias="consistentStatus", + description="Flag to indicate the vPC status is consistent", + ) + intended_peer_name: Optional[str] = Field( + default=None, + alias="intendedPeerName", + description="Intended vPC host name for pre-provisioned peer switch", + ) + keep_alive_status: Optional[str] = Field(default=None, alias="keepAliveStatus", description="vPC peer keep alive status") + peer_link_status: Optional[str] = Field(default=None, alias="peerLinkStatus", description="vPC peer link status") + peer_name: Optional[str] = Field(default=None, alias="peerName", description="vPC peer switch name") + vpc_role: Optional[VpcRole] = Field(default=None, alias="vpcRole", description="The vPC role") + + @field_validator("peer_switch_id", mode="before") + @classmethod + def validate_peer_serial(cls, v: str) -> str: + return SwitchValidators.require_serial_number(v, "peer_switch_id") + + +class SwitchMetadata(NDNestedModel): + """ + Internal database identifiers associated with a switch record. + """ + + identifiers: ClassVar[List[str]] = [] + switch_db_id: Optional[int] = Field(default=None, alias="switchDbId", description="Database Id of the switch") + switch_uuid: Optional[str] = Field(default=None, alias="switchUuid", description="Internal unique Id of the switch") + + +class AdditionalSwitchData(NDNestedModel): + """ + Platform-specific additional data for NX-OS switches. 
+ """ + + identifiers: ClassVar[List[str]] = [] + usage: Optional[str] = Field(default="others", description="The usage of additional data") + config_sync_status: Optional[ConfigSyncStatus] = Field(default=None, alias="configSyncStatus", description="Configuration sync status") + discovery_status: Optional[DiscoveryStatus] = Field(default=None, alias="discoveryStatus", description="Discovery status") + domain_name: Optional[str] = Field(default=None, alias="domainName", description="Domain name") + smart_switch: Optional[bool] = Field( + default=None, + alias="smartSwitch", + description="Flag that indicates if the switch is equipped with DPUs or not", + ) + hypershield_connectivity_status: Optional[str] = Field( + default=None, + alias="hypershieldConnectivityStatus", + description="Smart switch connectivity status to hypershield controller", + ) + hypershield_tenant: Optional[str] = Field(default=None, alias="hypershieldTenant", description="Hypershield tenant name") + hypershield_integration_name: Optional[str] = Field( + default=None, + alias="hypershieldIntegrationName", + description="Hypershield Integration Id", + ) + source_interface_name: Optional[str] = Field( + default=None, + alias="sourceInterfaceName", + description="Source interface for switch discovery", + ) + source_vrf_name: Optional[str] = Field( + default=None, + alias="sourceVrfName", + description="Source VRF for switch discovery", + ) + platform_type: Optional[PlatformType] = Field(default=None, alias="platformType", description="Platform type of the switch") + discovered_system_mode: Optional[SystemMode] = Field(default=None, alias="discoveredSystemMode", description="Discovered system mode") + intended_system_mode: Optional[SystemMode] = Field(default=None, alias="intendedSystemMode", description="Intended system mode") + scalable_unit: Optional[str] = Field(default=None, alias="scalableUnit", description="Name of the scalable unit") + system_mode: Optional[SystemMode] = Field(default=None, 
alias="systemMode", description="System mode") + vendor: Optional[str] = Field(default=None, description="Vendor of the switch") + username: Optional[str] = Field(default=None, description="Discovery user name") + remote_credential_store: Optional[RemoteCredentialStore] = Field(default=None, alias="remoteCredentialStore") + meta: Optional[SwitchMetadata] = Field(default=None, description="Switch metadata") + + +class AdditionalAciSwitchData(NDNestedModel): + """ + Platform-specific additional data for ACI leaf and spine switches. + """ + + identifiers: ClassVar[List[str]] = [] + usage: Optional[str] = Field(default="aci", description="The usage of additional data") + admin_status: Optional[Literal["inService", "outOfService"]] = Field(default=None, alias="adminStatus", description="Admin status") + health_score: Optional[int] = Field( + default=None, + alias="healthScore", + ge=1, + le=100, + description="Switch health score", + ) + last_reload_time: Optional[str] = Field( + default=None, + alias="lastReloadTime", + description="Timestamp when the system is last reloaded", + ) + last_software_update_time: Optional[str] = Field( + default=None, + alias="lastSoftwareUpdateTime", + description="Timestamp when the software is last updated", + ) + node_id: Optional[int] = Field(default=None, alias="nodeId", ge=1, description="Node ID") + node_status: Optional[Literal["active", "inActive"]] = Field(default=None, alias="nodeStatus", description="Node status") + pod_id: Optional[int] = Field(default=None, alias="podId", ge=1, description="Pod ID") + remote_leaf_group_name: Optional[str] = Field(default=None, alias="remoteLeafGroupName", description="Remote leaf group name") + switch_added: Optional[str] = Field( + default=None, + alias="switchAdded", + description="Timestamp when the switch is added", + ) + tep_pool: Optional[str] = Field(default=None, alias="tepPool", description="TEP IP pool") + + +class Metadata(NDNestedModel): + """ + Pagination and result-count 
metadata from a list API response. + """ + + identifiers: ClassVar[List[str]] = [] + + counts: Optional[Dict[str, int]] = Field(default=None, description="Count information including total and remaining") + + +class SwitchDataModel(NDBaseModel): + """ + Inventory record for a single switch as returned by the fabric switches API. + + Path: GET /fabrics/{fabricName}/switches + """ + + identifiers: ClassVar[List[str]] = ["switch_id"] + identifier_strategy: ClassVar[Optional[Literal["single", "composite", "hierarchical", "singleton"]]] = "single" + exclude_from_diff: ClassVar[set] = {"system_up_time", "anomaly_level", "advisory_level", "alert_suspend"} + switch_id: str = Field( + alias="switchId", + description="Serial number of Switch or Node Id of ACI switch", + ) + serial_number: Optional[str] = Field( + default=None, + alias="serialNumber", + description="Serial number of switch or APIC controller node", + ) + additional_data: Optional[Union[AdditionalSwitchData, AdditionalAciSwitchData]] = Field( + default=None, alias="additionalData", description="Additional switch data" + ) + advisory_level: Optional[AdvisoryLevel] = Field(default=None, alias="advisoryLevel") + anomaly_level: Optional[AnomalyLevel] = Field(default=None, alias="anomalyLevel") + alert_suspend: Optional[str] = Field(default=None, alias="alertSuspend") + fabric_management_ip: Optional[str] = Field( + default=None, + alias="fabricManagementIp", + description="Switch IPv4/v6 address used for management", + ) + fabric_name: Optional[str] = Field(default=None, alias="fabricName", description="Fabric name", max_length=64) + fabric_type: Optional[str] = Field(default=None, alias="fabricType", description="Fabric type") + hostname: Optional[str] = Field(default=None, description="Switch host name") + model: Optional[str] = Field(default=None, description="Model of switch or APIC controller node") + software_version: Optional[str] = Field( + default=None, + alias="softwareVersion", + description="Software 
version of switch or APIC controller node", + ) + switch_role: Optional[SwitchRole] = Field(default=None, alias="switchRole") + system_up_time: Optional[str] = Field(default=None, alias="systemUpTime", description="System up time") + vpc_configured: Optional[bool] = Field( + default=None, + alias="vpcConfigured", + description="Flag to indicate switch is part of a vPC domain", + ) + vpc_data: Optional[VpcData] = Field(default=None, alias="vpcData") + telemetry_ip_collection: Optional[TelemetryIpCollection] = Field(default=None, alias="telemetryIpCollection") + + @field_validator("additional_data", mode="before") + @classmethod + def parse_additional_data(cls, v: Any) -> Any: + """Route additionalData to the correct nested model. + + The NDFC API may omit the ``usage`` field for non-ACI switches. + Default to ``"others"`` so Pydantic selects ``AdditionalSwitchData`` + and coerces ``discoveryStatus`` / ``systemMode`` as proper enums. + """ + if v is None or not isinstance(v, dict): + return v + if "usage" not in v: + v = {**v, "usage": "others"} + return v + + @field_validator("switch_id", mode="before") + @classmethod + def validate_switch_id(cls, v: str) -> str: + return SwitchValidators.require_serial_number(v, "switch_id") + + @field_validator("fabric_management_ip", mode="before") + @classmethod + def validate_mgmt_ip(cls, v: Optional[str]) -> Optional[str]: + return SwitchValidators.validate_ip_address(v) + + def to_payload(self) -> Dict[str, Any]: + """Convert to API payload format.""" + return self.model_dump(by_alias=True, exclude_none=True) + + @classmethod + def from_response(cls, response: Dict[str, Any]) -> "SwitchDataModel": + """ + Create model instance from API response. + + Handles two response formats: + 1. Inventory API format: {switchId, fabricManagementIp, switchRole, ...} + 2. 
Discovery API format: {serialNumber, ip, hostname, model, softwareVersion, status, ...} + + Args: + response: Response dict from either inventory or discovery API + + Returns: + SwitchDataModel instance + """ + # Detect format and transform if needed + if "switchId" in response or "fabricManagementIp" in response: + # Already in inventory format - use as-is + return cls.model_validate(response) + + # Discovery format - transform to inventory format + transformed = { + "switchId": response.get("serialNumber"), + "serialNumber": response.get("serialNumber"), + "fabricManagementIp": response.get("ip"), + "hostname": response.get("hostname"), + "model": response.get("model"), + "softwareVersion": response.get("softwareVersion"), + "mode": response.get("mode", "Normal"), + } + + # Only add switchRole if present in response (avoid overwriting with None) + if "switchRole" in response: + transformed["switchRole"] = response["switchRole"] + elif "role" in response: + transformed["switchRole"] = response["role"] + + return cls.model_validate(transformed) + + def to_config_dict(self) -> Dict[str, Any]: + """Return this inventory record using the 7 standard user-facing fields. + + Produces a consistent dict for previous/current output keys. All 7 + fields are always present (None when not available). Credential fields + are never included. + + Returns: + Dict with keys: seed_ip, serial_number, hostname, model, + role, software_version, mode. 
+ """ + ad = self.additional_data + return { + "seed_ip": self.fabric_management_ip or self.switch_id or "", + "serial_number": self.serial_number, + "hostname": self.hostname, + "model": self.model, + "role": self.switch_role, + "software_version": self.software_version, + "mode": (ad.system_mode if ad and hasattr(ad, "system_mode") else None), + } + + +__all__ = [ + "TelemetryIpCollection", + "VpcData", + "SwitchMetadata", + "AdditionalSwitchData", + "AdditionalAciSwitchData", + "Metadata", + "SwitchDataModel", +] diff --git a/plugins/module_utils/models/manage_switches/validators.py b/plugins/module_utils/models/manage_switches/validators.py new file mode 100644 index 000000000..5c3160420 --- /dev/null +++ b/plugins/module_utils/models/manage_switches/validators.py @@ -0,0 +1,230 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +"""Common validators for switch-related fields.""" + +from __future__ import absolute_import, division, print_function + +__metaclass__ = type + +import re +from ipaddress import ip_address, ip_network +from typing import Optional + + +class SwitchValidators: + """ + Common validators for switch-related fields. + + The ``validate_*`` static methods are safe to call from Pydantic + ``@field_validator`` bodies. They return ``None`` when the value is + absent and raise ``ValueError`` on bad input. + + The ``require_*`` helpers are convenience wrappers that additionally + raise ``ValueError`` when the result is ``None`` (i.e. the field was + empty after stripping). Use them in place of the repetitive + ``result = …; if result is None: raise …`` pattern. + + ``check_discovery_credentials_pair`` is a shared ``@model_validator`` + helper that enforces the mutual-presence rule for discovery credentials. 
class SwitchValidators:
    """
    Common validators for switch-related fields.

    The ``validate_*`` static methods are nullable and safe to call from
    Pydantic ``@field_validator`` bodies: they return ``None`` when the value
    is absent (or empty after stripping) and raise ``ValueError`` on bad
    input.

    The ``require_*`` helpers wrap them and additionally raise ``ValueError``
    when the result is ``None``, replacing the repetitive
    ``result = …; if result is None: raise …`` pattern.

    ``check_discovery_credentials_pair`` is a shared ``@model_validator``
    helper enforcing the mutual-presence rule for discovery credentials.
    """

    # ------------------------------------------------------------------
    # Internal helper
    # ------------------------------------------------------------------

    @staticmethod
    def _clean(value):
        """Return the stripped string form of *value*, or None when absent/empty."""
        if value is None:
            return None
        text = str(value).strip()
        return text if text else None

    # ------------------------------------------------------------------
    # Low-level nullable validators (return None when absent)
    # ------------------------------------------------------------------

    @staticmethod
    def validate_ip_address(v: Optional[str]) -> Optional[str]:
        """Validate IPv4 or IPv6 address."""
        text = SwitchValidators._clean(v)
        if text is None:
            return None
        try:
            ip_address(text)
        except ValueError:
            raise ValueError(f"Invalid IP address format: {text}")
        return text

    @staticmethod
    def validate_cidr(v: Optional[str]) -> Optional[str]:
        """Validate CIDR notation (IP/mask)."""
        text = SwitchValidators._clean(v)
        if text is None:
            return None
        if "/" not in text:
            raise ValueError(f"CIDR notation required (IP/mask format): {text}")
        try:
            ip_network(text, strict=False)
        except ValueError:
            raise ValueError(f"Invalid CIDR format: {text}")
        return text

    @staticmethod
    def validate_serial_number(v: Optional[str]) -> Optional[str]:
        """Validate switch serial number format."""
        text = SwitchValidators._clean(v)
        if text is None:
            return None
        # Serial numbers are alphanumeric with optional hyphens/underscores.
        if re.fullmatch(r"[A-Za-z0-9_-]+", text) is None:
            raise ValueError(f"Serial number must be alphanumeric with optional hyphens/underscores: {text}")
        return text

    @staticmethod
    def validate_hostname(v: Optional[str]) -> Optional[str]:
        """Validate hostname format (RFC 1123 style)."""
        text = SwitchValidators._clean(v)
        if text is None:
            return None
        if len(text) > 255:
            raise ValueError("Hostname cannot exceed 255 characters")
        # Must start alphanumeric; then alphanumeric, dots, hyphens, underscores.
        if re.fullmatch(r"[a-zA-Z0-9][a-zA-Z0-9._-]*", text) is None:
            raise ValueError(f"Invalid hostname format. Must start with alphanumeric and contain only alphanumeric, dots, hyphens, underscores: {text}")
        if text.startswith(".") or text.endswith(".") or ".." in text:
            raise ValueError(f"Invalid hostname format (dots): {text}")
        return text

    @staticmethod
    def validate_mac_address(v: Optional[str]) -> Optional[str]:
        """Validate MAC address format (colon- or hyphen-separated)."""
        text = SwitchValidators._clean(v)
        if text is None:
            return None
        if re.fullmatch(r"([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})", text) is None:
            raise ValueError(f"Invalid MAC address format: {text}")
        return text

    @staticmethod
    def validate_vpc_domain(v: Optional[int]) -> Optional[int]:
        """Validate VPC domain ID (1-1000)."""
        if v is None:
            return None
        if v < 1 or v > 1000:
            raise ValueError(f"VPC domain must be between 1 and 1000: {v}")
        return v

    # ------------------------------------------------------------------
    # Required-field helpers (raise ValueError when value is absent)
    # ------------------------------------------------------------------

    @staticmethod
    def require_serial_number(v: str, field_name: str = "serial_number") -> str:
        """Validate and require a non-empty serial number.

        Args:
            v: Raw serial number value from Pydantic.
            field_name: Field name used in the error message.

        Returns:
            Validated serial number string.

        Raises:
            ValueError: When the value is empty or contains invalid characters.
        """
        checked = SwitchValidators.validate_serial_number(v)
        if checked is None:
            raise ValueError(f"{field_name} cannot be empty")
        return checked

    @staticmethod
    def require_hostname(v: str) -> str:
        """Validate and require a non-empty hostname.

        Args:
            v: Raw hostname value from Pydantic.

        Returns:
            Validated hostname string.

        Raises:
            ValueError: When the value is empty or fails RFC 1123 checks.
        """
        checked = SwitchValidators.validate_hostname(v)
        if checked is None:
            raise ValueError("hostname cannot be empty")
        return checked

    @staticmethod
    def require_ip_address(v: str) -> str:
        """Validate and require a non-empty IP address.

        Args:
            v: Raw IP address value from Pydantic.

        Returns:
            Validated IP address string.

        Raises:
            ValueError: When the value is empty or not a valid IPv4/v6 address.
        """
        checked = SwitchValidators.validate_ip_address(v)
        if checked is None:
            raise ValueError(f"Invalid IP address: {v}")
        return checked

    @staticmethod
    def validate_cidr_optional(v: Optional[str]) -> Optional[str]:
        """Validate an optional CIDR string; pass through ``None`` unchanged.

        Args:
            v: Raw CIDR value or ``None``.

        Returns:
            Validated CIDR string, or ``None``.

        Raises:
            ValueError: When the value is present but not valid CIDR notation.
        """
        if v is None:
            return None
        checked = SwitchValidators.validate_cidr(v)
        if checked is None:
            raise ValueError(f"Invalid CIDR notation: {v}")
        return checked

    @staticmethod
    def check_discovery_credentials_pair(username: Optional[str], password: Optional[str]) -> None:
        """Enforce mutual-presence of discovery credentials.

        ``discovery_username`` and ``discovery_password`` must be absent
        together or present together. Call from a ``@model_validator`` body.

        Args:
            username: discovery_username value (may be ``None``).
            password: discovery_password value (may be ``None``).

        Raises:
            ValueError: When exactly one of the two is provided.
        """
        if username and not password:
            raise ValueError("discovery_password must be set when discovery_username is specified")
        if password and not username:
            raise ValueError("discovery_username must be set when discovery_password is specified")


__all__ = [
    "SwitchValidators",
]
class SwitchOperationError(Exception):
    """Raised when a switch operation fails."""


class ApiDataChecker:
    """Detect controller-embedded errors in API response DATA payloads.

    The Nexus Dashboard API signals certain errors by embedding an error
    object inside ``DATA`` as ``{"code": ..., "message": "..."}`` even when
    the transport-level result is marked successful. Any payload dict that
    contains a ``"code"`` key is treated as an error; the absence of
    ``"code"`` means the payload is a genuine data body.
    """

    @staticmethod
    def check(
        data: Any,
        context: str,
        log: logging.Logger,
        fail_callback=None,
    ) -> None:
        """Fail or raise if the response DATA contains an embedded error code.

        Args:
            data: Value returned by ``nd.request()`` or extracted from
                ``response_current["DATA"]``.
            context: Human-readable description of the operation.
            log: Logger instance.
            fail_callback: Optional callable (e.g. ``module.fail_json``)
                accepting a ``msg`` keyword argument. When provided it is
                called on error instead of raising ``SwitchOperationError``.
        """
        # A genuine data body never carries a "code" key — nothing to do.
        if not isinstance(data, dict) or "code" not in data:
            return
        detail = data.get("message", "Unknown error")
        msg = f"{context} failed — controller returned error: {detail} (code={data['code']})"
        log.error(msg)
        if fail_callback is None:
            raise SwitchOperationError(msg)
        fail_callback(msg=msg)


class FabricUtils:
    """Fabric-level operations: config save, deploy, and info retrieval."""

    def __init__(
        self,
        nd_module,
        fabric: str,
        logger: Optional[logging.Logger] = None,
    ):
        """Initialize FabricUtils.

        Args:
            nd_module: NDModule or NDNetworkResourceModule instance.
            fabric: Fabric name.
            logger: Optional logger; defaults to ``nd.FabricUtils``.
        """
        self.nd = nd_module
        self.fabric = fabric
        self.log = logger if logger is not None else logging.getLogger("nd.FabricUtils")

        # Pre-configure one endpoint object per operation; all of them are
        # scoped to the same fabric.
        self.ep_config_save = EpManageFabricsActionsConfigSavePost()
        self.ep_config_deploy = EpManageFabricConfigDeployPost()
        self.ep_switch_deploy = EpManageFabricsSwitchActionsDeployPost()
        self.ep_fabric_get = EpManageFabricsGet()
        for endpoint in (self.ep_config_save, self.ep_config_deploy, self.ep_switch_deploy, self.ep_fabric_get):
            endpoint.fabric_name = fabric

    # -----------------------------------------------------------------
    # Public API
    # -----------------------------------------------------------------

    def save_config(
        self,
        max_retries: int = 3,
        retry_delay: int = 600,
    ) -> Dict[str, Any]:
        """Save (recalculate) fabric configuration.

        Retries up to ``max_retries`` times with ``retry_delay`` seconds
        between attempts.

        Args:
            max_retries: Maximum number of attempts (default ``3``).
            retry_delay: Seconds to wait between failed attempts
                (default ``600``).

        Returns:
            API response dict from the first successful attempt.

        Raises:
            SwitchOperationError: If all attempts fail.
        """
        failure: Exception = SwitchOperationError(f"Config save produced no attempts for fabric {self.fabric}")
        attempt = 0
        while attempt < max_retries:
            attempt += 1
            try:
                result = self._request_endpoint(self.ep_config_save, action="Config save")
            except SwitchOperationError as exc:
                failure = exc
                self.log.warning(
                    "Config save attempt %s/%s failed for fabric %s: %s",
                    attempt,
                    max_retries,
                    self.fabric,
                    exc,
                )
                # Sleep only when another attempt is still coming.
                if attempt < max_retries:
                    self.log.info(
                        "Retrying config save in %ss (attempt %s/%s)",
                        retry_delay,
                        attempt + 1,
                        max_retries,
                    )
                    time.sleep(retry_delay)
                continue
            self.log.info(
                "Config save succeeded on attempt %s/%s for fabric %s",
                attempt,
                max_retries,
                self.fabric,
            )
            return result
        raise SwitchOperationError(f"Config save failed after {max_retries} attempt(s) for fabric {self.fabric}: {failure}")

    def deploy_config(self) -> Dict[str, Any]:
        """Deploy pending configuration to all switches in the fabric.

        The ``configDeploy`` endpoint requires no request body; it deploys
        all pending changes for the fabric.

        Returns:
            API response dict.

        Raises:
            SwitchOperationError: If the deploy request fails.
        """
        return self._request_endpoint(self.ep_config_deploy, action="Config deploy")

    def deploy_switches(self, serial_numbers: List[str]) -> Dict[str, Any]:
        """Deploy pending configuration for specific switches only.

        Uses the switch-level deploy endpoint which targets only the supplied
        switches rather than all pending changes for the entire fabric.

        Args:
            serial_numbers: Switch serial numbers (identifiers) to deploy.

        Returns:
            API response dict.

        Raises:
            SwitchOperationError: If the deploy request fails.
        """
        self.log.info(
            "Switch-level deploy for %s switch(es) in fabric: %s",
            len(serial_numbers),
            self.fabric,
        )
        try:
            result = self.nd.request(
                self.ep_switch_deploy.path,
                verb=self.ep_switch_deploy.verb,
                data={"switchIds": serial_numbers},
            )
            ApiDataChecker.check(result, f"Switch-level deploy for fabric '{self.fabric}'", self.log)
            self.log.info("Switch-level deploy completed for fabric: %s", self.fabric)
            return result
        except SwitchOperationError:
            # Already logged by ApiDataChecker — propagate unchanged.
            raise
        except Exception as exc:
            self.log.error("Switch-level deploy failed for fabric %s: %s", self.fabric, exc)
            raise SwitchOperationError(f"Switch-level deploy failed for fabric {self.fabric}: {exc}") from exc

    def get_fabric_info(self) -> Dict[str, Any]:
        """Retrieve fabric information.

        Returns:
            Fabric information dict.

        Raises:
            SwitchOperationError: If the request fails.
        """
        return self._request_endpoint(self.ep_fabric_get, action="Get fabric info")

    # -----------------------------------------------------------------
    # Internal helpers
    # -----------------------------------------------------------------

    def _request_endpoint(self, endpoint, action: str = "Request") -> Dict[str, Any]:
        """Execute a request against a pre-configured endpoint.

        Args:
            endpoint: Endpoint object with ``.path`` and ``.verb``.
            action: Human-readable label for log messages.

        Returns:
            API response dict.

        Raises:
            SwitchOperationError: On any request failure.
        """
        self.log.info("%s for fabric: %s", action, self.fabric)
        try:
            result = self.nd.request(endpoint.path, verb=endpoint.verb)
            ApiDataChecker.check(result, f"{action} for fabric '{self.fabric}'", self.log)
            self.log.info("%s completed for fabric: %s", action, self.fabric)
            return result
        except SwitchOperationError:
            raise
        except Exception as exc:
            self.log.error("%s failed for fabric %s: %s", action, self.fabric, exc)
            raise SwitchOperationError(f"{action} failed for fabric {self.fabric}: {exc}") from exc
+ - C(replaced) reconciles only the switches listed in C(config). Field differences + trigger delete and re-add, but fabric switches not listed in C(config) are left + untouched. + - C(gathered) reads the current fabric inventory and returns it in the + C(gathered) key in config format. No changes are made. + type: str + default: merged + choices: + - merged + - replaced + - overridden + - deleted + - gathered + config_actions: + description: + - Controls save and deploy behavior after inventory is updated. + type: dict + suboptions: + save: + description: + - Save/Recalculate the configuration of the fabric after inventory is updated. + type: bool + default: true + deploy: + description: + - Deploy the pending configuration after inventory is updated. + - When set to C(true), C(save) must also be C(true). + type: bool + default: true + type: + description: + - Scope of the deploy operation. + - C(switch) deploys only the switches affected in this run. + - C(global) deploys all pending changes for the entire fabric. + type: str + default: switch + choices: + - switch + - global + config: + description: + - List of switch configurations. Optional for state C(deleted). + type: list + elements: dict + suboptions: + seed_ip: + description: + - Seed IP address or DNS name of the switch to manage. + type: str + required: true + auth_proto: + description: + - SNMP authentication protocol to use. + - For POAP and RMA, should be C(MD5). + type: str + default: MD5 + choices: ['MD5', 'SHA', 'MD5_DES', 'MD5_AES', 'SHA_DES', 'SHA_AES'] + username: + description: + - Login username for the switch. + - For POAP and RMA, should be C(admin). + type: str + password: + description: + - Login password for the switch. + type: str + role: + description: + - Role to assign to the switch in the fabric. 
+ type: str + default: leaf + choices: + - leaf + - spine + - border + - border_spine + - border_gateway + - border_gateway_spine + - super_spine + - border_super_spine + - border_gateway_super_spine + - access + - aggregation + - edge_router + - core_router + - tor + preserve_config: + description: + - Set to C(false) for greenfield deployment, C(true) for brownfield. + type: bool + default: false + poap: + description: + - Bootstrap POAP config for the switch. + - C(serial_number) and C(hostname) are mandatory. + - Model, version, and config data are sourced from the bootstrap API at runtime. + - If the bootstrap API reports a different hostname or role, the API value + overrides the user-provided value and a warning is logged. + - To perform a B(swap) operation, provide both C(poap) and C(preprovision) + under the same switch config. Only C(serial_number) is required in each. + - POAP and DHCP must be enabled in fabric before using. + type: dict + suboptions: + serial_number: + description: + - Serial number of the physical switch to Bootstrap. + - Required for bootstrap and swap operations. + type: str + required: true + hostname: + description: + - Hostname for the switch during bootstrap. + - Overridden by the bootstrap API value when they differ (warning logged). + type: str + required: true + discovery_username: + description: + - Username for device discovery during POAP. + type: str + discovery_password: + description: + - Password for device discovery during POAP. + type: str + image_policy: + description: + - Name of the image policy to be applied on the switch. + type: str + preprovision: + description: + - Pre-provision config for the switch. + - All five fields are mandatory since the controller has no physical switch + to pull values from. + - To perform a B(swap) operation, provide both C(poap) and C(preprovision) + under the same switch config. Only C(serial_number) is required in each; + extra fields are ignored with a warning. 
+ - POAP and DHCP must be enabled in fabric before using. + type: dict + suboptions: + serial_number: + description: + - Serial number of the switch to Pre-provision. + type: str + required: true + discovery_username: + description: + - Username for device discovery during pre-provision. + type: str + discovery_password: + description: + - Password for device discovery during pre-provision. + type: str + model: + description: + - Model of the switch to Pre-provision (e.g., N9K-C93180YC-EX). + type: str + required: true + version: + description: + - Software version of the switch to Pre-provision (e.g., 10.3(1)). + type: str + required: true + hostname: + description: + - Hostname for the switch during pre-provision. + type: str + required: true + image_policy: + description: + - Image policy to apply during pre-provision. + type: str + config_data: + description: + - Basic configuration data for the switch during Pre-provision. + - C(models) and C(gateway) are mandatory. + - C(models) is a list of module models in the switch. + - C(gateway) is the gateway IP with mask for the switch. + type: dict + required: true + suboptions: + models: + description: + - List of module models in the switch (e.g., [N9K-X9364v, N9K-vSUP]). + type: list + elements: str + required: true + gateway: + description: + - Gateway IP with subnet mask (e.g., 192.168.0.1/24). + type: str + required: true + rma: + description: + - RMA an existing switch with a new one. + - The switch being replaced is identified by C(seed_ip). + - The existing switch must be configured, deployed in maintenance mode, + and then shutdown (unreachable state) before initiating RMA. + type: list + elements: dict + suboptions: + new_serial_number: + description: + - Serial number of the replacement switch in the POAP/bootstrap loop. + type: str + required: true + image_policy: + description: + - Name of the image policy to be applied on the replacement switch. 
+ type: str + discovery_username: + description: + - Username for device discovery during RMA bootstrap. + type: str + discovery_password: + description: + - Password for device discovery during RMA bootstrap. + type: str + +extends_documentation_fragment: +- cisco.nd.modules +- cisco.nd.check_mode +notes: +- This module requires ND 4.2 or higher. +- POAP operations require POAP and DHCP to be enabled in fabric settings. +- RMA operations require the old switch to be in a replaceable state. +- Idempotence for B(Bootstrap) - A bootstrap entry is considered idempotent when + the C(seed_ip) already exists in the fabric inventory B(and) the C(serial_number) + in the POAP config matches the serial number recorded for that IP in inventory. + Both conditions must be true; a matching IP with a different serial is not + treated as idempotent and will attempt the bootstrap again. +- Idempotence for B(Pre-provision) - A pre-provision entry is considered idempotent + when the C(seed_ip) already exists in the fabric inventory, regardless of the + C(serial_number) value under C(preprovision). Because the pre-provision serial is + a placeholder that may differ from the real hardware serial, only the IP address + is used as the stable identity for idempotency checks. +- Idempotence for B(normal discovery) - A switch is considered idempotent when + its C(seed_ip) already exists in the fabric inventory with no configuration + drift (same role). 
+""" + +EXAMPLES = """ +- name: Add a switch to fabric + cisco.nd.nd_manage_switches: + fabric: my-fabric + config: + - seed_ip: 192.168.10.201 + username: admin + password: "{{ switch_password }}" + role: leaf + preserve_config: false + state: merged + +- name: Add multiple switches + cisco.nd.nd_manage_switches: + fabric: my-fabric + config: + - seed_ip: 192.168.10.201 + username: admin + password: "{{ switch_password }}" + role: leaf + preserve_config: false + - seed_ip: 192.168.10.202 + username: admin + password: "{{ switch_password }}" + role: spine + preserve_config: false + state: merged + +- name: Preprovision a switch via POAP + cisco.nd.nd_manage_switches: + fabric: my-fabric + config: + - seed_ip: 192.168.10.1 + username: admin + password: "{{ switch_password }}" + role: spine + preprovision: + serial_number: SAL1234ABCD + model: N9K-C93180YC-EX + version: "10.3(1)" + hostname: leaf-preprov + image_policy: my-image-policy + discovery_username: root + discovery_password: "{{ discovery_password }}" + config_data: + models: + - N9K-C93180YC-EX + gateway: 192.168.10.1/24 + state: merged + +- name: Bootstrap a switch via POAP + cisco.nd.nd_manage_switches: + fabric: my-fabric + config: + - seed_ip: 192.168.10.1 + username: admin + password: "{{ switch_password }}" + role: leaf + poap: + serial_number: SAL5678EFGH + hostname: leaf-bootstrap + image_policy: my-image-policy + discovery_username: root + discovery_password: "{{ discovery_password }}" + state: merged + +- name: Swap serial number on a pre-provisioned switch (POAP swap) + cisco.nd.nd_manage_switches: + fabric: my-fabric + config: + - seed_ip: 192.168.10.1 + username: admin + password: "{{ switch_password }}" + poap: + serial_number: SAL5678EFGH + preprovision: + serial_number: SAL1234ABCD + state: merged + +- name: RMA - Replace a switch + cisco.nd.nd_manage_switches: + fabric: my-fabric + config: + - seed_ip: 192.168.10.1 + username: admin + password: "{{ switch_password }}" + rma: + - 
def main():
    """Main entry point for the nd_manage_switches module.

    Builds the argument spec, validates cross-option constraints, runs the
    switch state machine, and reports results. All failures are funneled
    through ``module.fail_json`` with as much response context as available.
    """

    # Build argument spec: common ND connection args + switch config schema.
    argument_spec = nd_argument_spec()
    argument_spec.update(SwitchConfigModel.get_argument_spec())

    # Create Ansible module
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ("state", "merged", ["config"]),
            ("state", "replaced", ["config"]),
        ],
    )

    require_pydantic(module)

    # Deploying without saving first would push stale config — reject early.
    config_actions = module.params.get("config_actions") or {}
    if config_actions.get("deploy", True) and not config_actions.get("save", True):
        module.fail_json(msg="'config_actions.deploy: true' requires 'config_actions.save: true'")

    # Initialize logging
    try:
        log_config = Log()
        log_config.commit()
        # Create logger instance for this module
        log = logging.getLogger("nd.nd_manage_switches")
    except ValueError as error:
        module.fail_json(msg=str(error))

    # Get parameters
    output_level = module.params.get("output_level")

    # Initialize Results - this collects all operation results
    results = Results()
    results.check_mode = module.check_mode
    results.action = "manage_switches"

    # BUGFIX: pre-bind nd so the NDModuleError handler below can probe
    # nd.rest_send even when NDModule(module) itself raised. Previously an
    # unbound `nd` raised NameError inside the handler (NameError is not in
    # the caught (AttributeError, ValueError) tuple), masking the real error.
    nd = None
    try:
        # Initialize NDModule (uses RestSend infrastructure internally)
        nd = NDModule(module)

        # Create NDSwitchResourceModule
        sw_module = NDSwitchResourceModule(nd=nd, results=results, logger=log)

        # Manage state for merged, overridden, deleted
        sw_module.manage_state()

        # Exit with results
        log.info("State management completed successfully. Changed: %s", results.changed)
        sw_module.exit_json()

    except NDModuleError as error:
        # NDModule-specific errors (API failures, authentication issues, etc.)
        log.error("NDModule error: %s", error.msg)

        # Try to get response from RestSend if available (nd may be None —
        # the AttributeError branch then builds the fallback payload).
        try:
            results.response_current = nd.rest_send.response_current
            results.result_current = nd.rest_send.result_current
        except (AttributeError, ValueError):
            # Fallback if RestSend wasn't initialized or no response available
            results.response_current = {
                "RETURN_CODE": error.status if error.status else -1,
                "MESSAGE": error.msg,
                "DATA": error.response_payload if error.response_payload else {},
            }
            results.result_current = {
                "success": False,
                "found": False,
            }

        results.diff_current = {}
        results.register_api_call()
        results.build_final_result()

        # Add error details if debug output is requested
        if output_level == "debug":
            results.final_result["error_details"] = error.to_dict()

        log.error("Module failed: %s", results.final_result)
        module.fail_json(msg=error.msg, **results.final_result)

    except Exception as error:
        # Unexpected errors — this is the module boundary, so a broad catch
        # is deliberate: translate anything into a structured fail_json.
        log.error("Unexpected error during module execution: %s", str(error))
        log.error("Error type: %s", error.__class__.__name__)

        # Build failed result
        results.response_current = {
            "RETURN_CODE": -1,
            "MESSAGE": f"Unexpected error: {str(error)}",
            "DATA": {},
        }
        results.result_current = {
            "success": False,
            "found": False,
        }
        results.diff_current = {}
        results.register_api_call()
        results.build_final_result()

        if output_level == "debug":
            import traceback

            results.final_result["traceback"] = traceback.format_exc()

        module.fail_json(msg=str(error), **results.final_result)


if __name__ == "__main__":
    main()
-------------------------------- +- name: Base - Setup Internal TestCase Variables + ansible.builtin.set_fact: + test_data: + test_fabric: "{{ ansible_it_fabric }}" + sw1: "{{ ansible_switch1 }}" + sw2: "{{ ansible_switch2 }}" + sw3: "{{ ansible_switch3 }}" + config_actions: + save: "{{ save }}" + deploy: "{{ deploy }}" + type: "{{ config_actions_type }}" + delegate_to: localhost + +# ---------------------------------------------- +# Create Module Payloads using Jinja2 Templates +# ---------------------------------------------- + +- name: Base - Prepare Configuration + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{test_data.sw1}}" + auth_proto: MD5 + role: leaf + - seed_ip: "{{test_data.sw2}}" + auth_proto: MD5 + role: spine + - seed_ip: "{{test_data.sw3}}" + auth_proto: MD5 + role: border + delegate_to: localhost + + +- name: Import Configuration Prepare Tasks + vars: + file: base + ansible.builtin.import_tasks: ./conf_prep_tasks.yaml + +# ---------------------------------------------- +# Test Setup +# ---------------------------------------------- + +- name: Base - Verify fabric is reachable via API + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}" + method: get + register: fabric_query + ignore_errors: true + +- name: Base - Assert fabric exists + ansible.builtin.assert: + that: + - fabric_query.status == 200 + fail_msg: "Fabric '{{ test_data.test_fabric }}' not found (HTTP {{ fabric_query.status }})." + success_msg: "Fabric '{{ test_data.test_fabric }}' found." 
+ +- name: Base - Clean Up Existing Devices in Fabric + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted \ No newline at end of file diff --git a/tests/integration/targets/nd_manage_switches/tasks/conf_prep_tasks.yaml b/tests/integration/targets/nd_manage_switches/tasks/conf_prep_tasks.yaml new file mode 100644 index 000000000..dce2fdec5 --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tasks/conf_prep_tasks.yaml @@ -0,0 +1,11 @@ +--- +- name: Build Fabric Base Config Data + ansible.builtin.template: + src: nd_manage_switches_conf.j2 + dest: "{{ role_path }}/files/nd_manage_switches_{{file}}_conf.yaml" + delegate_to: localhost + +- name: Access Fabric Configuration Data and Save to Local Variable + ansible.builtin.set_fact: + "{{ 'nd_switches_' + file +'_conf' }}": "{{ lookup('file', role_path ~ '/files/nd_manage_switches_' ~ file ~ '_conf.yaml') | from_yaml }}" + delegate_to: localhost diff --git a/tests/integration/targets/nd_manage_switches/tasks/main.yaml b/tests/integration/targets/nd_manage_switches/tasks/main.yaml new file mode 100644 index 000000000..6f6ed05cd --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tasks/main.yaml @@ -0,0 +1,17 @@ +--- +- name: Discover ND Test Cases + ansible.builtin.find: + paths: "{{ role_path }}/tests" + patterns: "{{ testcase }}.yaml" + connection: local + register: nd_testcases + +- name: Build List of Test Items + ansible.builtin.set_fact: + test_items: "{{ nd_testcases.files | map(attribute='path') | list }}" + +- name: Run ND Test Cases + ansible.builtin.include_tasks: "{{ test_case_to_run }}" + with_items: "{{ test_items }}" + loop_control: + loop_var: test_case_to_run diff --git a/tests/integration/targets/nd_manage_switches/tasks/query_task.yaml b/tests/integration/targets/nd_manage_switches/tasks/query_task.yaml new file mode 100644 index 000000000..7f851042b --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tasks/query_task.yaml @@ -0,0 
+1,33 @@ +--- +- name: "Query Task: Authenticate with ND to get token" + ansible.builtin.uri: + url: "https://{{ ansible_host }}:{{ ansible_httpapi_port | default(443) }}/login" + method: POST + headers: + Content-Type: "application/json" + body_format: json + body: + domain: "{{ ansible_httpapi_login_domain | default('local') }}" + userName: "{{ ansible_user }}" + userPasswd: "{{ ansible_password }}" + validate_certs: false + return_content: true + status_code: + - 200 + register: nd_auth_response + delegate_to: localhost + +- name: "Query Task: Query {{ test_data.test_fabric }} switch data from ND" + ansible.builtin.uri: + url: "https://{{ ansible_host }}:{{ ansible_httpapi_port | default(443) }}/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: GET + headers: + Authorization: "Bearer {{ nd_auth_response.json.jwttoken }}" + Content-Type: "application/json" + validate_certs: false + return_content: true + status_code: + - 200 + - 404 + register: query_result + delegate_to: localhost diff --git a/tests/integration/targets/nd_manage_switches/templates/nd_manage_switches_conf.j2 b/tests/integration/targets/nd_manage_switches/templates/nd_manage_switches_conf.j2 new file mode 100644 index 000000000..9fbc38ce6 --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/templates/nd_manage_switches_conf.j2 @@ -0,0 +1,74 @@ +--- +# This ND test data structure is auto-generated +# DO NOT EDIT MANUALLY +# + +# ------------------------------ +# Fabric Switches +# ------------------------------ + +{% if switch_conf is iterable %} +{% set switch_list = [] %} +{% for switch in switch_conf %} +{% set switch_item = {} %} +{% if switch.seed_ip is defined %} +{% set _ = switch_item.update({'seed_ip': switch.seed_ip | default('') }) %} +{% endif %} +{% set _ = switch_item.update({'username': switch_username}) %} +{% set _ = switch_item.update({'password': switch_password}) %} +{% if switch.role is defined %} +{% set _ = switch_item.update({'role': 
switch.role | default('') }) %} +{% endif %} +{% if switch.poap is defined and switch.poap %} +{% set poap_item = {} %} +{% set _ = poap_item.update({'serial_number': switch.poap.serial_number}) %} +{% set _ = poap_item.update({'hostname': switch.poap.hostname}) %} +{% if switch.poap.image_policy is defined and switch.poap.image_policy %} +{% set _ = poap_item.update({'image_policy': switch.poap.image_policy}) %} +{% endif %} +{% if switch.poap.discovery_username is defined and switch.poap.discovery_username %} +{% set _ = poap_item.update({'discovery_username': switch.poap.discovery_username}) %} +{% endif %} +{% if switch.poap.discovery_password is defined and switch.poap.discovery_password %} +{% set _ = poap_item.update({'discovery_password': switch.poap.discovery_password}) %} +{% endif %} +{% set _ = switch_item.update({'poap': poap_item}) %} +{% endif %} +{% if switch.preprovision is defined and switch.preprovision %} +{% set preprov_item = {} %} +{% set _ = preprov_item.update({'serial_number': switch.preprovision.serial_number}) %} +{% set _ = preprov_item.update({'model': switch.preprovision.model}) %} +{% set _ = preprov_item.update({'version': switch.preprovision.version}) %} +{% set _ = preprov_item.update({'hostname': switch.preprovision.hostname}) %} +{% if switch.preprovision.config_data is defined %} +{% set preprov_config = {} %} +{% for k in switch.preprovision.config_data %} +{% set _ = preprov_config.update({k: switch.preprovision.config_data[k]}) %} +{% endfor %} +{% set _ = preprov_item.update({'config_data': preprov_config}) %} +{% endif %} +{% if switch.preprovision.image_policy is defined and switch.preprovision.image_policy %} +{% set _ = preprov_item.update({'image_policy': switch.preprovision.image_policy}) %} +{% endif %} +{% if switch.preprovision.discovery_username is defined and switch.preprovision.discovery_username %} +{% set _ = preprov_item.update({'discovery_username': switch.preprovision.discovery_username}) %} +{% endif %} 
+{% if switch.preprovision.discovery_password is defined and switch.preprovision.discovery_password %} +{% set _ = preprov_item.update({'discovery_password': switch.preprovision.discovery_password}) %} +{% endif %} +{% set _ = switch_item.update({'preprovision': preprov_item}) %} +{% endif %} +{% if switch.poap is not defined and switch.preprovision is not defined %} +{% if switch.auth_proto is defined %} +{% set _ = switch_item.update({'auth_proto': switch.auth_proto | default('') }) %} +{% endif %} +{% if switch.preserve_config is defined %} +{% set _ = switch_item.update({'preserve_config': switch.preserve_config | default('') }) %} +{% else %} +{% set _ = switch_item.update({'preserve_config': false }) %} +{% endif %} +{% endif %} +{% set _ = switch_list.append(switch_item) %} +{% endfor %} +{{ switch_list | to_nice_yaml(indent=2) }} +{% endif %} \ No newline at end of file diff --git a/tests/integration/targets/nd_manage_switches/tests/deleted.yaml b/tests/integration/targets/nd_manage_switches/tests/deleted.yaml new file mode 100644 index 000000000..b5f4b6bd3 --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tests/deleted.yaml @@ -0,0 +1,143 @@ +--- + +- name: Import ND Manage Switches Base Tasks + ansible.builtin.import_tasks: ../../tasks/base_tasks.yaml + tags: deleted + +# ---------------------------------------------- +# Run Test Cases +# ---------------------------------------------- +# TC - 1 +- name: Deleted TC1 - Prepare Switches in Fabric - GreenField Deployment + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_base_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: merged_result + tags: deleted + +- name: Deleted TC1 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: deleted + +- name: Debug - Print Query Result + ansible.builtin.debug: 
+ var: query_result + tags: deleted + +- name: Deleted TC1 - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_base_conf }}" + changed: "{{merged_result.changed}}" + register: result + tags: deleted + +# TC - 2 +- name: Deleted TC2 - Delete a Switch from the Fabric + cisco.nd.nd_manage_switches: &conf_del + fabric: "{{ test_data.test_fabric }}" + state: deleted + config: + - seed_ip: "{{ test_data.sw1 }}" + register: delete_result + tags: deleted + +- name: Deleted TC2 - Prepare Test Data + ansible.builtin.set_fact: + nd_switches_delete_conf: "{{ nd_switches_base_conf | rejectattr('seed_ip', 'equalto', test_data.sw1) | list }}" + delegate_to: localhost + tags: deleted + +- name: Deleted TC2 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: deleted + +- name: Debug - Print Query Result + ansible.builtin.debug: + var: query_result + tags: deleted + +- name: Deleted TC2 - Validate nd Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_delete_conf }}" + changed: "{{ delete_result.changed }}" + register: result + tags: deleted + +# TC - 3 +- name: Deleted TC3 - Removing a previously Deleted Switch - Idempotence + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + config: + - seed_ip: "{{ test_data.sw1 }}" + register: result + tags: deleted + +- name: Debug - Print Query Result + ansible.builtin.debug: + var: result + tags: deleted + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + - 'result.msg == "No switches to delete - fabric already matches desired config"' + tags: deleted + +# TC - 4 +- name: Deleted TC4 - Delete all Switches from Fabric + cisco.nd.nd_manage_switches: &conf_del_all + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: delete_result + tags: 
deleted + +- name: Deleted TC4 - Prepare Test Data + ansible.builtin.set_fact: + nd_switches_delete_conf: [] + delegate_to: localhost + tags: deleted + +- name: Deleted TC4 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: deleted + +- name: Debug - Print Query Result + ansible.builtin.debug: + var: query_result + tags: deleted + +- name: Deleted TC4 - Validate nd Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_delete_conf }}" + changed: "{{ delete_result.changed }}" + register: result + tags: deleted + +# TC - 5 +- name: Deleted TC5 - Delete all Switches from Fabric - Idempotence + cisco.nd.nd_manage_switches: *conf_del_all + register: result + tags: deleted + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + - 'result.msg == "No switches to delete - fabric already matches desired config"' + tags: deleted \ No newline at end of file diff --git a/tests/integration/targets/nd_manage_switches/tests/gathered.yaml b/tests/integration/targets/nd_manage_switches/tests/gathered.yaml new file mode 100644 index 000000000..f34687573 --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tests/gathered.yaml @@ -0,0 +1,64 @@ +--- +- name: Import ND Manage Switches Base Tasks + ansible.builtin.import_tasks: ../../tasks/base_tasks.yaml + tags: query + +# ---------------------------------------------- +# Run Test Cases +# ---------------------------------------------- + +# TC - 1 +- name: Query TC1 - Merge a Switch using GreenField Deployment + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_base_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: create_result + tags: query + +- name: Query TC1 - Gather Switch State in Fabric + cisco.nd.nd_manage_switches: + state: gathered + fabric: "{{ 
test_data.test_fabric }}" + register: query_result + tags: query + +- name: Query TC1 - Build Gathered Lookup + ansible.builtin.set_fact: + gathered_seeds: "{{ query_result.gathered | map(attribute='seed_ip') | list }}" + gathered_role_map: "{{ query_result.gathered | items2dict(key_name='seed_ip', value_name='role') }}" + delegate_to: localhost + tags: query + +- name: Query TC1 - Validate Gathered Count + ansible.builtin.assert: + that: + - query_result.gathered | length == nd_switches_base_conf | length + fail_msg: >- + Gathered count {{ query_result.gathered | length }} does not match + expected {{ nd_switches_base_conf | length }} + tags: query + +- name: Query TC1 - Validate Each Switch Present and Role Matches + ansible.builtin.assert: + that: + - item.seed_ip in gathered_seeds + - "'role' not in item or gathered_role_map[item.seed_ip] == item.role" + fail_msg: >- + Switch {{ item.seed_ip }} missing from gathered output or role mismatch + (expected={{ item.role | default('any') }}, + got={{ gathered_role_map[item.seed_ip] | default('not found') }}) + loop: "{{ nd_switches_base_conf }}" + tags: query + +# ---------------------------------------------- +# Cleanup Fabric Switches +# ---------------------------------------------- + +- name: Query - Cleanup Fabric + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: result + tags: query diff --git a/tests/integration/targets/nd_manage_switches/tests/merged.yaml b/tests/integration/targets/nd_manage_switches/tests/merged.yaml new file mode 100644 index 000000000..a5b25f7d5 --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tests/merged.yaml @@ -0,0 +1,318 @@ +--- + +- name: Import ND Manage Switches Base Tasks + ansible.builtin.import_tasks: ../../tasks/base_tasks.yaml + tags: merged + +# ---------------------------------------------- +# Run Test Cases +# ---------------------------------------------- +# TC - 1 +- name: Merged TC1 - Merge a Switch using 
GreenField Deployment + cisco.nd.nd_manage_switches: &conf + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_base_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: merged_result + tags: merged + +- name: Merged TC1 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: merged + +- name: Merged TC1 - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_base_conf }}" + changed: "{{ merged_result.changed }}" + register: result + tags: merged + +# TC - 2 +- name: Merged TC2 - Idempotence + cisco.nd.nd_manage_switches: *conf + register: result + tags: merged + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + - 'result.msg == "No switches to merge — fabric already matches desired config"' + tags: merged + +# TC - 3 +- name: Merged TC3 - Clean up Existing Switches + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: delete_result + tags: merged + +- name: Merged TC3 - Prepare Test Data + ansible.builtin.set_fact: + nd_switches_delete_conf: [] + delegate_to: localhost + tags: merged + +- name: Merged TC3 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: merged + +- name: Merged TC3 - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_delete_conf }}" + changed: "{{ delete_result.changed }}" + register: result + tags: merged + +# TC - 4 +- name: Merged TC4 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw1 }}" + role: leaf + auth_proto: MD5 + preserve_config: true + delegate_to: localhost + tags: merged + +- name: Import Configuration Prepare Tasks + vars: + 
file: merge + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: merged + +- name: Merged TC4 - Merge a Switch using BrownField Deployment + cisco.nd.nd_manage_switches: &conf_bf + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_merge_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: merged_result + tags: merged + +- name: Merged TC4 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: merged + +- name: Merged TC4 - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_merge_conf }}" + changed: "{{ merged_result.changed }}" + register: result + tags: merged + +# TC - 5 +- name: Merged TC5 - Verify Idempotence + cisco.nd.nd_manage_switches: *conf_bf + register: result + tags: merged + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + - 'result.msg == "No switches to merge — fabric already matches desired config"' + tags: merged + +# TC - 6 +- name: Merged TC6 - Clean up Existing Switches + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: deleted_result + tags: merged + +- name: Merged TC6 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw1 }}" + delegate_to: localhost + tags: merged + +- name: Import Configuration Prepare Tasks + vars: + file: merge + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: merged + +- name: Merged TC6 - Merge a Switch using GreenField Deployment - Using default role/auth_proto + cisco.nd.nd_manage_switches: &conf_def + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_merge_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: merged_result + tags: merged + +- name: Merged TC6 - Prepare Config + 
ansible.builtin.set_fact: + nd_switches_mergev_conf: + - seed_ip: "{{ test_data.sw1 }}" + role: leaf # default role in ND + delegate_to: localhost + tags: merged + +- name: Merged TC6 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: merged + +- name: Merged TC6 - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_mergev_conf }}" + changed: "{{ merged_result.changed }}" + register: result + tags: merged + +# TC - 7 +- name: Merged TC7 - Verify Idempotence + cisco.nd.nd_manage_switches: *conf_def + register: result + tags: merged + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + - 'result.msg == "No switches to merge — fabric already matches desired config"' + tags: merged + +# TC - 8 +- name: Merged TC8 - Clean up Existing Switches + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: deleted_result + tags: merged + +# TC - 9 +- name: Merged TC9 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: + role: leaf + auth_proto: MD5 + delegate_to: localhost + tags: merged + +- name: Import Configuration Prepare Tasks + vars: + file: merge + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: merged + +- name: Merged TC9 - Merge a Switch without seed_ip + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_merge_conf }}" + config_actions: "{{ test_data.config_actions }}" + ignore_errors: true + register: merged_result + tags: merged + +- name: Assert + ansible.builtin.assert: + that: + - 'merged_result.changed == false' + - '"seed_ip cannot be empty" in merged_result.msg' + tags: merged + +# TC - 10 +- name: Merged TC10 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw1 }}" + 
auth_proto: MD5 + role: invalid + delegate_to: localhost + tags: merged + +- name: Import Configuration Prepare Tasks + vars: + file: merge + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: merged + +- name: Merged TC10 - Merge a Switch with Invalid Role + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_merge_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: merged_result + ignore_errors: true + tags: merged + +- name: Assert + ansible.builtin.assert: + that: + - 'merged_result.changed == false' + - '"Invalid SwitchRole: invalid" in merged_result.msg' + tags: merged + +# TC - 11 +- name: Merged TC11 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw1 }}" + auth_proto: MD55DM + role: leaf + delegate_to: localhost + tags: merged + +- name: Import Configuration Prepare Tasks + vars: + file: merge + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: merged + +- name: Merged TC11 - Merge a Switch with invalid auth choice + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_merge_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: merged_result + ignore_errors: true + tags: merged + +- name: Assert + ansible.builtin.assert: + that: + - 'merged_result.changed == false' + - '"Invalid SnmpV3AuthProtocol: MD55DM" in merged_result.msg' + tags: merged + +# TC - 12 +- name: Merged TC12 - Merge a Switch without a config + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: merged + register: merged_result + ignore_errors: true + tags: merged + +- name: Assert + ansible.builtin.assert: + that: + - 'merged_result.changed == false' + - '"state is merged but all of the following are missing: config" in merged_result.msg' + tags: merged \ No newline at end of file diff --git 
a/tests/integration/targets/nd_manage_switches/tests/overridden.yaml b/tests/integration/targets/nd_manage_switches/tests/overridden.yaml new file mode 100644 index 000000000..cee43173d --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tests/overridden.yaml @@ -0,0 +1,168 @@ +--- +- name: Import ND Manage Switches Base Tasks + ansible.builtin.import_tasks: ../../tasks/base_tasks.yaml + tags: overridden + +# ---------------------------------------------- +# Run Test Cases +# ---------------------------------------------- + +# TC - 1 +- name: Overridden TC1 - Prepare Switches in Fabric - GreenField Deployment + cisco.nd.nd_manage_switches: &conf + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_base_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: merged_result + tags: overridden + +- name: Overridden TC1 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: overridden + +- name: Overridden TC1 - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_base_conf }}" + changed: "{{ merged_result.changed }}" + register: result + tags: overridden + +# TC - 2 +- name: Overridden TC2 - Verify Idempotence + cisco.nd.nd_manage_switches: *conf + register: result + tags: overridden + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + - 'result.msg == "No switches to merge — fabric already matches desired config"' + tags: overridden + +# TC - 3 +- name: Overridden TC3 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw2 }}" + role: spine + preserve_config: false + delegate_to: localhost + tags: overridden + +- name: Import Configuration Prepare Tasks + vars: + file: overridden + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: overridden + +- name: 
Overridden TC3 - Override Existing Switch - Removes Other Switches from Fabric + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: overridden + config: "{{ nd_switches_overridden_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: overridden_result + tags: overridden + +- name: Overridden TC3 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: overridden + +- name: Overridden TC3 - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_overridden_conf }}" + changed: "{{ overridden_result.changed }}" + register: result + tags: overridden + +# TC - 4 +- name: Overridden TC4 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw2 }}" + role: leaf + preserve_config: false + delegate_to: localhost + tags: overridden + +- name: Import Configuration Prepare Tasks + vars: + file: overridden + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: overridden + +- name: Overridden TC4 - New Role for the Existing Switch + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: overridden + config: "{{ nd_switches_overridden_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: overridden_result + tags: overridden + +- name: Overridden TC4 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: overridden + +- name: Overridden TC4 - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_overridden_conf }}" + changed: "{{ overridden_result.changed }}" + register: result + tags: overridden + +# TC - 5 +- name: Overridden TC5 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ 
test_data.sw2 }}" + preserve_config: false + delegate_to: localhost + tags: overridden + +- name: Import Configuration Prepare Tasks + vars: + file: overridden + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: overridden + +- name: Overridden TC5 - Unspecified Role for the Existing Switch (Default, Leaf) + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: overridden + config: "{{ nd_switches_overridden_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: overridden_result + tags: overridden + +- name: Assert + ansible.builtin.assert: + that: + - 'overridden_result.changed == false' + - 'overridden_result.msg == "No switches to override — fabric already matches desired config"' + tags: overridden + +# ---------------------------------------------- +# Cleanup Fabric Switches +# ---------------------------------------------- + +- name: Overridden - Cleanup Fabric Switches + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: result + tags: overridden diff --git a/tests/integration/targets/nd_manage_switches/tests/poap.yaml b/tests/integration/targets/nd_manage_switches/tests/poap.yaml new file mode 100644 index 000000000..31dc8f0b3 --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tests/poap.yaml @@ -0,0 +1,265 @@ +--- +- name: Test Entry Point - [nd_manage_switches - Poap] + ansible.builtin.debug: + msg: + - "----------------------------------------------------------------" + - "+ Executing Poap Tests - [nd_manage_switches] +" + - "----------------------------------------------------------------" + tags: poap + +- name: Poap - Setup Internal TestCase Variables + ansible.builtin.set_fact: + test_data: + test_fabric: "{{ ansible_it_fabric }}" + sw1: "{{ ansible_switch1 }}" + sw1_serial: "1ABC23DEFGH" + sw2: "{{ ansible_switch2 }}" + sw2_serial: "1ABC23DEFHI" + poap_model: "ABC-D1230a" + poap_version: "1.2(3)" + prepro_hostname: "PreProv-SW" + 
poap_hostname: "Poap-SW" + poap_configmodel: "['ABC-D1230a']" + poap_gateway: "192.168.2.1/24" + sw3: "{{ ansible_switch3 }}" + config_actions: + save: "{{ save }}" + deploy: "{{ deploy }}" + type: "{{ config_actions_type }}" + poap_enabled: false + delegate_to: localhost + tags: poap + +# Below commented tasks are sample tasks to enable Bootstrap and DHCP along with DHCP configs +# Please make sure you provide correct values for required fields +# Fabric config has many ND/DCNM auto generated values, so always GET the configs first +# and then set the required values. +# +# +# - name: Poap Merged - Get the configs of the fabric deployed. +# cisco.nd.nd_rest: +# path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}" +# method: get +# register: result + +# - set_fact: +# result.jsondata.management.day0Bootstrap = true +# result.jsondata.management.localDhcpServer = true +# result.jsondata.management.dhcpProtocolVersion = "dhcpv4" +# result.jsondata.management.dhcpStartAddress = "192.168.1.10" +# result.jsondata.management.dhcpEndAddress = "192.168.1.20" +# result.jsondata.management.managementGateway = "192.168.1.1" +# result.jsondata.management.managementIpv4Prefix = "24" +# +# - name: Poap Merged - Configure Bootstrap and DHCP on Fabric +# cisco.nd.nd_rest: +# method: PUT +# path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}" +# content: "{{ result.jsondata }}" +# + +# ---------------------------------------------- +# Run Test Cases +# ---------------------------------------------- +# Base Tests +- name: Base - Verify fabric is reachable via API + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}" + method: get + register: fabric_query + ignore_errors: true + tags: poap + +- name: Base - Assert fabric exists + ansible.builtin.assert: + that: + - fabric_query.status == 200 + fail_msg: "Fabric '{{ test_data.test_fabric }}' not found (HTTP {{ fabric_query.status }})." + success_msg: "Fabric '{{ test_data.test_fabric }}' found." 
+ tags: poap + +- name: POAP Base Task - Set Variable + ansible.builtin.set_fact: + poap_enabled: true + when: fabric_query.status == 200 and fabric_query.jsondata.management.day0Bootstrap + tags: poap + +# TC1 +- name: POAP TC1 - Prepare Validate Config + ansible.builtin.set_fact: + nd_switches_delete_conf: + delegate_to: localhost + tags: poap + +- name: POAP TC1 - Clean Up Existing Switches + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: deleted_result + tags: poap + +- name: POAP TC1 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: poap + +- name: POAP TC1 - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_delete_conf }}" + register: result + tags: poap + +# ---------------------------------------------- # +# Merged # +# ---------------------------------------------- # + +# TC - 1 +- name: Poap TC1 - Prepare Configuration + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw2 }}" + username: '{{ switch_username }}' + password: '{{ switch_password }}' + role: border + preprovision: + serial_number: "{{ test_data.sw2_serial }}" + model: "{{ test_data.poap_model }}" + version: "{{ test_data.poap_version }}" + hostname: "{{ test_data.prepro_hostname }}" + config_data: + models: "{{ test_data.poap_configmodel }}" + gateway: "{{ test_data.poap_gateway }}" + when: poap_enabled == True + delegate_to: localhost + tags: poap + +- name: Import Configuration Prepare Tasks + vars: + file: poap + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + when: poap_enabled == True + tags: poap + +- name: Poap TC1 - Merged - Pre-provisioned Switch Configuration + cisco.nd.nd_manage_switches: &conf_prepro + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_poap_conf }}" + config_actions: "{{ 
test_data.config_actions }}" + when: poap_enabled == True + register: merged_result + tags: poap + +- name: Poap TC1 - Merged - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + when: poap_enabled == True + register: query_result + tags: poap + +- name: Poap TC1 - Merged - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_poap_conf }}" + changed: "{{ merged_result.changed }}" + when: poap_enabled == True + register: result + tags: poap + +# TC - 2 +- name: Poap TC2 - Merged - Verify Idempotence + cisco.nd.nd_manage_switches: *conf_prepro + when: poap_enabled == True + register: merged_result + tags: poap + +- name: Assert + ansible.builtin.assert: + that: + - 'merged_result.changed == false' + - 'merged_result.msg == "No switches to merge — fabric already matches desired config"' + when: poap_enabled == True + tags: poap + +# TC - 3 +- name: Poap TC3 - Prepare Configuration + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw1 }}" + username: '{{ switch_username }}' + password: '{{ switch_password }}' + role: leaf + poap: + serial_number: "{{ test_data.sw1_serial }}" + hostname: "{{ test_data.poap_hostname }}" + - seed_ip: "{{ test_data.sw3 }}" + auth_proto: MD5 + role: spine + when: poap_enabled == True + delegate_to: localhost + tags: poap + +- name: Import Configuration Prepare Tasks + vars: + file: poap + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + when: poap_enabled == True + tags: poap + +- name: Poap TC3 - Merge Config + cisco.nd.nd_manage_switches: &conf_poap + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_poap_conf }}" + config_actions: "{{ test_data.config_actions }}" + when: poap_enabled == True + register: merged_result + tags: poap + +- name: Poap TC3 - Merged - Query Switch State in Fabric + cisco.nd.nd_rest: + path: 
"/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + when: poap_enabled == True + register: query_result + tags: poap + +- name: Poap TC3 - Merged - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_poap_conf }}" + changed: "{{ merged_result.changed }}" + when: poap_enabled == True + register: result + tags: poap + +# TC - 4 +- name: Poap TC4 - Verify Idempotence + cisco.nd.nd_manage_switches: *conf_poap + when: poap_enabled == True + register: result + tags: poap + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + - 'result.msg == "No switches to merge — fabric already matches desired config"' + when: poap_enabled == True + tags: poap + +# ---------------------------------------------- +# Cleanup Fabric Switches +# ---------------------------------------------- + +- name: Poap - Clean Up Existing Devices + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + when: poap_enabled == True + register: deleted_result + tags: poap diff --git a/tests/integration/targets/nd_manage_switches/tests/replaced.yaml b/tests/integration/targets/nd_manage_switches/tests/replaced.yaml new file mode 100644 index 000000000..70415a192 --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tests/replaced.yaml @@ -0,0 +1,136 @@ +--- +- name: Import ND Manage Switches Base Tasks + ansible.builtin.import_tasks: ../../tasks/base_tasks.yaml + tags: replaced + +# ---------------------------------------------- +# Run Test Cases +# ---------------------------------------------- + +# TC - 1 +- name: Replaced TC1 - Prepare Switches in Fabric - GreenField Deployment + cisco.nd.nd_manage_switches: &conf + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_base_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: merged_result + tags: replaced + +- name: Replaced TC1 - Query Switch State in 
Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: replaced + +- name: Replaced TC1 - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_base_conf }}" + changed: " {{ merged_result.changed }}" + register: result + tags: replaced + +# TC - 2 +- name: Replaced TC2 - Verify Idempotence + cisco.nd.nd_manage_switches: *conf + register: result + tags: replaced + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + - 'result.msg == "No switches to merge — fabric already matches desired config"' + tags: replaced + +# TC - 3 +- name: Replaced TC3 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw2 }}" + role: leaf + preserve_config: false + delegate_to: localhost + tags: replaced + +- name: Import Configuration Prepare Tasks + vars: + file: replaced + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: replaced + +- name: Replaced TC3 - New Role for the Existing Switch + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: replaced + config: "{{ nd_switches_replaced_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: replaced_result + tags: replaced + +- name: Replaced TC3 - Prepare Test Data + ansible.builtin.set_fact: + nd_switches_replaced_conf: >- + {{ (nd_switches_base_conf | rejectattr('seed_ip', 'equalto', test_data.sw2) | list) + + [nd_switches_base_conf | selectattr('seed_ip', 'equalto', test_data.sw2) | first | combine({'role': 'leaf'})] }} + delegate_to: localhost + tags: replaced + +- name: Replaced TC3 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: replaced + +- name: Replaced TC3 - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" 
+ test_data: "{{ nd_switches_replaced_conf }}" + changed: "{{ replaced_result.changed }}" + register: result + tags: replaced + +# TC - 4 +- name: Replaced TC4 - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw2 }}" + preserve_config: false + delegate_to: localhost + tags: replaced + +- name: Import Configuration Prepare Tasks + vars: + file: replaced + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: replaced + +- name: Replaced TC4 - Unspecified Role for the Existing Switch (Default, Leaf) + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: replaced + config: "{{ nd_switches_replaced_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: replaced_result + tags: replaced + +- name: Assert + ansible.builtin.assert: + that: + - 'replaced_result.changed == false' + - 'result.msg == "No switches to replace — fabric already matches desired config"' + tags: replaced + +# ---------------------------------------------- +# Cleanup Fabric Switches +# ---------------------------------------------- + +- name: Replaced - Cleanup Fabric Switches + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: result + tags: replaced diff --git a/tests/integration/targets/nd_manage_switches/tests/rma.yaml b/tests/integration/targets/nd_manage_switches/tests/rma.yaml new file mode 100644 index 000000000..009231d40 --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tests/rma.yaml @@ -0,0 +1,173 @@ +--- +- name: Test Entry Point - [nd_manage_switches - RMA] + ansible.builtin.debug: + msg: + - "----------------------------------------------------------------" + - "+ Executing RMA Tests - [nd_manage_switches] +" + - "----------------------------------------------------------------" + tags: rma + +- name: RMA - Setup Internal TestCase Variables + ansible.builtin.set_fact: + test_data: + test_fabric: "{{ ansible_it_fabric }}" + sw1: 
"{{ ansible_switch1 }}" + sw1_serial: "1ABC23DEFGH" + sw1_rma_serial: "1ABC23DERMA" + config_actions: + save: "{{ save }}" + deploy: "{{ deploy }}" + type: "{{ config_actions_type }}" + rma_enabled: false + delegate_to: localhost + tags: rma + +# ---------------------------------------------- +# Run Test Cases +# ---------------------------------------------- +# Base Tests +- name: Base - Verify fabric is reachable via API + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}" + method: get + register: fabric_query + ignore_errors: true + +- name: Base - Assert fabric exists + ansible.builtin.assert: + that: + - fabric_query.status == 200 + fail_msg: "Fabric '{{ test_data.test_fabric }}' not found (HTTP {{ fabric_query.status }})." + success_msg: "Fabric '{{ test_data.test_fabric }}' found." + +- name: RMA Base Task - Set Variable + ansible.builtin.set_fact: + rma_enabled: true + when: fabric_query.status == 200 and fabric_query.jsondata.management.day0Bootstrap + tags: rma + +# TC1 +- name: RMA TC1 - Prepare Validate Config + ansible.builtin.set_fact: + nd_switches_delete_conf: + delegate_to: localhost + tags: rma + +- name: RMA TC1 - Clean Up Existing Switches + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: deleted_result + tags: rma + +- name: RMA TC1 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: rma + +- name: RMA TC1 - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_delete_conf }}" + register: result + tags: rma + +# Tasks to add a switch to fabric and to configure and deploy +# the switch in maintenance mode. 
+# Please note that the switch should be shutdown after configuring it +# in maintenance mode + +# TC2 +- name: RMA TC2 - Prepare Configuration + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw1 }}" + auth_proto: MD5 + when: rma_enabled == True + delegate_to: localhost + tags: rma + +- name: Import Configuration Prepare Tasks + vars: + file: rma + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + when: rma_enabled == True + tags: rma + +- name: RMA TC2 - Add Switch to the Fabric + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_rma_conf }}" + config_actions: "{{ test_data.config_actions }}" + when: rma_enabled == True + register: merged_result + tags: rma + +- name: RMA TC2 - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + when: rma_enabled == True + register: query_result + tags: rma + +- name: RMA TC2 - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_rma_conf }}" + when: rma_enabled == True + register: result + tags: rma + +- name: RMA TC2 - Change System Mode to Maintenance, Deploy and Block until Complete + cisco.nd.nd_rest: + path: "/api/v1/manage/inventory/switchActions/changeSystemMode?deploy=true&blocking=true" + method: POST + content: + mode: "maintenance" + switchIds: + - "{{ test_data.sw1_serial }}" + register: change_system_mode_result + when: (rma_enabled == True) + tags: rma + +# TC3 +- block: + - name: RMA TC3 - RMA the Existing Switch + cisco.nd.nd_manage_switches: + fabric: '{{ test_data.test_fabric }}' + state: merged + config: + - seed_ip: '{{ test_data.sw1 }}' + username: '{{ switch_username }}' + password: '{{ switch_password }}' + rma: + - new_serial_number: '{{ test_data.sw1_rma_serial }}' + register: result + + - name: ASSERT - Check condition + ansible.builtin.assert: + that: + - 
'result.changed == true' + + - name: ASSERT - Check condition + ansible.builtin.assert: + that: + - 'item["RETURN_CODE"] == 200' + loop: '{{ result.response }}' + when: (rma_enabled == True) + tags: rma + +# ---------------------------------------------- +# Cleanup Fabric Switches +# ---------------------------------------------- + +- name: RMA - Clean Up - Remove Existing Switches + cisco.nd.nd_manage_switches: + fabric: "{{ test_data.test_fabric }}" + state: deleted + register: result + tags: rma diff --git a/tests/integration/targets/nd_manage_switches/tests/sanity.yaml b/tests/integration/targets/nd_manage_switches/tests/sanity.yaml new file mode 100644 index 000000000..4d93bae01 --- /dev/null +++ b/tests/integration/targets/nd_manage_switches/tests/sanity.yaml @@ -0,0 +1,257 @@ +--- + +- name: Import ND Manage Switches Base Tasks + ansible.builtin.import_tasks: ../../tasks/base_tasks.yaml + tags: sanity + +# ---------------------------------------------- +# Run Test Cases +# ---------------------------------------------- + +# ---------------------------------------------- # +# Merged # +# ---------------------------------------------- # + +# TC - 1 +- name: Sanity TC1 - Merged - Prepare Switches in Fabric - GreenField Deployment + cisco.nd.nd_manage_switches: &conf + fabric: "{{ test_data.test_fabric }}" + state: merged + config: "{{ nd_switches_base_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: create_result + tags: sanity + +- name: Sanity TC1 - Merged - Query Inventory State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: sanity + +- name: Sanity TC1 - Merged - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_base_conf }}" + changed: "{{ create_result.changed }}" + register: result + tags: sanity + +# TC - 2 +- name: Sanity TC2 - Merged - Idempotence + 
cisco.nd.nd_manage_switches: *conf + register: result + tags: sanity + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + - 'result.msg == "No switches to merge — fabric already matches desired config"' + tags: sanity + +# ---------------------------------------------- # +# Gathered # +# ---------------------------------------------- # + +# TC - 2 +- name: Sanity TC2 - Gathered - Gather Switch State in Fabric + cisco.nd.nd_manage_switches: + state: gathered + fabric: "{{ test_data.test_fabric }}" + register: gathered_result + tags: sanity + +- name: Sanity TC2 - Gathered - Build Gathered Lookup + ansible.builtin.set_fact: + gathered_seeds: "{{ gathered_result.gathered | map(attribute='seed_ip') | list }}" + gathered_role_map: "{{ gathered_result.gathered | items2dict(key_name='seed_ip', value_name='role') }}" + delegate_to: localhost + tags: sanity + +- name: Sanity TC2 - Gathered - Validate Gathered Count + ansible.builtin.assert: + that: + - gathered_result.gathered | length == nd_switches_base_conf | length + fail_msg: >- + Gathered count {{ gathered_result.gathered | length }} does not match + expected {{ nd_switches_base_conf | length }} + tags: sanity + +- name: Sanity TC2 - Gathered - Validate Each Switch Present and Role Matches + ansible.builtin.assert: + that: + - item.seed_ip in gathered_seeds + - "'role' not in item or gathered_role_map[item.seed_ip] == item.role" + fail_msg: >- + Switch {{ item.seed_ip }} missing from gathered output or role mismatch + (expected={{ item.role | default('any') }}, + got={{ gathered_role_map[item.seed_ip] | default('not found') }}) + loop: "{{ nd_switches_base_conf }}" + tags: sanity + +# ---------------------------------------------- # +# Replaced # +# ---------------------------------------------- # +# TC - 3 +- name: Sanity TC3 - Replaced - Prepare Config + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw1 }}" + role: spine + preserve_config: false + delegate_to: 
localhost + tags: sanity + +- name: Import Configuration Prepare Tasks + vars: + file: sanity + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: sanity + +- name: Sanity TC3 - Replaced - New Role for the Existing Switch + cisco.nd.nd_manage_switches: &conf_replace + fabric: "{{ test_data.test_fabric }}" + state: replaced + config: "{{ nd_switches_sanity_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: replaced_result + tags: sanity + +- name: Sanity TC3 - Replaced - Prepare Test Data + ansible.builtin.set_fact: + nd_switches_replaced_conf: >- + {{ (nd_switches_base_conf | rejectattr('seed_ip', 'equalto', test_data.sw1) | list) + + [nd_switches_base_conf | selectattr('seed_ip', 'equalto', test_data.sw1) | first | combine({'role': 'spine'})] }} + delegate_to: localhost + tags: sanity + +- name: Sanity TC3 - Replaced - Query Switch State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: sanity + +- name: Sanity TC3 - Replaced - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_replaced_conf }}" + changed: "{{ replaced_result.changed }}" + register: result + tags: sanity + +- name: Sanity TC3 - Replaced - Idempotence + cisco.nd.nd_manage_switches: *conf_replace + register: result + tags: sanity + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + - 'result.msg == "No switches to replace — fabric already matches desired config"' + tags: sanity + +# ---------------------------------------------- # +# Overridden # +# ---------------------------------------------- # + +# TC - 4 +- name: Sanity TC4 - Overridden - Prepare Conf + ansible.builtin.set_fact: + switch_conf: + - seed_ip: "{{ test_data.sw2 }}" + role: leaf + preserve_config: false + delegate_to: localhost + tags: sanity + +- name: Import Configuration Prepare Tasks + vars: + file: 
sanity + ansible.builtin.import_tasks: ../../tasks/conf_prep_tasks.yaml + tags: sanity + +- name: Sanity TC4 - Overridden - Update a New Switch using GreenField Deployment - Delete and Create - default role + cisco.nd.nd_manage_switches: &conf_over + fabric: "{{ test_data.test_fabric }}" + state: overridden + config: "{{ nd_switches_sanity_conf }}" + config_actions: "{{ test_data.config_actions }}" + register: overriden_result + tags: sanity + +- name: Sanity TC4 - Overridden - Query Inventory State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: sanity + +- name: Sanity TC4 - Overridden - Validate ND Data + cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_sanity_conf }}" + changed: "{{ overriden_result.changed }}" + register: result + tags: sanity + +- name: Sanity TC4 - Overridden - Idempotence + cisco.nd.nd_manage_switches: *conf_over + register: result + tags: sanity + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + - 'result.msg == "No switches to override — fabric already matches desired config"' + tags: sanity + +# ---------------------------------------------- # +# Clean-up # +# ---------------------------------------------- # + +# TC - 5 +- name: Sanity TC5 - Deleted - Clean up Existing devices + cisco.nd.nd_manage_switches: &clean + fabric: "{{ test_data.test_fabric }}" + state: deleted + config: "{{ nd_switches_sanity_conf }}" + register: deleted_result + tags: sanity + +- name: Sanity TC5 - Reset - Prepare Conf + ansible.builtin.set_fact: + nd_switches_sanity_conf: + delegate_to: localhost + tags: sanity + +- name: Sanity TC5 - Deleted - Query Inventory State in Fabric + cisco.nd.nd_rest: + path: "/api/v1/manage/fabrics/{{ test_data.test_fabric }}/switches" + method: get + register: query_result + tags: sanity + +- name: Sanity TC5 - Deleted - Validate ND Data + 
cisco.nd.nd_switches_validate: + nd_data: "{{ query_result }}" + test_data: "{{ nd_switches_sanity_conf }}" + changed: "{{ deleted_result.changed }}" + register: result + tags: sanity + +# TC - 6 +- name: Sanity TC6 - Deleted - Idempotence + cisco.nd.nd_manage_switches: *clean + register: result + tags: sanity + +- name: Assert + ansible.builtin.assert: + that: + - 'result.changed == false' + - 'result.msg == "No switches to delete - fabric already matches desired config"' + tags: sanity \ No newline at end of file diff --git a/tests/sanity/ignore-2.16.txt b/tests/sanity/ignore-2.16.txt new file mode 100644 index 000000000..c3ca42368 --- /dev/null +++ b/tests/sanity/ignore-2.16.txt @@ -0,0 +1 @@ +plugins/action/nd_switches_validate.py action-plugin-docs diff --git a/tests/sanity/ignore-2.17.txt b/tests/sanity/ignore-2.17.txt new file mode 100644 index 000000000..c3ca42368 --- /dev/null +++ b/tests/sanity/ignore-2.17.txt @@ -0,0 +1 @@ +plugins/action/nd_switches_validate.py action-plugin-docs diff --git a/tests/sanity/ignore-2.18.txt b/tests/sanity/ignore-2.18.txt new file mode 100644 index 000000000..c3ca42368 --- /dev/null +++ b/tests/sanity/ignore-2.18.txt @@ -0,0 +1 @@ +plugins/action/nd_switches_validate.py action-plugin-docs diff --git a/tests/sanity/ignore-2.19.txt b/tests/sanity/ignore-2.19.txt new file mode 100644 index 000000000..c3ca42368 --- /dev/null +++ b/tests/sanity/ignore-2.19.txt @@ -0,0 +1 @@ +plugins/action/nd_switches_validate.py action-plugin-docs diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_credentials_switches.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_credentials_switches.py new file mode 100644 index 000000000..a3a088b26 --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_credentials_switches.py @@ -0,0 +1,177 @@ +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or 
https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for manage_credentials_switches.py + +Tests the ND Manage Credentials Switches endpoint classes. +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest # pylint: disable=unused-import +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_credentials_switches import ( + CredentialsSwitchesEndpointParams, + EpManageCredentialsSwitchesPost, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: CredentialsSwitchesEndpointParams +# ============================================================================= + + +def test_endpoints_api_v1_manage_credentials_switches_00010(): + """ + # Summary + + Verify CredentialsSwitchesEndpointParams default values + + ## Test + + - ticket_id defaults to None + + ## Classes and Methods + + - CredentialsSwitchesEndpointParams.__init__() + """ + with does_not_raise(): + params = CredentialsSwitchesEndpointParams() + assert params.ticket_id is None + + +def test_endpoints_api_v1_manage_credentials_switches_00020(): + """ + # Summary + + Verify CredentialsSwitchesEndpointParams ticket_id can be set + + ## Test + + - ticket_id can be set to a string value + + ## Classes and Methods + + - CredentialsSwitchesEndpointParams.__init__() + """ + with does_not_raise(): + params = CredentialsSwitchesEndpointParams(ticket_id="CHG12345") + assert params.ticket_id == "CHG12345" + + +def test_endpoints_api_v1_manage_credentials_switches_00030(): + """ + # Summary + + Verify CredentialsSwitchesEndpointParams generates correct query string + + ## Test + + - to_query_string() returns ticketId=CHG12345 when 
ticket_id is set + + ## Classes and Methods + + - CredentialsSwitchesEndpointParams.to_query_string() + """ + with does_not_raise(): + params = CredentialsSwitchesEndpointParams(ticket_id="CHG12345") + result = params.to_query_string() + assert result == "ticketId=CHG12345" + + +def test_endpoints_api_v1_manage_credentials_switches_00040(): + """ + # Summary + + Verify CredentialsSwitchesEndpointParams returns empty query string when no params set + + ## Test + + - to_query_string() returns empty string when ticket_id is not set + + ## Classes and Methods + + - CredentialsSwitchesEndpointParams.to_query_string() + """ + with does_not_raise(): + params = CredentialsSwitchesEndpointParams() + result = params.to_query_string() + assert result == "" + + +# ============================================================================= +# Test: EpManageCredentialsSwitchesPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_credentials_switches_00100(): + """ + # Summary + + Verify EpManageCredentialsSwitchesPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageCredentialsSwitchesPost.__init__() + - EpManageCredentialsSwitchesPost.class_name + - EpManageCredentialsSwitchesPost.verb + """ + with does_not_raise(): + instance = EpManageCredentialsSwitchesPost() + assert instance.class_name == "EpManageCredentialsSwitchesPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_credentials_switches_00110(): + """ + # Summary + + Verify EpManageCredentialsSwitchesPost path without query params + + ## Test + + - path returns the correct base endpoint path + + ## Classes and Methods + + - EpManageCredentialsSwitchesPost.path + """ + with does_not_raise(): + instance = EpManageCredentialsSwitchesPost() + result = instance.path + assert result == 
"/api/v1/manage/credentials/switches" + + +def test_endpoints_api_v1_manage_credentials_switches_00120(): + """ + # Summary + + Verify EpManageCredentialsSwitchesPost path with ticket_id + + ## Test + + - path includes ticketId in query string when set + + ## Classes and Methods + + - EpManageCredentialsSwitchesPost.path + """ + with does_not_raise(): + instance = EpManageCredentialsSwitchesPost() + instance.endpoint_params.ticket_id = "CHG12345" + result = instance.path + assert result == "/api/v1/manage/credentials/switches?ticketId=CHG12345" diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py index cb1b17d4f..5e4b674a7 100644 --- a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics.py @@ -16,12 +16,14 @@ import pytest # pylint: disable=unused-import from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics import ( + EpManageFabricConfigDeployPost, EpManageFabricsDelete, EpManageFabricsGet, EpManageFabricsListGet, EpManageFabricsPost, EpManageFabricsPut, EpManageFabricsSummaryGet, + FabricConfigDeployEndpointParams, ) from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( @@ -53,7 +55,7 @@ def test_endpoints_api_v1_manage_fabrics_00010(): """ with does_not_raise(): instance = EpManageFabricsGet() - assert instance.class_name == "EpApiV1ManageFabricsGet" + assert instance.class_name == "EpManageFabricsGet" assert instance.verb == HttpVerbEnum.GET @@ -146,7 +148,7 @@ def test_endpoints_api_v1_manage_fabrics_00100(): """ with does_not_raise(): instance = EpManageFabricsListGet() - assert instance.class_name == "EpApiV1ManageFabricsListGet" + assert instance.class_name == "EpManageFabricsListGet" assert instance.verb == 
HttpVerbEnum.GET @@ -296,7 +298,7 @@ def test_endpoints_api_v1_manage_fabrics_00200(): """ with does_not_raise(): instance = EpManageFabricsPost() - assert instance.class_name == "EpApiV1ManageFabricsPost" + assert instance.class_name == "EpManageFabricsPost" assert instance.verb == HttpVerbEnum.POST @@ -388,7 +390,7 @@ def test_endpoints_api_v1_manage_fabrics_00300(): """ with does_not_raise(): instance = EpManageFabricsPut() - assert instance.class_name == "EpApiV1ManageFabricsPut" + assert instance.class_name == "EpManageFabricsPut" assert instance.verb == HttpVerbEnum.PUT @@ -481,7 +483,7 @@ def test_endpoints_api_v1_manage_fabrics_00400(): """ with does_not_raise(): instance = EpManageFabricsDelete() - assert instance.class_name == "EpApiV1ManageFabricsDelete" + assert instance.class_name == "EpManageFabricsDelete" assert instance.verb == HttpVerbEnum.DELETE @@ -574,7 +576,7 @@ def test_endpoints_api_v1_manage_fabrics_00500(): """ with does_not_raise(): instance = EpManageFabricsSummaryGet() - assert instance.class_name == "EpApiV1ManageFabricsSummaryGet" + assert instance.class_name == "EpManageFabricsSummaryGet" assert instance.verb == HttpVerbEnum.GET @@ -734,3 +736,180 @@ def test_endpoints_api_v1_manage_fabrics_00620(): with pytest.raises(ValueError): instance = EpManageFabricsGet() instance.fabric_name = "a" * 65 + + +# ============================================================================= +# Test: FabricConfigDeployEndpointParams +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_00700(): + """ + # Summary + + Verify FabricConfigDeployEndpointParams default values + + ## Test + + - force_show_run defaults to None + - incl_all_msd_switches defaults to None + + ## Classes and Methods + + - FabricConfigDeployEndpointParams.__init__() + """ + with does_not_raise(): + params = FabricConfigDeployEndpointParams() + assert params.force_show_run is None + assert 
params.incl_all_msd_switches is None + + +def test_endpoints_api_v1_manage_fabrics_00710(): + """ + # Summary + + Verify FabricConfigDeployEndpointParams force_show_run can be set + + ## Test + + - force_show_run can be set to True + + ## Classes and Methods + + - FabricConfigDeployEndpointParams.__init__() + """ + with does_not_raise(): + params = FabricConfigDeployEndpointParams(force_show_run=True) + assert params.force_show_run is True + + +def test_endpoints_api_v1_manage_fabrics_00720(): + """ + # Summary + + Verify FabricConfigDeployEndpointParams generates query string with both params + + ## Test + + - to_query_string() includes forceShowRun and inclAllMsdSwitches when both are set + + ## Classes and Methods + + - FabricConfigDeployEndpointParams.to_query_string() + """ + with does_not_raise(): + params = FabricConfigDeployEndpointParams(force_show_run=True, incl_all_msd_switches=True) + result = params.to_query_string() + assert "forceShowRun=true" in result + assert "inclAllMsdSwitches=true" in result + + +def test_endpoints_api_v1_manage_fabrics_00730(): + """ + # Summary + + Verify FabricConfigDeployEndpointParams returns empty query string when no params set + + ## Test + + - to_query_string() returns empty string when no params set + + ## Classes and Methods + + - FabricConfigDeployEndpointParams.to_query_string() + """ + with does_not_raise(): + params = FabricConfigDeployEndpointParams() + result = params.to_query_string() + assert result == "" + + +# ============================================================================= +# Test: EpManageFabricConfigDeployPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_00800(): + """ + # Summary + + Verify EpManageFabricConfigDeployPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricConfigDeployPost.__init__() + - 
EpManageFabricConfigDeployPost.class_name + - EpManageFabricConfigDeployPost.verb + """ + with does_not_raise(): + instance = EpManageFabricConfigDeployPost() + assert instance.class_name == "EpManageFabricConfigDeployPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_00810(): + """ + # Summary + + Verify EpManageFabricConfigDeployPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricConfigDeployPost.path + """ + instance = EpManageFabricConfigDeployPost() + with pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_00820(): + """ + # Summary + + Verify EpManageFabricConfigDeployPost path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricConfigDeployPost.path + """ + with does_not_raise(): + instance = EpManageFabricConfigDeployPost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/actions/configDeploy" + + +def test_endpoints_api_v1_manage_fabrics_00830(): + """ + # Summary + + Verify EpManageFabricConfigDeployPost path with force_show_run + + ## Test + + - path includes forceShowRun in query string when set to True + + ## Classes and Methods + + - EpManageFabricConfigDeployPost.path + """ + with does_not_raise(): + instance = EpManageFabricConfigDeployPost() + instance.fabric_name = "MyFabric" + instance.endpoint_params.force_show_run = True + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/actions/configDeploy?forceShowRun=true" diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_actions.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_actions.py new file mode 100644 index 000000000..fe913d48f --- /dev/null +++ 
b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_actions.py @@ -0,0 +1,162 @@ +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for manage_fabrics_actions.py + +Tests the ND Manage Fabrics Actions endpoint classes. +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_actions import ( + EpManageFabricsActionsConfigSavePost, + EpManageFabricsActionsShallowDiscoveryPost, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: EpManageFabricsActionsShallowDiscoveryPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_actions_00100(): + """ + # Summary + + Verify EpManageFabricsActionsShallowDiscoveryPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsActionsShallowDiscoveryPost.__init__() + - EpManageFabricsActionsShallowDiscoveryPost.class_name + - EpManageFabricsActionsShallowDiscoveryPost.verb + """ + with does_not_raise(): + instance = EpManageFabricsActionsShallowDiscoveryPost() + assert instance.class_name == "EpManageFabricsActionsShallowDiscoveryPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_actions_00110(): + """ + # Summary + + Verify EpManageFabricsActionsShallowDiscoveryPost raises ValueError when fabric_name is not set + + ## Test + + - 
Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsActionsShallowDiscoveryPost.path + """ + instance = EpManageFabricsActionsShallowDiscoveryPost() + with pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_actions_00120(): + """ + # Summary + + Verify EpManageFabricsActionsShallowDiscoveryPost path + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsActionsShallowDiscoveryPost.path + """ + with does_not_raise(): + instance = EpManageFabricsActionsShallowDiscoveryPost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/actions/shallowDiscovery" + + +# ============================================================================= +# Test: EpManageFabricsActionsConfigSavePost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_actions_00200(): + """ + # Summary + + Verify EpManageFabricsActionsConfigSavePost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsActionsConfigSavePost.__init__() + - EpManageFabricsActionsConfigSavePost.class_name + - EpManageFabricsActionsConfigSavePost.verb + """ + with does_not_raise(): + instance = EpManageFabricsActionsConfigSavePost() + assert instance.class_name == "EpManageFabricsActionsConfigSavePost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_actions_00210(): + """ + # Summary + + Verify EpManageFabricsActionsConfigSavePost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsActionsConfigSavePost.path + """ + instance = EpManageFabricsActionsConfigSavePost() + with 
pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_actions_00220(): + """ + # Summary + + Verify EpManageFabricsActionsConfigSavePost path + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsActionsConfigSavePost.path + """ + with does_not_raise(): + instance = EpManageFabricsActionsConfigSavePost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/actions/configSave" diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_bootstrap.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_bootstrap.py new file mode 100644 index 000000000..89349b151 --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_bootstrap.py @@ -0,0 +1,206 @@ +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for manage_fabrics_bootstrap.py + +Tests the ND Manage Fabrics Bootstrap endpoint classes. 
+""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_bootstrap import ( + EpManageFabricsBootstrapGet, + FabricsBootstrapEndpointParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: FabricsBootstrapEndpointParams +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_bootstrap_00010(): + """ + # Summary + + Verify FabricsBootstrapEndpointParams default values + + ## Test + + - max defaults to None + - offset defaults to None + - filter defaults to None + + ## Classes and Methods + + - FabricsBootstrapEndpointParams.__init__() + """ + with does_not_raise(): + params = FabricsBootstrapEndpointParams() + assert params.max is None + assert params.offset is None + assert params.filter is None + + +def test_endpoints_api_v1_manage_fabrics_bootstrap_00020(): + """ + # Summary + + Verify FabricsBootstrapEndpointParams max can be set + + ## Test + + - max can be set to an integer value + + ## Classes and Methods + + - FabricsBootstrapEndpointParams.__init__() + """ + with does_not_raise(): + params = FabricsBootstrapEndpointParams(max=50) + assert params.max == 50 + + +def test_endpoints_api_v1_manage_fabrics_bootstrap_00030(): + """ + # Summary + + Verify FabricsBootstrapEndpointParams generates query string with pagination + + ## Test + + - to_query_string() returns correct format with max and offset + + ## Classes and Methods + + - FabricsBootstrapEndpointParams.to_query_string() + """ + with does_not_raise(): + params = 
FabricsBootstrapEndpointParams(max=50, offset=0) + result = params.to_query_string() + assert "max=50" in result + assert "offset=0" in result + + +def test_endpoints_api_v1_manage_fabrics_bootstrap_00040(): + """ + # Summary + + Verify FabricsBootstrapEndpointParams returns empty query string when no params set + + ## Test + + - to_query_string() returns empty string when no params set + + ## Classes and Methods + + - FabricsBootstrapEndpointParams.to_query_string() + """ + with does_not_raise(): + params = FabricsBootstrapEndpointParams() + result = params.to_query_string() + assert result == "" + + +# ============================================================================= +# Test: EpManageFabricsBootstrapGet +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_bootstrap_00100(): + """ + # Summary + + Verify EpManageFabricsBootstrapGet basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is GET + + ## Classes and Methods + + - EpManageFabricsBootstrapGet.__init__() + - EpManageFabricsBootstrapGet.class_name + - EpManageFabricsBootstrapGet.verb + """ + with does_not_raise(): + instance = EpManageFabricsBootstrapGet() + assert instance.class_name == "EpManageFabricsBootstrapGet" + assert instance.verb == HttpVerbEnum.GET + + +def test_endpoints_api_v1_manage_fabrics_bootstrap_00110(): + """ + # Summary + + Verify EpManageFabricsBootstrapGet raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsBootstrapGet.path + """ + instance = EpManageFabricsBootstrapGet() + with pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_bootstrap_00120(): + """ + # Summary + + Verify EpManageFabricsBootstrapGet path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and 
Methods + + - EpManageFabricsBootstrapGet.path + """ + with does_not_raise(): + instance = EpManageFabricsBootstrapGet() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/bootstrap" + + +def test_endpoints_api_v1_manage_fabrics_bootstrap_00130(): + """ + # Summary + + Verify EpManageFabricsBootstrapGet path with pagination params + + ## Test + + - path includes max and offset in query string when set + + ## Classes and Methods + + - EpManageFabricsBootstrapGet.path + """ + with does_not_raise(): + instance = EpManageFabricsBootstrapGet() + instance.fabric_name = "MyFabric" + instance.endpoint_params.max = 50 + instance.endpoint_params.offset = 0 + result = instance.path + assert result.startswith("/api/v1/manage/fabrics/MyFabric/bootstrap?") + assert "max=50" in result + assert "offset=0" in result diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_inventory.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_inventory.py new file mode 100644 index 000000000..79de46b7b --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_inventory.py @@ -0,0 +1,92 @@ +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for manage_fabrics_inventory.py + +Tests the ND Manage Fabrics Inventory endpoint classes. 
+""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_inventory import ( + EpManageFabricsInventoryDiscoverGet, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: EpManageFabricsInventoryDiscoverGet +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_inventory_00010(): + """ + # Summary + + Verify EpManageFabricsInventoryDiscoverGet basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is GET + + ## Classes and Methods + + - EpManageFabricsInventoryDiscoverGet.__init__() + - EpManageFabricsInventoryDiscoverGet.class_name + - EpManageFabricsInventoryDiscoverGet.verb + """ + with does_not_raise(): + instance = EpManageFabricsInventoryDiscoverGet() + assert instance.class_name == "EpManageFabricsInventoryDiscoverGet" + assert instance.verb == HttpVerbEnum.GET + + +def test_endpoints_api_v1_manage_fabrics_inventory_00020(): + """ + # Summary + + Verify EpManageFabricsInventoryDiscoverGet raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsInventoryDiscoverGet.path + """ + instance = EpManageFabricsInventoryDiscoverGet() + with pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_inventory_00030(): + """ + # Summary + + Verify EpManageFabricsInventoryDiscoverGet path + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - 
EpManageFabricsInventoryDiscoverGet.path + """ + with does_not_raise(): + instance = EpManageFabricsInventoryDiscoverGet() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/inventory/discover" diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py new file mode 100644 index 000000000..72802bfc7 --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switchactions.py @@ -0,0 +1,491 @@ +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for manage_fabrics_switchactions.py + +Tests the ND Manage Fabrics Switch Actions endpoint classes. +""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_switchactions import ( + EpManageFabricsSwitchActionsChangeRolesPost, + EpManageFabricsSwitchActionsImportBootstrapPost, + EpManageFabricsSwitchActionsPreProvisionPost, + EpManageFabricsSwitchActionsRediscoverPost, + EpManageFabricsSwitchActionsRemovePost, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: EpManageFabricsSwitchActionsRemovePost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00100(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsRemovePost basic instantiation + + ## Test + + - 
Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsSwitchActionsRemovePost.__init__() + - EpManageFabricsSwitchActionsRemovePost.class_name + - EpManageFabricsSwitchActionsRemovePost.verb + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsRemovePost() + assert instance.class_name == "EpManageFabricsSwitchActionsRemovePost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00110(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsRemovePost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchActionsRemovePost.path + """ + instance = EpManageFabricsSwitchActionsRemovePost() + with pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00120(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsRemovePost path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsSwitchActionsRemovePost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsRemovePost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switchActions/remove" + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00130(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsRemovePost path with force and ticket_id + + ## Test + + - path includes force and ticketId in query string when set + + ## Classes and Methods + + - EpManageFabricsSwitchActionsRemovePost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsRemovePost() + instance.fabric_name = "MyFabric" + instance.endpoint_params.force = True + instance.endpoint_params.ticket_id = "CHG12345" + result = instance.path + assert 
result.startswith("/api/v1/manage/fabrics/MyFabric/switchActions/remove?") + assert "force=true" in result + assert "ticketId=CHG12345" in result + + +# ============================================================================= +# Test: EpManageFabricsSwitchActionsChangeRolesPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00200(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsChangeRolesPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsSwitchActionsChangeRolesPost.__init__() + - EpManageFabricsSwitchActionsChangeRolesPost.class_name + - EpManageFabricsSwitchActionsChangeRolesPost.verb + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsChangeRolesPost() + assert instance.class_name == "EpManageFabricsSwitchActionsChangeRolesPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00210(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsChangeRolesPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchActionsChangeRolesPost.path + """ + instance = EpManageFabricsSwitchActionsChangeRolesPost() + with pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00220(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsChangeRolesPost path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsSwitchActionsChangeRolesPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsChangeRolesPost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == 
"/api/v1/manage/fabrics/MyFabric/switchActions/changeRoles" + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00230(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsChangeRolesPost path with ticket_id + + ## Test + + - path includes ticketId in query string when set + + ## Classes and Methods + + - EpManageFabricsSwitchActionsChangeRolesPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsChangeRolesPost() + instance.fabric_name = "MyFabric" + instance.endpoint_params.ticket_id = "CHG12345" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switchActions/changeRoles?ticketId=CHG12345" + + +# ============================================================================= +# Test: EpManageFabricsSwitchActionsImportBootstrapPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00300(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsImportBootstrapPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsSwitchActionsImportBootstrapPost.__init__() + - EpManageFabricsSwitchActionsImportBootstrapPost.class_name + - EpManageFabricsSwitchActionsImportBootstrapPost.verb + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsImportBootstrapPost() + assert instance.class_name == "EpManageFabricsSwitchActionsImportBootstrapPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00310(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsImportBootstrapPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchActionsImportBootstrapPost.path + """ + instance = 
EpManageFabricsSwitchActionsImportBootstrapPost() + with pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00320(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsImportBootstrapPost path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsSwitchActionsImportBootstrapPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsImportBootstrapPost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switchActions/importBootstrap" + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00330(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsImportBootstrapPost path with cluster_name and ticket_id + + ## Test + + - path includes clusterName and ticketId in query string when set + + ## Classes and Methods + + - EpManageFabricsSwitchActionsImportBootstrapPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsImportBootstrapPost() + instance.fabric_name = "MyFabric" + instance.endpoint_params.cluster_name = "cluster1" + instance.endpoint_params.ticket_id = "CHG12345" + result = instance.path + assert result.startswith("/api/v1/manage/fabrics/MyFabric/switchActions/importBootstrap?") + assert "clusterName=cluster1" in result + assert "ticketId=CHG12345" in result + + +# ============================================================================= +# Test: EpManageFabricsSwitchActionsPreProvisionPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00400(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsPreProvisionPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsSwitchActionsPreProvisionPost.__init__() + - 
EpManageFabricsSwitchActionsPreProvisionPost.class_name + - EpManageFabricsSwitchActionsPreProvisionPost.verb + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsPreProvisionPost() + assert instance.class_name == "EpManageFabricsSwitchActionsPreProvisionPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00410(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsPreProvisionPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchActionsPreProvisionPost.path + """ + instance = EpManageFabricsSwitchActionsPreProvisionPost() + with pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00420(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsPreProvisionPost path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsSwitchActionsPreProvisionPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsPreProvisionPost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switchActions/preProvision" + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00430(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsPreProvisionPost path with cluster_name and ticket_id + + ## Test + + - path includes clusterName and ticketId in query string when set + + ## Classes and Methods + + - EpManageFabricsSwitchActionsPreProvisionPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsPreProvisionPost() + instance.fabric_name = "MyFabric" + instance.endpoint_params.cluster_name = "cluster1" + instance.endpoint_params.ticket_id = "CHG12345" + result = instance.path + assert 
result.startswith("/api/v1/manage/fabrics/MyFabric/switchActions/preProvision?") + assert "clusterName=cluster1" in result + assert "ticketId=CHG12345" in result + + +# ============================================================================= +# Test: EpManageFabricsSwitchActionsRediscoverPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00700(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsRediscoverPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsSwitchActionsRediscoverPost.__init__() + - EpManageFabricsSwitchActionsRediscoverPost.class_name + - EpManageFabricsSwitchActionsRediscoverPost.verb + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsRediscoverPost() + assert instance.class_name == "EpManageFabricsSwitchActionsRediscoverPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00710(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsRediscoverPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchActionsRediscoverPost.path + """ + instance = EpManageFabricsSwitchActionsRediscoverPost() + with pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00720(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsRediscoverPost path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsSwitchActionsRediscoverPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsRediscoverPost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == 
"/api/v1/manage/fabrics/MyFabric/switchActions/rediscover" + + +def test_endpoints_api_v1_manage_fabrics_switchactions_00730(): + """ + # Summary + + Verify EpManageFabricsSwitchActionsRediscoverPost path with ticket_id + + ## Test + + - path includes ticketId in query string when set + + ## Classes and Methods + + - EpManageFabricsSwitchActionsRediscoverPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchActionsRediscoverPost() + instance.fabric_name = "MyFabric" + instance.endpoint_params.ticket_id = "CHG12345" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switchActions/rediscover?ticketId=CHG12345" diff --git a/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py new file mode 100644 index 000000000..6ee60ef1d --- /dev/null +++ b/tests/unit/module_utils/endpoints/test_endpoints_api_v1_manage_fabrics_switches.py @@ -0,0 +1,613 @@ +# Copyright: (c) 2026, Akshayanat C S (@achengam) + +# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) + +""" +Unit tests for manage_fabrics_switches.py + +Tests the ND Manage Fabrics Switches endpoint classes. 
+""" + +from __future__ import absolute_import, annotations, division, print_function + +# pylint: disable=invalid-name +__metaclass__ = type +# pylint: enable=invalid-name + +import pytest +from ansible_collections.cisco.nd.plugins.module_utils.endpoints.v1.manage.manage_fabrics_switches import ( + EpManageFabricsSwitchesGet, + EpManageFabricsSwitchesPost, + EpManageFabricsSwitchChangeSerialNumberPost, + EpManageFabricsSwitchProvisionRMAPost, + FabricSwitchesAddEndpointParams, + FabricSwitchesGetEndpointParams, + SwitchActionsClusterEndpointParams, +) +from ansible_collections.cisco.nd.plugins.module_utils.enums import HttpVerbEnum +from ansible_collections.cisco.nd.tests.unit.module_utils.common_utils import ( + does_not_raise, +) + +# ============================================================================= +# Test: FabricSwitchesGetEndpointParams +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switches_00010(): + """ + # Summary + + Verify FabricSwitchesGetEndpointParams default values + + ## Test + + - hostname defaults to None + - max defaults to None + - offset defaults to None + + ## Classes and Methods + + - FabricSwitchesGetEndpointParams.__init__() + """ + with does_not_raise(): + params = FabricSwitchesGetEndpointParams() + assert params.hostname is None + assert params.max is None + assert params.offset is None + + +def test_endpoints_api_v1_manage_fabrics_switches_00020(): + """ + # Summary + + Verify FabricSwitchesGetEndpointParams hostname can be set + + ## Test + + - hostname can be set to a string value + + ## Classes and Methods + + - FabricSwitchesGetEndpointParams.__init__() + """ + with does_not_raise(): + params = FabricSwitchesGetEndpointParams(hostname="leaf1") + assert params.hostname == "leaf1" + + +def test_endpoints_api_v1_manage_fabrics_switches_00030(): + """ + # Summary + + Verify FabricSwitchesGetEndpointParams generates query string with hostname and 
max + + ## Test + + - to_query_string() includes hostname and max when both are set + + ## Classes and Methods + + - FabricSwitchesGetEndpointParams.to_query_string() + """ + with does_not_raise(): + params = FabricSwitchesGetEndpointParams(hostname="leaf1", max=100) + result = params.to_query_string() + assert "hostname=leaf1" in result + assert "max=100" in result + + +def test_endpoints_api_v1_manage_fabrics_switches_00040(): + """ + # Summary + + Verify FabricSwitchesAddEndpointParams default values + + ## Test + + - cluster_name defaults to None + - ticket_id defaults to None + + ## Classes and Methods + + - FabricSwitchesAddEndpointParams.__init__() + """ + with does_not_raise(): + params = FabricSwitchesAddEndpointParams() + assert params.cluster_name is None + assert params.ticket_id is None + + +def test_endpoints_api_v1_manage_fabrics_switches_00050(): + """ + # Summary + + Verify FabricSwitchesAddEndpointParams generates query string with both params + + ## Test + + - to_query_string() includes clusterName and ticketId when both are set + + ## Classes and Methods + + - FabricSwitchesAddEndpointParams.to_query_string() + """ + with does_not_raise(): + params = FabricSwitchesAddEndpointParams(cluster_name="cluster1", ticket_id="CHG12345") + result = params.to_query_string() + assert "clusterName=cluster1" in result + assert "ticketId=CHG12345" in result + + +# ============================================================================= +# Test: EpManageFabricsSwitchesGet +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switches_00100(): + """ + # Summary + + Verify EpManageFabricsSwitchesGet basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is GET + + ## Classes and Methods + + - EpManageFabricsSwitchesGet.__init__() + - EpManageFabricsSwitchesGet.class_name + - EpManageFabricsSwitchesGet.verb + """ + with does_not_raise(): + 
instance = EpManageFabricsSwitchesGet() + assert instance.class_name == "EpManageFabricsSwitchesGet" + assert instance.verb == HttpVerbEnum.GET + + +def test_endpoints_api_v1_manage_fabrics_switches_00110(): + """ + # Summary + + Verify EpManageFabricsSwitchesGet raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchesGet.path + """ + instance = EpManageFabricsSwitchesGet() + with pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_switches_00120(): + """ + # Summary + + Verify EpManageFabricsSwitchesGet path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsSwitchesGet.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchesGet() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switches" + + +def test_endpoints_api_v1_manage_fabrics_switches_00130(): + """ + # Summary + + Verify EpManageFabricsSwitchesGet path with hostname filter + + ## Test + + - path includes hostname in query string when set + + ## Classes and Methods + + - EpManageFabricsSwitchesGet.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchesGet() + instance.fabric_name = "MyFabric" + instance.endpoint_params.hostname = "leaf1" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switches?hostname=leaf1" + + +# ============================================================================= +# Test: EpManageFabricsSwitchesPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switches_00200(): + """ + # Summary + + Verify EpManageFabricsSwitchesPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and 
Methods + + - EpManageFabricsSwitchesPost.__init__() + - EpManageFabricsSwitchesPost.class_name + - EpManageFabricsSwitchesPost.verb + """ + with does_not_raise(): + instance = EpManageFabricsSwitchesPost() + assert instance.class_name == "EpManageFabricsSwitchesPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_switches_00210(): + """ + # Summary + + Verify EpManageFabricsSwitchesPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchesPost.path + """ + instance = EpManageFabricsSwitchesPost() + with pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_switches_00220(): + """ + # Summary + + Verify EpManageFabricsSwitchesPost path without query params + + ## Test + + - path returns correct endpoint path + + ## Classes and Methods + + - EpManageFabricsSwitchesPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchesPost() + instance.fabric_name = "MyFabric" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switches" + + +def test_endpoints_api_v1_manage_fabrics_switches_00230(): + """ + # Summary + + Verify EpManageFabricsSwitchesPost path with cluster_name and ticket_id + + ## Test + + - path includes clusterName and ticketId in query string when set + + ## Classes and Methods + + - EpManageFabricsSwitchesPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchesPost() + instance.fabric_name = "MyFabric" + instance.endpoint_params.cluster_name = "cluster1" + instance.endpoint_params.ticket_id = "CHG12345" + result = instance.path + assert result.startswith("/api/v1/manage/fabrics/MyFabric/switches?") + assert "clusterName=cluster1" in result + assert "ticketId=CHG12345" in result + + +# ============================================================================= +# Test: 
SwitchActionsClusterEndpointParams +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switches_00300(): + """ + # Summary + + Verify SwitchActionsClusterEndpointParams basic instantiation + + ## Test + + - Instance can be created with defaults + - cluster_name defaults to None + + ## Classes and Methods + + - SwitchActionsClusterEndpointParams.__init__() + """ + with does_not_raise(): + instance = SwitchActionsClusterEndpointParams() + assert instance.cluster_name is None + + +def test_endpoints_api_v1_manage_fabrics_switches_00310(): + """ + # Summary + + Verify SwitchActionsClusterEndpointParams to_query_string returns empty when no params set + + ## Test + + - to_query_string() returns empty string when cluster_name is None + + ## Classes and Methods + + - SwitchActionsClusterEndpointParams.to_query_string() + """ + instance = SwitchActionsClusterEndpointParams() + assert instance.to_query_string() == "" + + +def test_endpoints_api_v1_manage_fabrics_switches_00320(): + """ + # Summary + + Verify SwitchActionsClusterEndpointParams to_query_string with cluster_name + + ## Test + + - to_query_string() returns "clusterName=cluster1" when cluster_name is set + + ## Classes and Methods + + - SwitchActionsClusterEndpointParams.to_query_string() + """ + instance = SwitchActionsClusterEndpointParams(cluster_name="cluster1") + assert instance.to_query_string() == "clusterName=cluster1" + + +# ============================================================================= +# Test: EpManageFabricsSwitchProvisionRMAPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switches_00500(): + """ + # Summary + + Verify EpManageFabricsSwitchProvisionRMAPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - 
EpManageFabricsSwitchProvisionRMAPost.__init__() + - EpManageFabricsSwitchProvisionRMAPost.class_name + - EpManageFabricsSwitchProvisionRMAPost.verb + """ + with does_not_raise(): + instance = EpManageFabricsSwitchProvisionRMAPost() + assert instance.class_name == "EpManageFabricsSwitchProvisionRMAPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_switches_00510(): + """ + # Summary + + Verify EpManageFabricsSwitchProvisionRMAPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchProvisionRMAPost.path + """ + instance = EpManageFabricsSwitchProvisionRMAPost() + with pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_switches_00520(): + """ + # Summary + + Verify EpManageFabricsSwitchProvisionRMAPost raises ValueError when switch_sn is not set + + ## Test + + - Accessing path raises ValueError when switch_sn is None + + ## Classes and Methods + + - EpManageFabricsSwitchProvisionRMAPost.path + """ + instance = EpManageFabricsSwitchProvisionRMAPost() + instance.fabric_name = "MyFabric" + with pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_switches_00530(): + """ + # Summary + + Verify EpManageFabricsSwitchProvisionRMAPost path without query params + + ## Test + + - Path is correctly built with fabric_name and switch_sn + - No query string appended when ticket_id is not set + + ## Classes and Methods + + - EpManageFabricsSwitchProvisionRMAPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchProvisionRMAPost() + instance.fabric_name = "MyFabric" + instance.switch_sn = "SAL1948TRTT" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/provisionRMA" + + +def test_endpoints_api_v1_manage_fabrics_switches_00540(): + """ + # Summary + + Verify 
EpManageFabricsSwitchProvisionRMAPost path with ticket_id + + ## Test + + - Path includes ticketId query parameter when set + + ## Classes and Methods + + - EpManageFabricsSwitchProvisionRMAPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchProvisionRMAPost() + instance.fabric_name = "MyFabric" + instance.switch_sn = "SAL1948TRTT" + instance.endpoint_params.ticket_id = "CHG12345" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/provisionRMA?ticketId=CHG12345" + + +# ============================================================================= +# Test: EpManageFabricsSwitchChangeSerialNumberPost +# ============================================================================= + + +def test_endpoints_api_v1_manage_fabrics_switches_00600(): + """ + # Summary + + Verify EpManageFabricsSwitchChangeSerialNumberPost basic instantiation + + ## Test + + - Instance can be created + - class_name is set correctly + - verb is POST + + ## Classes and Methods + + - EpManageFabricsSwitchChangeSerialNumberPost.__init__() + - EpManageFabricsSwitchChangeSerialNumberPost.class_name + - EpManageFabricsSwitchChangeSerialNumberPost.verb + """ + with does_not_raise(): + instance = EpManageFabricsSwitchChangeSerialNumberPost() + assert instance.class_name == "EpManageFabricsSwitchChangeSerialNumberPost" + assert instance.verb == HttpVerbEnum.POST + + +def test_endpoints_api_v1_manage_fabrics_switches_00610(): + """ + # Summary + + Verify EpManageFabricsSwitchChangeSerialNumberPost raises ValueError when fabric_name is not set + + ## Test + + - Accessing path raises ValueError when fabric_name is None + + ## Classes and Methods + + - EpManageFabricsSwitchChangeSerialNumberPost.path + """ + instance = EpManageFabricsSwitchChangeSerialNumberPost() + with pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_switches_00620(): + """ + # Summary + + Verify 
EpManageFabricsSwitchChangeSerialNumberPost raises ValueError when switch_sn is not set + + ## Test + + - Accessing path raises ValueError when switch_sn is None + + ## Classes and Methods + + - EpManageFabricsSwitchChangeSerialNumberPost.path + """ + instance = EpManageFabricsSwitchChangeSerialNumberPost() + instance.fabric_name = "MyFabric" + with pytest.raises(ValueError): + instance.path + + +def test_endpoints_api_v1_manage_fabrics_switches_00630(): + """ + # Summary + + Verify EpManageFabricsSwitchChangeSerialNumberPost path without query params + + ## Test + + - Path is correctly built with fabric_name and switch_sn + - No query string appended when cluster_name is not set + + ## Classes and Methods + + - EpManageFabricsSwitchChangeSerialNumberPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchChangeSerialNumberPost() + instance.fabric_name = "MyFabric" + instance.switch_sn = "SAL1948TRTT" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber" + + +def test_endpoints_api_v1_manage_fabrics_switches_00640(): + """ + # Summary + + Verify EpManageFabricsSwitchChangeSerialNumberPost path with cluster_name + + ## Test + + - Path includes clusterName query parameter when set + + ## Classes and Methods + + - EpManageFabricsSwitchChangeSerialNumberPost.path + """ + with does_not_raise(): + instance = EpManageFabricsSwitchChangeSerialNumberPost() + instance.fabric_name = "MyFabric" + instance.switch_sn = "SAL1948TRTT" + instance.endpoint_params.cluster_name = "cluster1" + result = instance.path + assert result == "/api/v1/manage/fabrics/MyFabric/switches/SAL1948TRTT/actions/changeSwitchSerialNumber?clusterName=cluster1"